source
stringlengths
3
92
c
stringlengths
26
2.25M
edgelist_transformation.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Narayanan Sundaram (Intel Corp.) 
* * ******************************************************************************/ #ifndef EDGELIST_TRANSFORMATIONS_H_ #define EDGELIST_TRANSFORMATIONS_H_ #include "GMDP/utils/edgelist.h" template <typename T> void remove_selfedges(edgelist_t<T>* edgelist) { int new_nnz = 0; edgelist_t<T> new_edgelist(edgelist->m, edgelist->n, edgelist->nnz); for(int i = 0; i < edgelist->nnz; i++) { if (edgelist->edges[i].src != edgelist->edges[i].dst) { new_edgelist.edges[new_nnz] = edgelist->edges[i]; new_nnz++; } } edgelist->clear(); edgelist->edges = new_edgelist.edges; edgelist->nnz = new_nnz; edgelist->m = new_edgelist.m; edgelist->n = new_edgelist.n; return; } template<typename T> bool compare_for_duplicates(const edge_t<T>& e1, const edge_t<T>& e2) { if (e1.src < e2.src) return true; else if (e1.src > e2.src) return false; if (e1.dst < e2.dst) return true; else return false; } template<typename T> void sort_types(edgelist_t<T>* edgelist) { __gnu_parallel::sort(edgelist->edges, edgelist->edges+edgelist->nnz, compare_for_duplicates<T>); } template <typename T> void remove_duplicate_edges_local(edgelist_t<T>* edgelist) { if (edgelist->nnz > 0) { sort_types<T>(edgelist); edgelist_t<T> new_edgelist(edgelist->m, edgelist->n, edgelist->nnz); unsigned long int nnz2 = 0; new_edgelist.edges[0] = edgelist->edges[0]; nnz2=1; for(unsigned long int i = 1; i < edgelist->nnz; i++) { if ((edgelist->edges[i].src == edgelist->edges[i-1].src) && (edgelist->edges[i].dst == edgelist->edges[i-1].dst)) { continue; } else { new_edgelist.edges[nnz2] = edgelist->edges[i]; nnz2++; } } edgelist->clear(); edgelist->edges = new_edgelist.edges; edgelist->nnz = nnz2; edgelist->m = new_edgelist.m; edgelist->n = new_edgelist.n; } } template <typename T> void shuffle_edges(edgelist_t<T>* edgelist) { int m = edgelist->m; int n = edgelist->n; auto nnz_l = edgelist->nnz; int global_nrank = get_global_nrank(); int global_myrank = get_global_myrank(); unsigned long int new_nnz = 0; printf("Rank %d: Before shuffle 
%d edges\n", global_myrank, edgelist->nnz); edge_t<T> * tedges = new edge_t<T>[nnz_l]; int* histogram = new int[omp_get_max_threads() * global_nrank](); int* offset = new int[omp_get_max_threads() * global_nrank](); int* woffset = new int[omp_get_max_threads() * global_nrank](); memset(histogram, 0, sizeof(int)*omp_get_max_threads() * global_nrank); memset(offset, 0, sizeof(int)*omp_get_max_threads() * global_nrank); memset(woffset, 0, sizeof(int)*omp_get_max_threads() * global_nrank); #pragma omp parallel { int nthreads = omp_get_num_threads(); int tid = omp_get_thread_num(); auto points_per_thread = nnz_l/nthreads; auto start = tid*points_per_thread; auto end = start + points_per_thread; start = (start > nnz_l)?(nnz_l):(start); end = (end > nnz_l)?(nnz_l):(end); end = (tid == nthreads-1)?(nnz_l):(end); for(auto i = start ; i < end ; i++) { int bin = (edgelist->edges[i].src-1)%global_nrank; assert(bin >= 0 && bin <= global_nrank-1); histogram[tid*global_nrank + bin]+=1; } } offset[0] = 0; for (int bin = 0; bin < global_nrank; bin++) { for (int tid = 0; tid < omp_get_max_threads(); tid++) { if (tid > 0) { offset[tid*global_nrank + bin] = offset[(tid-1)*global_nrank + bin] + histogram[(tid-1)*global_nrank + bin]; } if (tid == 0 && bin > 0) { offset[tid*global_nrank + bin] = offset[(omp_get_max_threads()-1)*global_nrank + bin-1] + histogram[(omp_get_max_threads()-1)*global_nrank + bin-1]; } } } #pragma omp parallel { int nthreads = omp_get_num_threads(); int tid = omp_get_thread_num(); auto points_per_thread = nnz_l/nthreads; auto start = tid*points_per_thread; auto end = start + points_per_thread; start = (start > nnz_l)?(nnz_l):(start); end = (end > nnz_l)?(nnz_l):(end); end = (tid == nthreads-1)?(nnz_l):(end); for(auto i = start ; i < end ; i++) { int bin = (edgelist->edges[i].src-1)%global_nrank; assert(bin >= 0 && bin <= global_nrank-1); tedges[offset[omp_get_thread_num()*global_nrank + bin] + woffset[omp_get_thread_num()*global_nrank + bin]] = 
edgelist->edges[i]; woffset[omp_get_thread_num()*global_nrank + bin]++; } } unsigned long int * positions = new unsigned long[global_nrank+1]; unsigned long int * counts = new unsigned long[global_nrank]; unsigned long int * recv_positions = new unsigned long[global_nrank+1]; unsigned long int * recv_counts = new unsigned long[global_nrank]; for (int bin = 0; bin < global_nrank; bin++) { positions[bin] = offset[bin]; counts[bin] = 0; for (int tid = 0; tid < omp_get_max_threads(); tid++) { counts[bin] += histogram[tid*global_nrank + bin]; } } positions[global_nrank] = nnz_l; MPI_Barrier(MPI_COMM_WORLD); MPI_Request* mpi_req = new MPI_Request[2 * global_nrank]; MPI_Status* mpi_status = new MPI_Status[2 * global_nrank]; for (int i = 0; i < global_nrank; i++) { MPI_Isend(&counts[i], 1, MPI_UNSIGNED_LONG, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]); } for (int i = 0; i < global_nrank; i++) { MPI_Irecv(&recv_counts[i], 1, MPI_UNSIGNED_LONG, i, i, MPI_COMM_WORLD, &mpi_req[i + global_nrank]); } MPI_Waitall(2 * global_nrank, mpi_req, mpi_status); MPI_Barrier(MPI_COMM_WORLD); recv_positions[0] = 0; for(int i = 0 ; i < global_nrank ; i++) { new_nnz += recv_counts[i]; recv_positions[i+1] = new_nnz; } MPI_Datatype MPI_EDGE_T; MPI_Type_contiguous(sizeof(edge_t<T>), MPI_CHAR, &MPI_EDGE_T); MPI_Type_commit(&MPI_EDGE_T); for (int i = 0; i < global_nrank; i++) { MPI_Isend(tedges + positions[i], counts[i] , MPI_EDGE_T, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]); } auto received_edges = edgelist_t<T>(m, n, new_nnz); for (int i = 0; i < global_nrank; i++) { MPI_Irecv(received_edges.edges + recv_positions[i], recv_counts[i] , MPI_EDGE_T, i, i, MPI_COMM_WORLD, &mpi_req[i+global_nrank]); } MPI_Waitall(2 * global_nrank, mpi_req, mpi_status); MPI_Barrier(MPI_COMM_WORLD); delete [] mpi_status; delete [] mpi_req; delete [] positions; delete [] counts; delete [] recv_positions; delete [] recv_counts; delete [] tedges; delete [] histogram; delete [] offset; delete [] woffset; 
printf("Rank %d: After shuffle %ld edges\n", global_myrank, new_nnz); edgelist->clear(); edgelist->edges = received_edges.edges; edgelist->m = m; edgelist->n = n; edgelist->nnz = new_nnz; return; } template <typename T> void remove_duplicate_edges(edgelist_t<T>* edgelist) { // everyone shuffles data to others (disjoint sets based on src), then everyone performs updates locally. if(get_global_nrank() == 1) { remove_duplicate_edges_local(edgelist); } else { shuffle_edges(edgelist); remove_duplicate_edges_local(edgelist); } return; } /* printf("Rank %d: Before shuffle %d edges\n", global_myrank, edgelist->nnz); edge_t<T> * tedges = new edge_t<T>[nnz_l]; int* histogram = new int[omp_get_max_threads() * global_nrank](); int* offset = new int[omp_get_max_threads() * global_nrank](); int* woffset = new int[omp_get_max_threads() * global_nrank](); memset(histogram, 0, sizeof(int)*omp_get_max_threads() * global_nrank); memset(offset, 0, sizeof(int)*omp_get_max_threads() * global_nrank); memset(woffset, 0, sizeof(int)*omp_get_max_threads() * global_nrank); #pragma omp parallel { int nthreads = omp_get_num_threads(); int tid = omp_get_thread_num(); auto points_per_thread = nnz_l/nthreads; auto start = tid*points_per_thread; auto end = start + points_per_thread; start = (start > nnz_l)?(nnz_l):(start); end = (end > nnz_l)?(nnz_l):(end); end = (tid == nthreads-1)?(nnz_l):(end); for(auto i = start ; i < end ; i++) { //int bin = (edgelist->edges[i].src-1)*global_nrank/n; int bin = (edgelist->edges[i].src-1)%global_nrank; assert(bin >= 0 && bin <= global_nrank-1); histogram[tid*global_nrank + bin]+=1; } } offset[0] = 0; for (int bin = 0; bin < global_nrank; bin++) { for (int tid = 0; tid < omp_get_max_threads(); tid++) { if (tid > 0) { offset[tid*global_nrank + bin] = offset[(tid-1)*global_nrank + bin] + histogram[(tid-1)*global_nrank + bin]; } if (tid == 0 && bin > 0) { offset[tid*global_nrank + bin] = offset[(omp_get_max_threads()-1)*global_nrank + bin-1] + 
histogram[(omp_get_max_threads()-1)*global_nrank + bin-1]; } } } #pragma omp parallel { int nthreads = omp_get_num_threads(); int tid = omp_get_thread_num(); auto points_per_thread = nnz_l/nthreads; auto start = tid*points_per_thread; auto end = start + points_per_thread; start = (start > nnz_l)?(nnz_l):(start); end = (end > nnz_l)?(nnz_l):(end); end = (tid == nthreads-1)?(nnz_l):(end); for(auto i = start ; i < end ; i++) { //int bin = (edgelist->edges[i].src-1)*global_nrank/n; int bin = (edgelist->edges[i].src-1)%global_nrank; assert(bin >= 0 && bin <= global_nrank-1); tedges[offset[omp_get_thread_num()*global_nrank + bin] + woffset[omp_get_thread_num()*global_nrank + bin]] = edgelist->edges[i]; woffset[omp_get_thread_num()*global_nrank + bin]++; } } unsigned long int * positions = new unsigned long[global_nrank+1]; unsigned long int * counts = new unsigned long[global_nrank]; unsigned long int * recv_positions = new unsigned long[global_nrank+1]; unsigned long int * recv_counts = new unsigned long[global_nrank]; for (int bin = 0; bin < global_nrank; bin++) { positions[bin] = offset[bin]; counts[bin] = 0; for (int tid = 0; tid < omp_get_max_threads(); tid++) { counts[bin] += histogram[tid*global_nrank + bin]; } } positions[global_nrank] = nnz_l; MPI_Barrier(MPI_COMM_WORLD); MPI_Request* mpi_req = new MPI_Request[2 * global_nrank]; MPI_Status* mpi_status = new MPI_Status[2 * global_nrank]; for (int i = 0; i < global_nrank; i++) { MPI_Isend(&counts[i], 1, MPI_UNSIGNED_LONG, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]); } for (int i = 0; i < global_nrank; i++) { MPI_Irecv(&recv_counts[i], 1, MPI_UNSIGNED_LONG, i, i, MPI_COMM_WORLD, &mpi_req[i + global_nrank]); } MPI_Waitall(2 * global_nrank, mpi_req, mpi_status); MPI_Barrier(MPI_COMM_WORLD); recv_positions[0] = 0; for(int i = 0 ; i < global_nrank ; i++) { new_nnz += recv_counts[i]; recv_positions[i+1] = new_nnz; } MPI_Datatype MPI_EDGE_T; MPI_Type_contiguous(sizeof(edge_t<T>), MPI_CHAR, &MPI_EDGE_T); 
MPI_Type_commit(&MPI_EDGE_T); for (int i = 0; i < global_nrank; i++) { MPI_Isend(tedges + positions[i], counts[i] , MPI_EDGE_T, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]); } auto received_edges = edgelist_t<T>(m, n, new_nnz); for (int i = 0; i < global_nrank; i++) { MPI_Irecv(received_edges.edges + recv_positions[i], recv_counts[i] , MPI_EDGE_T, i, i, MPI_COMM_WORLD, &mpi_req[i+global_nrank]); } MPI_Waitall(2 * global_nrank, mpi_req, mpi_status); MPI_Barrier(MPI_COMM_WORLD); delete [] mpi_status; delete [] mpi_req; delete [] positions; delete [] counts; delete [] recv_positions; delete [] recv_counts; delete [] tedges; delete [] histogram; delete [] offset; delete [] woffset; printf("Rank %d: After shuffle %ld edges\n", global_myrank, new_nnz); edgelist->clear(); edgelist->edges = received_edges.edges; edgelist->m = m; edgelist->n = n; edgelist->nnz = new_nnz; remove_duplicate_edges_local(edgelist); return; } }*/ template <typename T> void randomize_edge_direction(edgelist_t<T>* edgelist) { for(int i = 0; i < edgelist->nnz; i++) { if ((double)rand()/(double)RAND_MAX < 0.5) { std::swap(edgelist->edges[i].src, edgelist->edges[i].dst); } } } template <typename T> void create_bidirectional_edges(edgelist_t<T>* edgelist) { edgelist_t<T> new_edgelist(edgelist->m, edgelist->n, edgelist->nnz*2); for(int i = 0; i < edgelist->nnz; i++) { new_edgelist.edges[2*i] = edgelist->edges[i]; new_edgelist.edges[2*i+1] = edgelist->edges[i]; std::swap(new_edgelist.edges[2*i+1].src, new_edgelist.edges[2*i+1].dst); } edgelist->clear(); edgelist->edges = new_edgelist.edges; edgelist->nnz = new_edgelist.nnz; edgelist->m = new_edgelist.m; edgelist->n = new_edgelist.n; return; } template <typename T> void convert_to_dag(edgelist_t<T>* edgelist) { for(int i = 0; i < edgelist->nnz; i++) { if (edgelist->edges[i].src > edgelist->edges[i].dst) { std::swap(edgelist->edges[i].src, edgelist->edges[i].dst); } } } template <typename T> void random_edge_weights(edgelist_t<T>* edgelist, int 
random_range) { for(int i = 0; i < edgelist->nnz; i++) { double t = ((double)rand()/(double)RAND_MAX*(double)random_range); if (t > random_range) t = random_range; if (t < 1) t = 1; edgelist->edges[i].val = (T)t; } } template <typename T> edgelist_t<T> filter_edges(edgelist_t<T>* edgelist, bool(*filter_function)(edge_t<T>, void*), void* param=NULL) { edgelist_t<T> new_edgelist(edgelist->m, edgelist->n, edgelist->nnz); int k = 0; for(int i = 0; i < edgelist->nnz; i++) { if (filter_function(edgelist->edges[i], param)) { new_edgelist.edges[k] = edgelist->edges[i]; k++; } } new_edgelist.nnz = k; return new_edgelist; } #endif
kmp_csupport.c
/* * kmp_csupport.c -- kfront linkage support for OpenMP. */ /* <copyright> Copyright (c) 1997-2016 Intel Corporation. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
</copyright> */

#include "omp.h"        /* extern "C" declarations of user-visible routines */
#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_error.h"
#include "kmp_stats.h"

#if OMPT_SUPPORT
#include "ompt-internal.h"
#include "ompt-specific.h"
#endif

#define MAX_MESSAGE 512

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/* flags will be used in future, e.g., to implement */
/* openmp_strict library restrictions */

/*!
 * @ingroup STARTUP_SHUTDOWN
 * @param loc   in   source location information
 * @param flags in   for future use (currently ignored)
 *
 * Initialize the runtime library. This call is optional; if it is not made then
 * it will be implicitly called by attempts to use other library functions.
 */
void
__kmpc_begin(ident_t *loc, kmp_int32 flags)
{
    // By default __kmp_ignore_mppbeg() returns TRUE.
    if (__kmp_ignore_mppbeg() == FALSE) {
        __kmp_internal_begin();
        KC_TRACE( 10, ("__kmpc_begin: called\n" ) );
    }
}

/*!
 * @ingroup STARTUP_SHUTDOWN
 * @param loc source location information
 *
 * Shutdown the runtime library. This is also optional, and even if called will not
 * do anything unless the `KMP_IGNORE_MPPEND` environment variable is set to zero.
 */
void
__kmpc_end(ident_t *loc)
{
    // By default, __kmp_ignore_mppend() returns TRUE which makes __kmpc_end() call no-op.
    // However, this can be overridden with KMP_IGNORE_MPPEND environment variable.
    // If KMP_IGNORE_MPPEND is 0, __kmp_ignore_mppend() returns FALSE and __kmpc_end()
    // will unregister this root (it can cause library shut down).
    if (__kmp_ignore_mppend() == FALSE) {
        KC_TRACE( 10, ("__kmpc_end: called\n" ) );
        KA_TRACE( 30, ("__kmpc_end\n" ));
        __kmp_internal_end_thread( -1 );
    }
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The global thread index of the active thread.

This function can be called in any context.

If the runtime has only been entered at the outermost level from a
single (necessarily non-OpenMP<sup>*</sup>) thread, then the thread number is that
which would be returned by omp_get_thread_num() in the outermost
active parallel construct. (Or zero if there is no active parallel
construct, since the master thread is necessarily thread zero).

If multiple non-OpenMP threads all enter an OpenMP construct then this
will be a unique thread identifier among all the threads created by
the OpenMP runtime (but the value cannot be defined in terms of
OpenMP thread ids returned by omp_get_thread_num()).
*/
kmp_int32
__kmpc_global_thread_num(ident_t *loc)
{
    kmp_int32 gtid = __kmp_entry_gtid();

    KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", gtid ) );

    return gtid;
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads under control of the OpenMP<sup>*</sup> runtime

This function can be called in any context.
It returns the total number of threads under the control of the OpenMP runtime. That is
not a number that can be determined by any OpenMP standard calls, since the library may be
called from more than one non-OpenMP thread, and this reflects the total over all such calls.
Similarly the runtime maintains underlying threads even when they are not active (since the cost
of creating and destroying OS threads is high), this call counts all such threads even if they are
not waiting for work.
*/
kmp_int32
__kmpc_global_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_nth ) );

    // TCR_4: consistent 4-byte read of the (possibly concurrently updated)
    // global thread count.
    return TCR_4(__kmp_nth);
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The thread number of the calling thread in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_thread_num(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) );
    return __kmp_tid_from_gtid( __kmp_entry_gtid() );
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) );
    return __kmp_entry_thread() -> th.th_team -> t.t_nproc;
}

/*!
 * @ingroup DEPRECATED
 * @param loc location description
 *
 * This function need not be called. It always returns TRUE.
 */
kmp_int32
__kmpc_ok_to_fork(ident_t *loc)
{
#ifndef KMP_DEBUG

    return TRUE;

#else

    // Debug builds honor the KMP_PAR_RANGE filter: loc->psource appears to be
    // a ';'-separated string (file/routine/line fields) -- TODO confirm exact
    // field layout against the ident_t documentation in kmp.h.
    const char *semi2;
    const char *semi3;
    int line_no;

    if (__kmp_par_range == 0) {
        return TRUE;
    }
    semi2 = loc->psource;
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2 + 1, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    if (__kmp_par_range_filename[0]) {
        const char *name = semi2 - 1;
        // Walk backwards to the last path separator (or field separator) so
        // only the basename is compared against the filename filter.
        while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
            name--;
        }
        if ((*name == '/') || (*name == ';')) {
            name++;
        }
        if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
            return __kmp_par_range < 0;
        }
    }
    semi3 = strchr(semi2 + 1, ';');
    if (__kmp_par_range_routine[0]) {
        if ((semi3 != NULL) && (semi3 > semi2)
          && (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
            return __kmp_par_range < 0;
        }
    }
    if (KMP_SSCANF(semi3 + 1, "%d", &line_no) == 1) {
        if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
            return __kmp_par_range > 0;
        }
        return __kmp_par_range < 0;
    }
    return TRUE;

#endif /* KMP_DEBUG */

}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return 1 if this thread is executing inside an active parallel region, zero if not.
*/
kmp_int32
__kmpc_in_parallel( ident_t *loc )
{
    return __kmp_entry_thread() -> th.th_root -> r.r_active;
}

/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_threads number of threads requested for this parallel construct

Set the number of threads to be used by the next fork spawned by this thread.
This call is only required if the parallel construct has a `num_threads` clause.
*/
void
__kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads )
{
    KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
      global_tid, num_threads ) );

    __kmp_push_num_threads( loc, global_tid, num_threads );
}

void
__kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid )
{
    KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) );

    /* the num_threads are automatically popped */
}

#if OMP_40_ENABLED

// Record the proc_bind policy requested for the next parallel region
// (OpenMP 4.0 'proc_bind' clause).
void
__kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind )
{
    KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n",
      global_tid, proc_bind ) );

    __kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind );
}

#endif /* OMP_40_ENABLED */

/*!
@ingroup PARALLEL
@param loc  source location information
@param argc  total number of arguments in the ellipsis
@param microtask  pointer to callback routine consisting of outlined parallel construct
@param ...  pointers to shared variables that aren't global

Do the actual fork and call the microtask in the relevant number of threads.
*/
void
__kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
  int gtid = __kmp_entry_gtid();

#if (KMP_STATS_ENABLED)
  // Track nested vs. top-level parallel regions for the stats gathering.
  int inParallel = __kmpc_in_parallel(loc);
  if (inParallel)
  {
      KMP_COUNT_BLOCK(OMP_NESTED_PARALLEL);
  }
  else
  {
      KMP_STOP_EXPLICIT_TIMER(OMP_serial);
      KMP_COUNT_BLOCK(OMP_PARALLEL);
  }
#endif

  // maybe to save thr_state is enough here
  {
    va_list ap;
    va_start( ap, microtask );

#if OMPT_SUPPORT
    // Publish the re-enter frame for OMPT tools before forking.
    int tid = __kmp_tid_from_gtid( gtid );
    kmp_info_t *master_th = __kmp_threads[ gtid ];
    kmp_team_t *parent_team = master_th->th.th_team;
    if (ompt_enabled) {
        parent_team->t.t_implicit_task_taskdata[tid].
            ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

#if INCLUDE_SSC_MARKS
    SSC_MARK_FORKING();
#endif
    __kmp_fork_call( loc, gtid, fork_context_intel,
            argc,
#if OMPT_SUPPORT
            VOLATILE_CAST(void *) microtask,      // "unwrapped" task
#endif
            VOLATILE_CAST(microtask_t) microtask, // "wrapped" task
            VOLATILE_CAST(launch_t)    __kmp_invoke_task_func,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
// On these ABIs the va_list is passed by address rather than by value.
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
            &ap
#else
            ap
#endif
            );
#if INCLUDE_SSC_MARKS
    SSC_MARK_JOINING();
#endif
    __kmp_join_call( loc, gtid
#if OMPT_SUPPORT
        , fork_context_intel
#endif
    );

    va_end( ap );

#if OMPT_SUPPORT
    if (ompt_enabled) {
        parent_team->t.t_implicit_task_taskdata[tid].
            ompt_task_info.frame.reenter_runtime_frame = 0;
    }
#endif
  }

#if (KMP_STATS_ENABLED)
  if (!inParallel)
      KMP_START_EXPLICIT_TIMER(OMP_serial);
#endif
}

#if OMP_40_ENABLED
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_teams number of teams requested for the teams construct
@param num_threads number of threads per team requested for the teams construct

Set the number of teams to be used by the teams construct.
This call is only required if the teams construct has a `num_teams` clause
or a `thread_limit` clause (or both).
*/
void
__kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads )
{
    KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
      global_tid, num_teams, num_threads ) );

    __kmp_push_num_teams( loc, global_tid, num_teams, num_threads );
}

/*!
@ingroup PARALLEL
@param loc source location information
@param argc total number of arguments in the ellipsis
@param microtask pointer to callback routine consisting of outlined teams construct
@param ... pointers to shared variables that aren't global

Do the actual fork and call the microtask in the relevant number of threads.
*/
void
__kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
    int gtid = __kmp_entry_gtid();
    kmp_info_t *this_thr = __kmp_threads[ gtid ];
    va_list ap;
    va_start( ap, microtask );

    KMP_COUNT_BLOCK(OMP_TEAMS);

    // remember teams entry point and nesting level
    this_thr->th.th_teams_microtask = microtask;
    this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level; // AC: can be >0 on host

#if OMPT_SUPPORT
    kmp_team_t *parent_team = this_thr->th.th_team;
    int tid = __kmp_tid_from_gtid( gtid );
    if (ompt_enabled) {
        parent_team->t.t_implicit_task_taskdata[tid].
            ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    // check if __kmpc_push_num_teams called, set default number of teams otherwise
    if ( this_thr->th.th_teams_size.nteams == 0 ) {
        __kmp_push_num_teams( loc, gtid, 0, 0 );
    }
    KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1);
    KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1);
    KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1);

    // Fork with __kmp_teams_master as the wrapped task; it will in turn run
    // the user microtask remembered above.
    __kmp_fork_call( loc, gtid, fork_context_intel,
            argc,
#if OMPT_SUPPORT
            VOLATILE_CAST(void *) microtask,               // "unwrapped" task
#endif
            VOLATILE_CAST(microtask_t) __kmp_teams_master, // "wrapped" task
            VOLATILE_CAST(launch_t)    __kmp_invoke_teams_master,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
            &ap
#else
            ap
#endif
            );
    __kmp_join_call( loc, gtid
#if OMPT_SUPPORT
        , fork_context_intel
#endif
    );

#if OMPT_SUPPORT
    if (ompt_enabled) {
        parent_team->t.t_implicit_task_taskdata[tid].
            ompt_task_info.frame.reenter_runtime_frame = NULL;
    }
#endif

    // Reset the teams bookkeeping for this thread.
    this_thr->th.th_teams_microtask = NULL;
    this_thr->th.th_teams_level = 0;
    *(kmp_int64*)(&this_thr->th.th_teams_size) = 0L;
    va_end( ap );
}
#endif /* OMP_40_ENABLED */

//
// I don't think this function should ever have been exported.
// The __kmpc_ prefix was misapplied. I'm fairly certain that no generated
// openmp code ever called it, but it's been exported from the RTL for so
// long that I'm afraid to remove the definition.
//
int
__kmpc_invoke_task_func( int gtid )
{
    return __kmp_invoke_task_func( gtid );
}

/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number

Enter a serialized parallel construct. This interface is used to handle a
conditional parallel region, like this,
@code
#pragma omp parallel if (condition)
@endcode
when the condition is false.
*/
void
__kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
    __kmp_serialized_parallel(loc, global_tid); /* The implementation is now in kmp_runtime.c so that it can share static functions with
                                                 * kmp_fork_call since the tasks to be done are similar in each case. */
}

/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number

Leave a serialized parallel construct.
*/
void
__kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
    kmp_internal_control_t *top;
    kmp_info_t *this_thr;
    kmp_team_t *serial_team;

    KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) );

    /* skip all this code for autopar serialized loops since
       it results in unacceptable overhead */
    if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) )
        return;

    // Not autopar code
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    this_thr    = __kmp_threads[ global_tid ];
    serial_team = this_thr->th.th_serial_team;

#if OMP_41_ENABLED
    kmp_task_team_t * task_team = this_thr->th.th_task_team;

    // we need to wait for the proxy tasks before finishing the thread
    if ( task_team != NULL && task_team->tt.tt_found_proxy_tasks )
        __kmp_task_team_wait(this_thr, serial_team, NULL ); // is an ITT object needed here?
#endif

    KMP_MB();
    // Sanity-check that we really are leaving this thread's serial team.
    KMP_DEBUG_ASSERT( serial_team );
    KMP_ASSERT(       serial_team -> t.t_serialized );
    KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team );
    KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team );
    KMP_DEBUG_ASSERT( serial_team -> t.t_threads );
    KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr );

    /* If necessary, pop the internal control stack values and replace the team values */
    top = serial_team -> t.t_control_stack_top;
    if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) {
        copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top );
        serial_team -> t.t_control_stack_top = top -> next;
        __kmp_free(top);
    }

    //if( serial_team -> t.t_serialized > 1 )
    serial_team -> t.t_level--;

    /* pop dispatch buffers stack */
    KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer);
    {
        dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer;
        serial_team->t.t_dispatch->th_disp_buffer =
            serial_team->t.t_dispatch->th_disp_buffer->next;
        __kmp_free( disp_buffer );
    }

    -- serial_team -> t.t_serialized;
    if ( serial_team -> t.t_serialized == 0 ) {

        /* return to the parallel section */

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
        // Restore the floating-point control state saved when the serialized
        // region was entered.
        if ( __kmp_inherit_fp_control && serial_team->t.t_fp_control_saved ) {
            __kmp_clear_x87_fpu_status_word();
            __kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word );
            __kmp_load_mxcsr( &serial_team->t.t_mxcsr );
        }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

        this_thr -> th.th_team           = serial_team -> t.t_parent;
        this_thr -> th.th_info.ds.ds_tid = serial_team -> t.t_master_tid;

        /* restore values cached in the thread */
        this_thr -> th.th_team_nproc     = serial_team -> t.t_parent -> t.t_nproc;          /*  JPH */
        this_thr -> th.th_team_master    = serial_team -> t.t_parent -> t.t_threads[0];     /* JPH */
        this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized;

        /* TODO the below shouldn't need to be adjusted for serialized teams */
        this_thr -> th.th_dispatch       = & this_thr -> th.th_team ->
            t.t_dispatch[ serial_team -> t.t_master_tid ];

        __kmp_pop_current_task_from_thread( this_thr );

        KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 );
        this_thr -> th.th_current_task -> td_flags.executing = 1;

        if ( __kmp_tasking_mode != tskm_immediate_exec ) {
            // Copy the task team from the new child / old parent team to the thread.
            this_thr->th.th_task_team = this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state];
            KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n",
                            global_tid, this_thr->th.th_task_team, this_thr->th.th_team ) );
        }
    } else {
        if ( __kmp_tasking_mode != tskm_immediate_exec ) {
            KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n",
                            global_tid, serial_team, serial_team -> t.t_serialized ) );
        }
    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_parallel( global_tid, NULL );
}

/*!
@ingroup SYNCHRONIZATION
@param loc  source location information.

Execute <tt>flush</tt>. This is implemented as a full memory fence. (Though
depending on the memory ordering convention obeyed by the compiler
even that may not be necessary).
*/
void
__kmpc_flush(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_flush: called\n" ) );

    /* need explicit __mf() here since use volatile instead in library */
    KMP_MB();               /* Flush all pending memory write invalidates.  */

    #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
        #if KMP_MIC
            // fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
            // We shouldn't need it, though, since the ABI rules require that
            // * If the compiler generates NGO stores it also generates the fence
            // * If users hand-code NGO stores they should insert the fence
            // therefore no incomplete unordered stores should be visible.
        #else
            // C74404
            // This is to address non-temporal store instructions (sfence needed).
            // The clflush instruction is addressed either (mfence needed).
// Probably the non-temporal load monvtdqa instruction should also be addressed. // mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2. if ( ! __kmp_cpuinfo.initialized ) { __kmp_query_cpuid( & __kmp_cpuinfo ); }; // if if ( ! __kmp_cpuinfo.sse2 ) { // CPU cannot execute SSE2 instructions. } else { #if KMP_COMPILER_ICC || KMP_COMPILER_MSVC _mm_mfence(); #else __sync_synchronize(); #endif // KMP_COMPILER_ICC }; // if #endif // KMP_MIC #elif (KMP_ARCH_ARM || KMP_ARCH_AARCH64) // Nothing to see here move along #elif KMP_ARCH_PPC64 // Nothing needed here (we have a real MB above). #if KMP_OS_CNK // The flushing thread needs to yield here; this prevents a // busy-waiting thread from saturating the pipeline. flush is // often used in loops like this: // while (!flag) { // #pragma omp flush(flag) // } // and adding the yield here is good for at least a 10x speedup // when running >2 threads per core (on the NAS LU benchmark). __kmp_yield(TRUE); #endif #else #error Unknown or unsupported architecture #endif } /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /*! @ingroup SYNCHRONIZATION @param loc source location information @param global_tid thread id. Execute a barrier. */ void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid) { KMP_COUNT_BLOCK(OMP_BARRIER); KMP_TIME_BLOCK(OMP_barrier); KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) ); if (! TCR_4(__kmp_init_parallel)) __kmp_parallel_initialize(); if ( __kmp_env_consistency_check ) { if ( loc == 0 ) { KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user? }; // if __kmp_check_barrier( global_tid, ct_barrier, loc ); } __kmp_threads[ global_tid ]->th.th_ident = loc; // TODO: explicit barrier_wait_id: // this function is called when 'barrier' directive is present or // implicit barrier at the end of a worksharing construct. 
// 1) better to add a per-thread barrier counter to a thread data structure // 2) set to 0 when a new team is created // 4) no sync is required __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL ); } /* The BARRIER for a MASTER section is always explicit */ /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number . @return 1 if this thread should execute the <tt>master</tt> block, 0 otherwise. */ kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid) { KMP_COUNT_BLOCK(OMP_MASTER); int status = 0; KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) ); if( ! TCR_4( __kmp_init_parallel ) ) __kmp_parallel_initialize(); if( KMP_MASTER_GTID( global_tid )) { KMP_START_EXPLICIT_TIMER(OMP_master); status = 1; } #if OMPT_SUPPORT && OMPT_TRACE if (status) { if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_master_begin)) { kmp_info_t *this_thr = __kmp_threads[ global_tid ]; kmp_team_t *team = this_thr -> th.th_team; int tid = __kmp_tid_from_gtid( global_tid ); ompt_callbacks.ompt_callback(ompt_event_master_begin)( team->t.ompt_team_info.parallel_id, team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id); } } #endif if ( __kmp_env_consistency_check ) { #if KMP_USE_DYNAMIC_LOCK if (status) __kmp_push_sync( global_tid, ct_master, loc, NULL, 0 ); else __kmp_check_sync( global_tid, ct_master, loc, NULL, 0 ); #else if (status) __kmp_push_sync( global_tid, ct_master, loc, NULL ); else __kmp_check_sync( global_tid, ct_master, loc, NULL ); #endif } return status; } /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number . Mark the end of a <tt>master</tt> region. This should only be called by the thread that executes the <tt>master</tt> region. 
*/
void
__kmpc_end_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) );

    KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid ));
    KMP_STOP_EXPLICIT_TIMER(OMP_master);

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr = __kmp_threads[ global_tid ];
    kmp_team_t *team     = this_thr -> th.th_team;
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_master_end)) {
        int tid = __kmp_tid_from_gtid( global_tid );
        ompt_callbacks.ompt_callback(ompt_event_master_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif

    if ( __kmp_env_consistency_check ) {
        if( global_tid < 0 )
            KMP_WARNING( ThreadIdentInvalid );

        // Pop only on the master thread — it is the one that pushed in __kmpc_master.
        if( KMP_MASTER_GTID( global_tid ))
            __kmp_pop_sync( global_tid, ct_master, loc );
    }
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.

Start execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_ordered( ident_t * loc, kmp_int32 gtid )
{
    int cid = 0;
    kmp_info_t *th;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid ));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

#if USE_ITT_BUILD
    __kmp_itt_ordered_prep( gtid );
    // TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */

    th = __kmp_threads[ gtid ];

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled) {
        /* OMPT state update */
        th->th.ompt_thread_info.wait_id = (uint64_t) loc;
        th->th.ompt_thread_info.state = ompt_state_wait_ordered;

        /* OMPT event callback */
        if (ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) {
            ompt_callbacks.ompt_callback(ompt_event_wait_ordered)(
                th->th.ompt_thread_info.wait_id);
        }
    }
#endif

    // Dispatch-specific "enter ordered" hook if installed, otherwise the
    // generic parallel implementation.
    if ( th -> th.th_dispatch -> th_deo_fcn != 0 )
        (*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc );
    else
        __kmp_parallel_deo( & gtid, & cid, loc );

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled) {
        /* OMPT state update */
        th->th.ompt_thread_info.state = ompt_state_work_parallel;
        th->th.ompt_thread_info.wait_id = 0;

        /* OMPT event callback */
        // NOTE(review): wait_id was just reset to 0, so the acquired-ordered
        // callback always receives 0 here — confirm this is intended.
        if (ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) {
            ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)(
                th->th.ompt_thread_info.wait_id);
        }
    }
#endif

#if USE_ITT_BUILD
    __kmp_itt_ordered_start( gtid );
#endif /* USE_ITT_BUILD */
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.

End execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_end_ordered( ident_t * loc, kmp_int32 gtid )
{
    int cid = 0;
    kmp_info_t *th;

    KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) );

#if USE_ITT_BUILD
    __kmp_itt_ordered_end( gtid );
    // TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */

    th = __kmp_threads[ gtid ];

    // Dispatch-specific "exit ordered" hook if installed, otherwise the
    // generic parallel implementation (mirrors __kmpc_ordered).
    if ( th -> th.th_dispatch -> th_dxo_fcn != 0 )
        (*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc );
    else
        __kmp_parallel_dxo( & gtid, & cid, loc );

#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_release_ordered)) {
        ompt_callbacks.ompt_callback(ompt_event_release_ordered)(
            th->th.ompt_thread_info.wait_id);
    }
#endif
}

#if KMP_USE_DYNAMIC_LOCK

// Lazily allocate and publish the indirect lock used for a 'critical' section.
// The winner of the compare-and-store owns the published lock; losers keep the
// winner's lock and intentionally leak their own (cleaned up at program exit).
static __forceinline void
__kmp_init_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid, kmp_indirect_locktag_t tag)
{
    // Pointer to the allocated indirect lock is written to crit, while indexing is ignored.
    void *idx;
    kmp_indirect_lock_t **lck;
    lck = (kmp_indirect_lock_t **)crit;
    kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag);
    KMP_I_LOCK_FUNC(ilk, init)(ilk->lock);
    KMP_SET_I_LOCK_LOCATION(ilk, loc);
    KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
    KA_TRACE(20, ("__kmp_init_indirect_csptr: initialized indirect lock #%d\n", tag));
#if USE_ITT_BUILD
    __kmp_itt_critical_creating(ilk->lock, loc);
#endif
    int status = KMP_COMPARE_AND_STORE_PTR(lck, 0, ilk);
    if (status == 0) {
#if USE_ITT_BUILD
        __kmp_itt_critical_destroyed(ilk->lock);
#endif
        // We don't really need to destroy the unclaimed lock here since it will be cleaned up at program exit.
        //KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
    }
    KMP_DEBUG_ASSERT(*lck != NULL);
}

// Fast-path acquire tas lock: inline test-and-set with bounded spin + yield
// before falling into a backoff loop.
#define KMP_ACQUIRE_TAS_LOCK(lock, gtid) {                                                              \
    kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock;                                                         \
    if (l->lk.poll != KMP_LOCK_FREE(tas) ||                                                             \
        ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \
        kmp_uint32 spins;                                                                               \
        KMP_FSYNC_PREPARE(l);                                                                           \
        KMP_INIT_YIELD(spins);                                                                          \
        if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {                   \
            KMP_YIELD(TRUE);                                                                            \
        } else {                                                                                        \
            KMP_YIELD_SPIN(spins);                                                                      \
        }                                                                                               \
        kmp_backoff_t backoff = __kmp_spin_backoff_params;                                              \
        while (l->lk.poll != KMP_LOCK_FREE(tas) ||                                                      \
               ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \
            __kmp_spin_backoff(&backoff);                                                               \
            if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {               \
                KMP_YIELD(TRUE);                                                                        \
            } else {                                                                                    \
                KMP_YIELD_SPIN(spins);                                                                  \
            }                                                                                           \
        }                                                                                               \
    }                                                                                                   \
    KMP_FSYNC_ACQUIRED(l);                                                                              \
}

// Fast-path test tas lock (non-blocking try-acquire).
#define KMP_TEST_TAS_LOCK(lock, gtid, rc) {                                                             \
    kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock;                                                         \
    rc = l->lk.poll == KMP_LOCK_FREE(tas) &&                                                            \
         KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas));    \
}

// Fast-path release tas lock.
#define KMP_RELEASE_TAS_LOCK(lock, gtid) {                                                              \
    TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, KMP_LOCK_FREE(tas));                                       \
    KMP_MB();                                                                                           \
}

#if KMP_USE_FUTEX

# include <unistd.h>
# include <sys/syscall.h>
# ifndef FUTEX_WAIT
#  define FUTEX_WAIT 0
# endif
# ifndef FUTEX_WAKE
#  define FUTEX_WAKE 1
# endif

// Fast-path acquire futex lock.  The low bit of the poll word marks "waiters
// present"; a contended acquirer sets it before sleeping in FUTEX_WAIT.
#define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid) {                                                            \
    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                                                   \
    kmp_int32 gtid_code = (gtid+1) << 1;                                                                \
    KMP_MB();                                                                                           \
    KMP_FSYNC_PREPARE(ftx);                                                                             \
    kmp_int32 poll_val;                                                                                 \
    while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), KMP_LOCK_FREE(futex),               \
                                                   KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) { \
        kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;                                                  \
        if (!cond) {                                                                                    \
            /* Advertise a waiter before sleeping; retry if the owner changed meanwhile. */             \
            if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | KMP_LOCK_BUSY(1, futex))) { \
                continue;                                                                               \
            }                                                                                           \
            poll_val |= KMP_LOCK_BUSY(1, futex);                                                        \
        }                                                                                               \
        kmp_int32 rc;                                                                                   \
        if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val, NULL, NULL, 0)) != 0) {    \
            continue;                                                                                   \
        }                                                                                               \
        gtid_code |= 1;                                                                                 \
    }                                                                                                   \
    KMP_FSYNC_ACQUIRED(ftx);                                                                            \
}

// Fast-path test futex lock (non-blocking try-acquire).
#define KMP_TEST_FUTEX_LOCK(lock, gtid, rc) {                                                           \
    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                                                   \
    if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY(gtid+1, futex) << 1)) { \
        KMP_FSYNC_ACQUIRED(ftx);                                                                        \
        rc = TRUE;                                                                                      \
    } else {                                                                                            \
        rc = FALSE;                                                                                     \
    }                                                                                                   \
}

// Fast-path release futex lock: wake one sleeper only if the waiter bit is set.
#define KMP_RELEASE_FUTEX_LOCK(lock, gtid) {                                                            \
    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                                                   \
    KMP_MB();                                                                                           \
    KMP_FSYNC_RELEASING(ftx);                                                                           \
    kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), KMP_LOCK_FREE(futex));                       \
    if (KMP_LOCK_STRIP(poll_val) & 1) {                                                                 \
        syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0);       \
    }                                                                                                   \
    KMP_MB();                                                                                           \
    KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));                  \
}

#endif // KMP_USE_FUTEX

#else // KMP_USE_DYNAMIC_LOCK

// Lazily allocate (and race-free publish) the user lock backing a 'critical'
// name when the lock object does not fit inside the critical name itself.
static kmp_user_lock_p
__kmp_get_critical_section_ptr( kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid )
{
    kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit;

    //
    // Because of the double-check, the following load
    // doesn't need to be volatile.
    //
    kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );

    if ( lck == NULL ) {
        void * idx;

        // Allocate & initialize the lock.
        // Remember allocated locks in table in order to free them in __kmp_cleanup()
        lck = __kmp_user_lock_allocate( &idx, gtid, kmp_lf_critical_section );
        __kmp_init_user_lock_with_checks( lck );
        __kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
        __kmp_itt_critical_creating( lck );
            // __kmp_itt_critical_creating() should be called *before* the first usage of underlying
            // lock. It is the only place where we can guarantee it. There are chances the lock will
            // destroyed with no usage, but it is not a problem, because this is not real event seen
            // by user but rather setting name for object (lock). See more details in kmp_itt.h.
#endif /* USE_ITT_BUILD */

        //
        // Use a cmpxchg instruction to slam the start of the critical
        // section with the lock pointer.  If another thread beat us
        // to it, deallocate the lock, and use the lock that the other
        // thread allocated.
        //
        int status = KMP_COMPARE_AND_STORE_PTR( lck_pp, 0, lck );

        if ( status == 0 ) {
            // Deallocate the lock and reload the value.
#if USE_ITT_BUILD
            __kmp_itt_critical_destroyed( lck );
                // Let ITT know the lock is destroyed and the same memory location may be reused for
                // another purpose.
#endif /* USE_ITT_BUILD */
            __kmp_destroy_user_lock_with_checks( lck );
            __kmp_user_lock_free( &idx, gtid, lck );
            lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
            KMP_DEBUG_ASSERT( lck != NULL );
        }
    }
    return lck;
}

#endif // KMP_USE_DYNAMIC_LOCK

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.

Enter code protected by a `critical` construct.
This function blocks until the executing thread can enter the critical section.
*/
void
__kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
{
#if KMP_USE_DYNAMIC_LOCK
    // With dynamic locks, the hinted entry point handles everything.
    __kmpc_critical_with_hint(loc, global_tid, crit, omp_lock_hint_none);
#else
    KMP_COUNT_BLOCK(OMP_CRITICAL);
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );

    //TODO: add THR_OVHD_STATE

    KMP_CHECK_USER_LOCK_INIT();

    // If the lock object fits directly inside the critical name, use it in
    // place; otherwise fall back to the lazily allocated lock table entry.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#endif
    else { // ticket, queuing or drdpa
        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    }

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    /* since the critical directive binds to all threads, not just
     * the current team we have to check this even if we are in a
     * serialized team */
    /* also, even if we are the uber thread, we still have to conduct the lock,
     * as we have to contend with sibling threads */

#if USE_ITT_BUILD
    __kmp_itt_critical_acquiring( lck );
#endif /* USE_ITT_BUILD */
    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
    __kmp_acquire_user_lock_with_checks( lck, global_tid );

#if USE_ITT_BUILD
    __kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */

    KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
#endif // KMP_USE_DYNAMIC_LOCK
}

#if KMP_USE_DYNAMIC_LOCK

// Converts the given hint to an internal lock implementation
static __forceinline kmp_dyna_lockseq_t
__kmp_map_hint_to_lock(uintptr_t hint)
{
#if KMP_USE_TSX
# define KMP_TSX_LOCK(seq) lockseq_##seq
#else
    // Without TSX support every speculative hint degrades to the default lock.
# define KMP_TSX_LOCK(seq) __kmp_user_lock_seq
#endif

    // Hints that do not require further logic
    if (hint & kmp_lock_hint_hle)
        return KMP_TSX_LOCK(hle);
    if (hint & kmp_lock_hint_rtm)
        return (__kmp_cpuinfo.rtm)? KMP_TSX_LOCK(rtm): __kmp_user_lock_seq;
    if (hint & kmp_lock_hint_adaptive)
        return (__kmp_cpuinfo.rtm)? KMP_TSX_LOCK(adaptive): __kmp_user_lock_seq;

    // Rule out conflicting hints first by returning the default lock
    if ((hint & omp_lock_hint_contended) && (hint & omp_lock_hint_uncontended))
        return __kmp_user_lock_seq;
    if ((hint & omp_lock_hint_speculative) && (hint & omp_lock_hint_nonspeculative))
        return __kmp_user_lock_seq;

    // Do not even consider speculation when it appears to be contended
    if (hint & omp_lock_hint_contended)
        return lockseq_queuing;

    // Uncontended lock without speculation
    if ((hint & omp_lock_hint_uncontended) && !(hint & omp_lock_hint_speculative))
        return lockseq_tas;

    // HLE lock for speculation
    if (hint & omp_lock_hint_speculative)
        return KMP_TSX_LOCK(hle);

    return __kmp_user_lock_seq;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number.
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section,
or some other suitably unique value.
@param hint the lock hint.

Enter code protected by a `critical` construct with a hint. The hint value is used to suggest a lock implementation.
This function blocks until the executing thread can enter the critical section unless the hint suggests use of
speculative execution and the hardware supports it.
*/
void
__kmpc_critical_with_hint( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit, uintptr_t hint )
{
    KMP_COUNT_BLOCK(OMP_CRITICAL);
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );

    kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
    // Check if it is initialized.
    if (*lk == 0) {
        kmp_dyna_lockseq_t lckseq = __kmp_map_hint_to_lock(hint);
        if (KMP_IS_D_LOCK(lckseq)) {
            // Direct lock: store its tag in place; losing the race is fine —
            // some thread stored a valid tag either way.
            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(lckseq));
        } else {
            __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(lckseq));
        }
    }
    // Branch for accessing the actual lock object and set operation. This branching is inevitable since
    // this lock initialization does not follow the normal dispatch path (lock table is not used).
    if (KMP_EXTRACT_D_TAG(lk) != 0) {
        lck = (kmp_user_lock_p)lk;
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint));
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_acquiring(lck);
# endif
# if KMP_USE_INLINED_TAS
        if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
            KMP_ACQUIRE_TAS_LOCK(lck, global_tid);
        } else
# elif KMP_USE_INLINED_FUTEX
        if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
            KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid);
        } else
# endif
        {
            KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
        }
    } else {
        kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
        lck = ilk->lock;
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint));
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_acquiring(lck);
# endif
        KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
    }

#if USE_ITT_BUILD
    __kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */

    KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
} // __kmpc_critical_with_hint

#endif // KMP_USE_DYNAMIC_LOCK

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.

Leave a critical section, releasing any lock that was held during its execution.
*/
void
__kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
{
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));

#if KMP_USE_DYNAMIC_LOCK
    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        KMP_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_pop_sync(global_tid, ct_critical, loc);
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_releasing( lck );
# endif
# if KMP_USE_INLINED_TAS
        if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
            KMP_RELEASE_TAS_LOCK(lck, global_tid);
        } else
# elif KMP_USE_INLINED_FUTEX
        if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
            KMP_RELEASE_FUTEX_LOCK(lck, global_tid);
        } else
# endif
        {
            KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
        }
    } else {
        kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
        KMP_ASSERT(ilk != NULL);
        lck = ilk->lock;
        if (__kmp_env_consistency_check) {
            __kmp_pop_sync(global_tid, ct_critical, loc);
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_releasing( lck );
# endif
        KMP_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // Mirror of the lock-selection logic in __kmpc_critical: the same 'crit'
    // must map to the same lock object on release.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#endif
    else { // ticket, queuing or drdpa
        lck =
            (kmp_user_lock_p) TCR_PTR(*((kmp_user_lock_p *)crit));
    }

    KMP_ASSERT(lck != NULL);

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );

#if USE_ITT_BUILD
    __kmp_itt_critical_releasing( lck );
#endif /* USE_ITT_BUILD */
    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
    __kmp_release_user_lock_with_checks( lck, global_tid );

#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_release_critical)) {
        ompt_callbacks.ompt_callback(ompt_event_release_critical)(
            (uint64_t) lck);
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK

    KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid ));
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master. The barrier is executed inside this function.
*/
kmp_int32
__kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    int status;

    KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) );

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check )
        __kmp_check_barrier( global_tid, ct_barrier, loc );

#if USE_ITT_NOTIFY
    __kmp_threads[global_tid]->th.th_ident = loc;
#endif
    // Gather-only barrier (TRUE): the release half happens later in
    // __kmpc_end_barrier_master; a nonzero status means "not the master".
    status = __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL );

    return (status != 0) ? 0 : 1;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.

Complete the execution of a combined barrier and master. This function should
only be called at the completion of the <tt>master</tt> code. Other threads will
still be waiting at the barrier and this call releases them.
*/
void
__kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));

    __kmp_end_split_barrier ( bs_plain_barrier, global_tid );
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master(nowait) construct.
The barrier is executed inside this function.
There is no equivalent "end" function, since the
*/
kmp_int32
__kmpc_barrier_master_nowait( ident_t * loc, kmp_int32 global_tid )
{
    kmp_int32 ret;

    KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
        }
        __kmp_check_barrier( global_tid, ct_barrier, loc );
    }

#if USE_ITT_NOTIFY
    __kmp_threads[global_tid]->th.th_ident = loc;
#endif
    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    ret = __kmpc_master (loc, global_tid);

    if ( __kmp_env_consistency_check ) {
        /*  there's no __kmpc_end_master called; so the (stats) */
        /*  actions of __kmpc_end_master are done here          */

        if ( global_tid < 0 ) {
            KMP_WARNING( ThreadIdentInvalid );
        }
        if (ret) {
            /* only one thread should do the pop since only */
            /* one did the push (see __kmpc_master())       */

            __kmp_pop_sync( global_tid, ct_master, loc );
        }
    }

    return (ret);
}

/* The BARRIER for a SINGLE process section is always explicit */

/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number
@return One if this thread should execute the single construct, zero otherwise.

Test whether to execute a <tt>single</tt> construct.
There are no implicit barriers in the two "single" calls, rather the compiler should
introduce an explicit barrier if it is required.
*/
kmp_int32
__kmpc_single(ident_t *loc, kmp_int32 global_tid)
{
    KMP_COUNT_BLOCK(OMP_SINGLE);
    kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE );
    if(rc == TRUE) {
        KMP_START_EXPLICIT_TIMER(OMP_single);
    }

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr = __kmp_threads[ global_tid ];
    kmp_team_t *team     = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if (ompt_enabled) {
        if (rc) {
            // This thread won the race and will execute the single block.
            if (ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)) {
                ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)(
                    team->t.ompt_team_info.parallel_id,
                    team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id,
                    team->t.ompt_team_info.microtask);
            }
        } else {
            if (ompt_callbacks.ompt_callback(ompt_event_single_others_begin)) {
                ompt_callbacks.ompt_callback(ompt_event_single_others_begin)(
                    team->t.ompt_team_info.parallel_id,
                    team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
            }
            this_thr->th.ompt_thread_info.state = ompt_state_wait_single;
        }
    }
#endif

    return rc;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number

Mark the end of a <tt>single</tt> construct.  This function should
only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void
__kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
{
    __kmp_exit_single( global_tid );
    KMP_STOP_EXPLICIT_TIMER(OMP_single);

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr = __kmp_threads[ global_tid ];
    kmp_team_t *team     = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)) {
        ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif
}

/*!
@ingroup WORK_SHARING
@param loc Source location
@param global_tid Global thread id

Mark the end of a statically scheduled loop.
*/
void
__kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid )
{
    KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid));

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_loop_end)) {
        kmp_info_t *this_thr = __kmp_threads[ global_tid ];
        kmp_team_t *team     = this_thr -> th.th_team;
        int tid = __kmp_tid_from_gtid( global_tid );

        ompt_callbacks.ompt_callback(ompt_event_loop_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif

    if ( __kmp_env_consistency_check )
        __kmp_pop_workshare( global_tid, ct_pdo, loc );
}

/*
 * User routines which take C-style arguments (call by value)
 * different from the Fortran equivalent routines
 */
void
ompc_set_num_threads( int arg )
{
    // !!!!! TODO: check the per-task binding
    __kmp_set_num_threads( arg, __kmp_entry_gtid() );
}

void
ompc_set_dynamic( int flag )
{
    kmp_info_t *thread;

    /* For the thread-private implementation of the internal controls */
    thread = __kmp_entry_thread();

    __kmp_save_internal_controls( thread );

    set__dynamic( thread, flag ? TRUE : FALSE );
}

void
ompc_set_nested( int flag )
{
    kmp_info_t *thread;

    /* For the thread-private internal controls implementation */
    thread = __kmp_entry_thread();

    __kmp_save_internal_controls( thread );

    set__nested( thread, flag ? TRUE : FALSE );
}

void
ompc_set_max_active_levels( int max_active_levels )
{
    /* TO DO */
    /* we want per-task implementation of this internal control */

    /* For the per-thread internal controls implementation */
    __kmp_set_max_active_levels( __kmp_entry_gtid(), max_active_levels );
}

void
ompc_set_schedule( omp_sched_t kind, int modifier )
{
    // !!!!! TODO: check the per-task binding
    __kmp_set_schedule( __kmp_entry_gtid(), ( kmp_sched_t ) kind, modifier );
}

int
ompc_get_ancestor_thread_num( int level )
{
    return __kmp_get_ancestor_thread_num( __kmp_entry_gtid(), level );
}

int
ompc_get_team_size( int level )
{
    return __kmp_get_team_size( __kmp_entry_gtid(), level );
}

void
kmpc_set_stacksize( int arg )
{
    // __kmp_aux_set_stacksize initializes the library if needed
    __kmp_aux_set_stacksize( arg );
}

void
kmpc_set_stacksize_s( size_t arg )
{
    // __kmp_aux_set_stacksize initializes the library if needed
    __kmp_aux_set_stacksize( arg );
}

void
kmpc_set_blocktime( int arg )
{
    int gtid, tid;
    kmp_info_t *thread;

    gtid = __kmp_entry_gtid();
    tid = __kmp_tid_from_gtid(gtid);
    thread = __kmp_thread_from_gtid(gtid);

    __kmp_aux_set_blocktime( arg, thread, tid );
}

void
kmpc_set_library( int arg )
{
    // __kmp_user_set_library initializes the library if needed
    __kmp_user_set_library( (enum library_type)arg );
}

void
kmpc_set_defaults( char const * str )
{
    // __kmp_aux_set_defaults initializes the library if needed
    __kmp_aux_set_defaults( str, KMP_STRLEN( str ) );
}

void
kmpc_set_disp_num_buffers( int arg )
{
    // ignore after initialization because some teams have already
    // allocated dispatch buffers
    if( __kmp_init_serial == 0 && arg > 0 )
        __kmp_dispatch_num_buffers = arg;
}

int
kmpc_set_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_set_affinity_mask_proc( proc, mask );
#endif
}

int
kmpc_unset_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_unset_affinity_mask_proc( proc, mask );
#endif
}

int
kmpc_get_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_get_affinity_mask_proc( proc, mask );
#endif
}

/* -------------------------------------------------------------------------- */
/*!
@ingroup THREADPRIVATE
@param loc       source location information
@param gtid      global thread number
@param cpy_size  size of the cpy_data buffer
@param cpy_data  pointer to data to be copied
@param cpy_func  helper function to call for copying data
@param didit     flag variable: 1=single thread; 0=not single thread

__kmpc_copyprivate implements the interface for the private data broadcast needed for
the copyprivate clause associated with a single region in an OpenMP<sup>*</sup> program (both C and Fortran).
All threads participating in the parallel region call this routine.
One of the threads (called the single thread) should have the <tt>didit</tt> variable set to 1
and all other threads should have that variable set to 0.
All threads pass a pointer to a data buffer (cpy_data) that they have built.

The OpenMP specification forbids the use of nowait on the single region when a copyprivate
clause is present. However, @ref __kmpc_copyprivate implements a barrier internally to avoid
race conditions, so the code generation for the single region should avoid generating a barrier
after the call to @ref __kmpc_copyprivate.

The <tt>gtid</tt> parameter is the global thread id for the current thread.
The <tt>loc</tt> parameter is a pointer to source location information.

Internal implementation: The single thread will first copy its descriptor address (cpy_data)
to a team-private location, then the other threads will each call the function pointed to by
the parameter cpy_func, which carries out the copy by copying the data using the cpy_data buffer.

The cpy_func routine used for the copy and the contents of the data area defined by cpy_data
and cpy_size may be built in any fashion that will allow the copy to be done.
For instance, the cpy_data buffer can hold the actual data to be copied or it may hold a list of
pointers to the data. The cpy_func routine must interpret the cpy_data buffer appropriately.

The interface to cpy_func is as follows:
@code
void cpy_func( void *destination, void *source )
@endcode
where void *destination is the cpy_data pointer for the thread being copied to
and void *source is the cpy_data pointer for the thread being copied from.
*/
/* Implementation note: the single thread publishes its cpy_data pointer into a
   team-shared slot; the first barrier makes it visible to all threads, which then
   run cpy_func against it; the second barrier keeps the source buffer alive until
   every thread has finished copying. */
void
__kmpc_copyprivate( ident_t *loc, kmp_int32 gtid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void*,void*), kmp_int32 didit )
{
    void **data_ptr;

    KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid ));

    KMP_MB();

    // Team-shared slot through which the single thread's cpy_data is published.
    data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data;

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid );
        }
    }

    /* ToDo: Optimize the following two barriers into some kind of split barrier */

    if (didit) *data_ptr = cpy_data;

    /* This barrier is not a barrier region boundary */
#if USE_ITT_NOTIFY
    __kmp_threads[gtid]->th.th_ident = loc;
#endif
    __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );

    if (! didit) (*cpy_func)( cpy_data, *data_ptr );

    /* Consider next barrier the user-visible barrier for barrier region boundaries */
    /* Nesting checks are already handled by the single construct checks */

#if USE_ITT_NOTIFY
    __kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g. tasks can overwrite the location)
#endif
    __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );
}

/* -------------------------------------------------------------------------- */

/* Aliases mapping the user-lock entry points onto the "with checks" helpers. */
#define INIT_LOCK                 __kmp_init_user_lock_with_checks
#define INIT_NESTED_LOCK          __kmp_init_nested_user_lock_with_checks
#define ACQUIRE_LOCK              __kmp_acquire_user_lock_with_checks
#define ACQUIRE_LOCK_TIMED        __kmp_acquire_user_lock_with_checks_timed
#define ACQUIRE_NESTED_LOCK       __kmp_acquire_nested_user_lock_with_checks
#define ACQUIRE_NESTED_LOCK_TIMED __kmp_acquire_nested_user_lock_with_checks_timed
#define RELEASE_LOCK              __kmp_release_user_lock_with_checks
#define RELEASE_NESTED_LOCK       __kmp_release_nested_user_lock_with_checks
#define TEST_LOCK                 __kmp_test_user_lock_with_checks
#define TEST_NESTED_LOCK          __kmp_test_nested_user_lock_with_checks
#define DESTROY_LOCK              __kmp_destroy_user_lock_with_checks
#define DESTROY_NESTED_LOCK       __kmp_destroy_nested_user_lock_with_checks

/*
 * TODO: Make check abort messages use location info & pass it
 * into with_checks routines
 */

#if KMP_USE_DYNAMIC_LOCK

// internal lock initializer
// Initializes a dynamic lock either as a direct (D) lock or as an indirect (I)
// lock depending on the requested sequence, and reports creation to ITT.
static __forceinline void
__kmp_init_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq)
{
    if (KMP_IS_D_LOCK(seq)) {
        KMP_INIT_D_LOCK(lock, seq);
#if USE_ITT_BUILD
        __kmp_itt_lock_creating((kmp_user_lock_p)lock, NULL);
#endif
    } else {
        KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
        kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
        __kmp_itt_lock_creating(ilk->lock, loc);
#endif
    }
}

// internal nest lock initializer
// Rewrites the requested sequence to its nested counterpart before
// initializing; the result is always created as an indirect (I) lock.
static __forceinline void
__kmp_init_nest_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq)
{
#if KMP_USE_TSX
    // Don't have nested lock implementation for speculative locks
    if (seq == lockseq_hle || seq == lockseq_rtm || seq == lockseq_adaptive)
        seq = __kmp_user_lock_seq;
#endif
    switch (seq) {
        case lockseq_tas:
            seq = lockseq_nested_tas;
            break;
#if KMP_USE_FUTEX
        case lockseq_futex:
            seq = lockseq_nested_futex;
            break;
#endif
        case lockseq_ticket:
            seq = lockseq_nested_ticket;
            break;
        case lockseq_queuing:
            seq = lockseq_nested_queuing;
            break;
        case lockseq_drdpa:
            seq = lockseq_nested_drdpa;
            break;
        default:
            seq = lockseq_nested_queuing;
    }
    KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
    kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
    __kmp_itt_lock_creating(ilk->lock, loc);
#endif
}

/* initialize the lock with a hint */
// omp_init_lock_with_hint entry point: validates user_lock when consistency
// checking is on, then initializes it with the lock kind mapped from the hint.
void
__kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
{
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_lock_with_hint");
    }

    __kmp_init_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
}

/* initialize the lock with a hint */
// Nestable-lock variant of the hinted initializer above.
void
__kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
{
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock_with_hint");
    }

    __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
}

#endif // KMP_USE_DYNAMIC_LOCK

/* initialize the lock */
// omp_init_lock entry point. On the static-lock path, the lock lives directly
// inside the user's omp_lock_t when the TAS/futex poll word fits; otherwise a
// separate lock object is allocated.
void
__kmpc_init_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_lock");
    }
    __kmp_init_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);
#else // KMP_USE_DYNAMIC_LOCK
    static char const * const func = "omp_init_lock";
    kmp_user_lock_p lck;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if ( __kmp_env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );
        }
    }

    KMP_CHECK_USER_LOCK_INIT();

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
    }
    INIT_LOCK( lck );
    __kmp_set_user_lock_location( lck, loc );

#if USE_ITT_BUILD
    __kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_lock

/* initialize the lock */
// omp_init_nest_lock entry point. The in-place test additionally requires room
// for the nesting depth counter inside omp_nest_lock_t.
void
__kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock");
    }
    __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);
#else // KMP_USE_DYNAMIC_LOCK
    static char const * const func = "omp_init_nest_lock";
    kmp_user_lock_p lck;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if ( __kmp_env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );
        }
    }

    KMP_CHECK_USER_LOCK_INIT();

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
    }

    INIT_NESTED_LOCK( lck );
    __kmp_set_user_lock_location( lck, loc );

#if USE_ITT_BUILD
    __kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_nest_lock

// omp_destroy_lock entry point.
void
__kmpc_destroy_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK

# if USE_ITT_BUILD
    kmp_user_lock_p lck;
    if (KMP_EXTRACT_D_TAG(user_lock) == 0) {
        lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock;
    } else {
        lck =
              (kmp_user_lock_p)user_lock;
    }
    __kmp_itt_lock_destroyed(lck);
# endif
    KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
    DESTROY_LOCK( lck );

    // Only separately-allocated locks are freed here; locks living in-place
    // inside the user's omp_lock_t (TAS/futex fast cases) need no free.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        ;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        ;
    }
#endif
    else {
        __kmp_user_lock_free( user_lock, gtid, lck );
    }
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_lock

/* destroy the lock */
// omp_destroy_nest_lock entry point; mirrors __kmpc_destroy_lock with the
// nestable size checks (poll word plus nesting depth counter).
void
__kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK

# if USE_ITT_BUILD
    kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
    __kmp_itt_lock_destroyed(ilk->lock);
# endif
    KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */

    DESTROY_NESTED_LOCK( lck );

    // As above: free only locks that were separately allocated.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        ;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        ;
    }
#endif
    else {
        __kmp_user_lock_free( user_lock, gtid, lck );
    }
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_nest_lock

// omp_set_lock entry point (blocking acquire).
void
__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
    KMP_COUNT_BLOCK(OMP_set_lock);
#if KMP_USE_DYNAMIC_LOCK
    int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object.
# endif
# if KMP_USE_INLINED_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        KMP_ACQUIRE_TAS_LOCK(user_lock, gtid);
    } else
# elif KMP_USE_INLINED_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
    } else
# endif
    {
        __kmp_direct_set[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }
# if USE_ITT_BUILD
    __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_set_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    ACQUIRE_LOCK( lck, gtid );

#if USE_ITT_BUILD
    __kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
}

// omp_set_nest_lock entry point (blocking acquire of a nestable lock).
void
__kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK

# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
    KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
#endif

#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_set_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    ACQUIRE_NESTED_LOCK( lck, gtid );

#if USE_ITT_BUILD
    __kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
}

// omp_unset_lock entry point (release).
void
__kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK

    int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
# if KMP_USE_INLINED_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        KMP_RELEASE_TAS_LOCK(user_lock, gtid);
    } else
# elif KMP_USE_INLINED_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        KMP_RELEASE_FUTEX_LOCK(user_lock, gtid);
    } else
# endif
    {
        __kmp_direct_unset[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;

    /* Can't use serial interval since not block structured */
    /* release the lock */

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // "fast" path implemented to fix customer performance issue
#if USE_ITT_BUILD
        __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
        // Release by clearing the poll word directly; KMP_MB() orders the store.
        TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0);
        KMP_MB();
        return;
#else
        lck = (kmp_user_lock_p)user_lock;
#endif
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_unset_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */

    RELEASE_LOCK( lck, gtid );

#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_release_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_release_lock)((uint64_t) lck);
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

/* release the lock */
// omp_unset_nest_lock entry point: decrements the nesting depth and fully
// releases the lock when the depth reaches zero.
void
__kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK

# if USE_ITT_BUILD
    __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
    KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;

    /* Can't use serial interval since not block structured */

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // "fast" path implemented to fix customer performance issue
        kmp_tas_lock_t *tl = (kmp_tas_lock_t*)user_lock;
#if USE_ITT_BUILD
        __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
        if ( --(tl->lk.depth_locked) == 0 ) {
            TCW_4(tl->lk.poll, 0);
        }
        KMP_MB();
        return;
#else
        lck = (kmp_user_lock_p)user_lock;
#endif
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_unset_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */

    int release_status;
    release_status = RELEASE_NESTED_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled) {
        // Distinguish the final release from intermediate depth decrements.
        if (release_status == KMP_LOCK_RELEASED) {
            if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)) {
                ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)(
                    (uint64_t) lck);
            }
        } else if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)) {
            ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)(
                (uint64_t) lck);
        }
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

/* try to acquire the lock */
// omp_test_lock entry point: non-blocking acquire; returns FTN_TRUE on success.
int
__kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
    KMP_COUNT_BLOCK(OMP_test_lock);

#if KMP_USE_DYNAMIC_LOCK
    int rc;
    int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
# if KMP_USE_INLINED_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        KMP_TEST_TAS_LOCK(user_lock, gtid, rc);
    } else
# elif KMP_USE_INLINED_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc);
    } else
# endif
    {
        rc = __kmp_direct_test[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }
    if (rc) {
# if USE_ITT_BUILD
        __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
        return FTN_TRUE;
    } else {
# if USE_ITT_BUILD
        __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
# endif
        return FTN_FALSE;
    }

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;
    int          rc;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_test_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    rc = TEST_LOCK( lck, gtid );
#if USE_ITT_BUILD
    if ( rc ) {
        __kmp_itt_lock_acquired( lck );
    } else {
        __kmp_itt_lock_cancelled( lck );
    }
#endif /* USE_ITT_BUILD */
    return ( rc ? FTN_TRUE : FTN_FALSE );

    /* Can't use serial interval since not block structured */

#endif // KMP_USE_DYNAMIC_LOCK
}

/* try to acquire the lock */
// omp_test_nest_lock entry point: non-blocking acquire of a nestable lock.
int
__kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
    int rc;
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
    rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
    if (rc) {
        __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
    } else {
        __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
    }
# endif
    return rc;

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;
    int          rc;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_test_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    rc = TEST_NESTED_LOCK( lck, gtid );
#if USE_ITT_BUILD
    if ( rc ) {
        __kmp_itt_lock_acquired( lck );
    } else {
        __kmp_itt_lock_cancelled( lck );
    }
#endif /* USE_ITT_BUILD */
    return rc;

    /* Can't
use serial interval since not block structured */

#endif // KMP_USE_DYNAMIC_LOCK
}


/*--------------------------------------------------------------------------------------------------------------------*/

/*
 * Interface to fast scalable reduce methods routines
 */

// keep the selected method in a thread local structure for cross-function usage: will be used in __kmpc_end_reduce* functions;
// another solution: to re-determine the method one more time in __kmpc_end_reduce* functions (new prototype required then)
// AT: which solution is better?
#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \
                   ( ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) )

#define __KMP_GET_REDUCTION_METHOD(gtid) \
                   ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method )

// description of the packed_reduction_method variable: look at the macros in kmp.h

// used in a critical section reduce block
// Acquires the critical-section lock that serializes a critical_reduce_block.
// The lock object is embedded in (or reached through) the kmp_critical_name
// storage supplied by the compiler.
static __forceinline void
__kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {

    // this lock was visible to a customer and to the threading profile tool as a serial overhead span
    //            (although it's used for an internal purpose only)
    //            why was it visible in previous implementation?
    //            should we keep it visible in new reduce block?
    kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK

    kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
    // Check if it is initialized.
    if (*lk == 0) {
        if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
            // Direct lock: atomically claim the word with the lock-kind tag.
            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
        } else {
            __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(__kmp_user_lock_seq));
        }
    }
    // Branch for accessing the actual lock object and set operation. This branching is inevitable since
    // this lock initialization does not follow the normal dispatch path (lock table is not used).
    if (KMP_EXTRACT_D_TAG(lk) != 0) {
        lck = (kmp_user_lock_p)lk;
        KMP_DEBUG_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
        KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
        lck = ilk->lock;
        KMP_DEBUG_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
        KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // We know that the fast reduction code is only emitted by Intel compilers
    // with 32 byte critical sections. If there isn't enough space, then we
    // have to use a pointer.
    if ( __kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE ) {
        lck = (kmp_user_lock_p)crit;
    }
    else {
        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    }
    KMP_DEBUG_ASSERT( lck != NULL );

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    __kmp_acquire_user_lock_with_checks( lck, global_tid );

#endif // KMP_USE_DYNAMIC_LOCK
}

// used in a critical section reduce block
// Releases the lock taken by __kmp_enter_critical_section_reduce_block.
static __forceinline void
__kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {

    kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK

    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        KMP_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // We know that the fast reduction code is only emitted by Intel compilers with 32 byte critical
    // sections. If there isn't enough space, then we have to use a pointer.
    if ( __kmp_base_user_lock_size > 32 ) {
        lck = *( (kmp_user_lock_p *) crit );
        KMP_ASSERT( lck != NULL );
    } else {
        lck = (kmp_user_lock_p) crit;
    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );

    __kmp_release_user_lock_with_checks( lck, global_tid );

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmp_end_critical_section_reduce_block


/* 2.a.i. Reduce Block without a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

The nowait version is used for a reduce clause with the nowait argument.
*/
kmp_int32
__kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck ) {

    KMP_COUNT_BLOCK(REDUCE_nowait);
    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;
#if OMP_40_ENABLED
    kmp_team_t *team;
    kmp_info_t *th;
    int teams_swapped = 0, task_state;
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be used as a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif

#if OMP_40_ENABLED
    th = __kmp_thread_from_gtid(global_tid);
    if( th->th.th_teams_microtask ) {   // AC: check if we are inside the teams construct?
        team = th->th.th_team;
        if( team->t.t_level == th->th.th_teams_level ) {
            // this is reduction at teams construct
            KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid);  // AC: check that tid == 0
            // Let's swap teams temporarily for the reduction barrier
            teams_swapped = 1;
            th->th.th_info.ds.ds_tid = team->t.t_master_tid;
            th->th.th_team = team->t.t_parent;
            th->th.th_team_nproc = th->th.th_team->t.t_nproc;
            th->th.th_task_team = th->th.th_team->t.t_task_team[0];
            task_state = th->th.th_task_state;
            th->th.th_task_state = 0;
        }
    }
#endif // OMP_40_ENABLED

    // packed_reduction_method value will be reused by __kmp_end_reduce* function, the value should be kept in a variable
    // the variable should be either a construct-specific or thread-specific property, not a team specific property
    //     (a thread can reach the next reduce block on the next construct, reduce method may differ on the next construct)
    // an ident_t "loc" parameter could be used as a construct-specific property (what if loc == 0?)
    // (if both construct-specific and team-specific variables were shared, then unness extra syncs should be needed)
    // a thread-specific variable is better regarding two issues above (next construct and extra syncs)
    // a thread-specific "th_local.reduction_method" variable is used currently
    // each thread executes 'determine' and 'set' lines (no need to execute by one thread, to avoid unness extra syncs)

    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

        // all threads should do this pop here (because __kmpc_end_reduce_nowait() won't be called by the code gen)
        //     (it's not quite good, because the checking block has been closed by this 'pop',
        //      but atomic operation has not been executed yet, will be executed slightly later, literally on next instruction)
        if ( __kmp_env_consistency_check )
            __kmp_pop_sync( global_tid, ct_reduce, loc );

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        //AT: performance issue: a real barrier here
        //AT:     (if master goes slow, other threads are blocked here waiting for the master to come and release them)
        //AT:     (it's not what a customer might expect specifying NOWAIT clause)
        //AT:     (specifying NOWAIT won't result in improvement of performance, it'll be confusing to a customer)
        //AT: another implementation of *barrier_gather*nowait() (or some other design) might go faster
        //        and be more in line with sense of NOWAIT
        //AT: TO DO: do epcc test and compare times

        // this barrier should be invisible to a customer and to the threading profile tool
        //              (it's neither a terminating barrier nor customer's code, it's used for an internal purpose)
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, FALSE, reduce_size, reduce_data, reduce_func );
        retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

        // all other workers except master should do this pop here
        //     ( none of other workers will get to __kmpc_end_reduce_nowait() )
        if ( __kmp_env_consistency_check ) {
            if( retval == 0 ) {
                __kmp_pop_sync( global_tid, ct_reduce, loc );
            }
        }

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }
#if OMP_40_ENABLED
    if( teams_swapped ) {
        // Restore thread structure
        th->th.th_info.ds.ds_tid = 0;
        th->th.th_team = team;
        th->th.th_team_nproc = team->t.t_nproc;
        th->th.th_task_team = team->t.t_task_team[task_state];
        th->th.th_task_state = task_state;
    }
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a reduce nowait.
*/ void __kmpc_end_reduce_nowait( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) { PACKED_REDUCTION_METHOD_T packed_reduction_method; KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) ); packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid ); if( packed_reduction_method == critical_reduce_block ) { __kmp_end_critical_section_reduce_block( loc, global_tid, lck ); } else if( packed_reduction_method == empty_reduce_block ) { // usage: if team size == 1, no synchronization is required ( on Intel platforms only ) } else if( packed_reduction_method == atomic_reduce_block ) { // neither master nor other workers should get here // (code gen does not generate this call in case 2: atomic reduce block) // actually it's better to remove this elseif at all; // after removal this value will checked by the 'else' and will assert } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) { // only master gets here } else { // should never reach this block KMP_ASSERT( 0 ); // "unexpected method" } if ( __kmp_env_consistency_check ) __kmp_pop_sync( global_tid, ct_reduce, loc ); KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) ); return; } /* 2.a.ii. Reduce Block with a terminating barrier */ /*! @ingroup SYNCHRONIZATION @param loc source location information @param global_tid global thread number @param num_vars number of items (variables) to be reduced @param reduce_size size of data in bytes to be reduced @param reduce_data pointer to data to be reduced @param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data @param lck pointer to the unique lock data structure @result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed A blocking reduce that includes an implicit barrier. 
*/
kmp_int32
__kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
    void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck )
{
    KMP_COUNT_BLOCK(REDUCE_wait);
    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif

    // Pick the reduction strategy for this construct and remember it so the
    // matching __kmpc_end_reduce() can finish it consistently.
    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        //case tree_reduce_block:
        // this barrier should be visible to a customer and to the threading profile tool
        // (it's a terminating barrier on constructs if NOWAIT not specified)
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc; // needed for correct notification of frames
#endif
        retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, TRUE, reduce_size, reduce_data, reduce_func );
        retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

        // all other workers except master should do this pop here
        // ( none of other workers except master will enter __kmpc_end_reduce() )
        if ( __kmp_env_consistency_check ) {
            if( retval == 0 ) { // 0: all other workers; 1: master
                __kmp_pop_sync( global_tid, ct_reduce, loc );
            }
        }

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a blocking reduce.
The <tt>lck</tt> pointer must be the same as that used in the corresponding start function.
*/
void
__kmpc_end_reduce( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {

    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) );

    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    // this barrier should be visible to a customer and to the threading profile tool
    // (it's a terminating barrier on constructs if NOWAIT not specified)

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        // only master executes here (master releases all other workers)
        __kmp_end_split_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid );

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

#undef __KMP_GET_REDUCTION_METHOD
#undef __KMP_SET_REDUCTION_METHOD

/*-- end of interface to fast scalable reduce routines ---------------------------------------------------------------*/

// Return the ID of the current task, or 0 when no valid global thread id is
// available (gtid < 0, i.e. the caller is not an OpenMP-managed thread).
kmp_uint64
__kmpc_get_taskid() {

    kmp_int32    gtid;
    kmp_info_t * thread;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread = __kmp_thread_from_gtid( gtid );
    return thread->th.th_current_task->td_task_id;

} // __kmpc_get_taskid

// Return the ID of the parent of the current task; 0 when there is no parent
// task or no valid global thread id.
kmp_uint64
__kmpc_get_parent_taskid() {

    kmp_int32        gtid;
    kmp_info_t *     thread;
    kmp_taskdata_t * parent_task;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread      = __kmp_thread_from_gtid( gtid );
    parent_task = thread->th.th_current_task->td_parent;
    return ( parent_task == NULL ? 0 : parent_task->td_task_id );

} // __kmpc_get_parent_taskid

// Record thread-placement parameters in the runtime's global settings:
//   nS/sO = number of sockets and socket offset,
//   nC/cO = number of cores and core offset,
//   nT    = threads per core.
// Triggers serial runtime initialization if it has not happened yet.
void __kmpc_place_threads(int nS, int sO, int nC, int cO, int nT)
{
    if ( ! __kmp_init_serial ) {
        __kmp_serial_initialize();
    }
    __kmp_place_num_sockets = nS;
    __kmp_place_socket_offset = sO;
    __kmp_place_num_cores = nC;
    __kmp_place_core_offset = cO;
    __kmp_place_num_threads_per_core = nT;
}

#if OMP_41_ENABLED
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.
@param num_dims number of associated doacross loops.
@param dims info on loops bounds.

Initialize doacross loop information.
Expect the compiler to send us inclusive bounds,
e.g. for(i=2;i<9;i+=2) lo=2, up=8, st=2.
*/
// Private per-thread buffer layout (th_doacross_info, kmp_int64 slots):
//   [0]            number of dimensions
//   [1]            address of sh_buf->doacross_num_done (shared finish counter)
//   [2],[3],[4]    lo, up, st of dimension 0
//   then for each dimension j >= 1: range_length, lo, up, st
// The shared iteration-completion bitmap (doacross_flags, one bit per
// iteration of the collapsed loop nest) lives in the team's dispatch buffer
// and is lazily allocated by the first thread via CAS below.
void
__kmpc_doacross_init(ident_t *loc, int gtid, int num_dims, struct kmp_dim * dims)
{
    int j, idx;
    kmp_int64 last, trace_count;
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_uint32 *flags;
    kmp_disp_t *pr_buf = th->th.th_dispatch;
    dispatch_shared_info_t *sh_buf;

    KA_TRACE(20,("__kmpc_doacross_init() enter: called T#%d, num dims %d, active %d\n",
                 gtid, num_dims, !team->t.t_serialized));
    KMP_DEBUG_ASSERT(dims != NULL);
    KMP_DEBUG_ASSERT(num_dims > 0);

    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_init() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }
    KMP_DEBUG_ASSERT(team->t.t_nproc > 1);
    idx = pr_buf->th_doacross_buf_idx++; // Increment index of shared buffer for the next loop
    sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];

    // Save bounds info into allocated private buffer
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info == NULL);
    pr_buf->th_doacross_info =
        (kmp_int64*)__kmp_thread_malloc(th, sizeof(kmp_int64)*(4 * num_dims + 1));
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    pr_buf->th_doacross_info[0] = (kmp_int64)num_dims; // first element is number of dimensions
    // Save also address of num_done in order to access it later without knowing the buffer index
    pr_buf->th_doacross_info[1] = (kmp_int64)&sh_buf->doacross_num_done;
    pr_buf->th_doacross_info[2] = dims[0].lo;
    pr_buf->th_doacross_info[3] = dims[0].up;
    pr_buf->th_doacross_info[4] = dims[0].st;
    last = 5;
    for( j = 1; j < num_dims; ++j ) {
        kmp_int64 range_length; // To keep ranges of all dimensions but the first dims[0]
        if( dims[j].st == 1 ) { // most common case
            // AC: should we care of ranges bigger than LLONG_MAX? (not for now)
            range_length = dims[j].up - dims[j].lo + 1;
        } else {
            if( dims[j].st > 0 ) {
                KMP_DEBUG_ASSERT(dims[j].up > dims[j].lo);
                range_length = (kmp_uint64)(dims[j].up - dims[j].lo) / dims[j].st + 1;
            } else {            // negative increment
                KMP_DEBUG_ASSERT(dims[j].lo > dims[j].up);
                range_length = (kmp_uint64)(dims[j].lo - dims[j].up) / (-dims[j].st) + 1;
            }
        }
        pr_buf->th_doacross_info[last++] = range_length;
        pr_buf->th_doacross_info[last++] = dims[j].lo;
        pr_buf->th_doacross_info[last++] = dims[j].up;
        pr_buf->th_doacross_info[last++] = dims[j].st;
    }

    // Compute total trip count.
    // Start with range of dims[0] which we don't need to keep in the buffer.
    if( dims[0].st == 1 ) { // most common case
        trace_count = dims[0].up - dims[0].lo + 1;
    } else if( dims[0].st > 0 ) {
        KMP_DEBUG_ASSERT(dims[0].up > dims[0].lo);
        trace_count = (kmp_uint64)(dims[0].up - dims[0].lo) / dims[0].st + 1;
    } else {   // negative increment
        KMP_DEBUG_ASSERT(dims[0].lo > dims[0].up);
        trace_count = (kmp_uint64)(dims[0].lo - dims[0].up) / (-dims[0].st) + 1;
    }
    for( j = 1; j < num_dims; ++j ) {
        trace_count *= pr_buf->th_doacross_info[4 * j + 1]; // use kept ranges
    }
    KMP_DEBUG_ASSERT(trace_count > 0);

    // Check if shared buffer is not occupied by other loop (idx - __kmp_dispatch_num_buffers)
    if( idx != sh_buf->doacross_buf_idx ) {
        // Shared buffer is occupied, wait for it to be free
        __kmp_wait_yield_4( (kmp_uint32*)&sh_buf->doacross_buf_idx, idx, __kmp_eq_4, NULL );
    }
    // Check if we are the first thread. After the CAS the first thread gets 0,
    // others get 1 if initialization is in progress, allocated pointer otherwise.
    flags = (kmp_uint32*)KMP_COMPARE_AND_STORE_RET64(
        (kmp_int64*)&sh_buf->doacross_flags,NULL,(kmp_int64)1);
    if( flags == NULL ) {
        // we are the first thread, allocate the array of flags
        kmp_int64 size = trace_count / 8 + 8; // in bytes, use single bit per iteration
        sh_buf->doacross_flags = (kmp_uint32*)__kmp_thread_calloc(th, size, 1);
    } else if( (kmp_int64)flags == 1 ) {
        // initialization is still in progress, need to wait
        while( (volatile kmp_int64)sh_buf->doacross_flags == 1 ) {
            KMP_YIELD(TRUE);
        }
    }
    KMP_DEBUG_ASSERT((kmp_int64)sh_buf->doacross_flags > 1); // check value of pointer
    pr_buf->th_doacross_flags = sh_buf->doacross_flags;      // save private copy in order to not
                                                             // touch shared buffer on each iteration
    KA_TRACE(20,("__kmpc_doacross_init() exit: T#%d\n", gtid));
}

// Block until the iteration identified by the multi-dimensional vector 'vec'
// has been posted by __kmpc_doacross_post(). The vector is linearized into a
// single iteration number of the collapsed loop nest, then the matching bit
// in the shared completion bitmap is spin-waited on (with yields).
void
__kmpc_doacross_wait(ident_t *loc, int gtid, long long *vec)
{
    kmp_int32 shft, num_dims, i;
    kmp_uint32 flag;
    kmp_int64 iter_number; // iteration number of "collapsed" loop nest
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf;
    kmp_int64 lo, up, st;

    KA_TRACE(20,("__kmpc_doacross_wait() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_wait() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }

    // calculate sequential iteration number and check out-of-bounds condition
    pr_buf = th->th.th_dispatch;
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    num_dims = pr_buf->th_doacross_info[0];
    lo = pr_buf->th_doacross_info[2];
    up = pr_buf->th_doacross_info[3];
    st = pr_buf->th_doacross_info[4];
    if( st == 1 ) { // most common case
        if( vec[0] < lo || vec[0] > up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = vec[0] - lo;
    } else if( st > 0 ) {
        if( vec[0] < lo || vec[0] > up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = (kmp_uint64)(vec[0] - lo) / st;
    } else { // negative increment
        if( vec[0] > lo || vec[0] < up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
    }
    // Fold the remaining dimensions in: iter_number = iter + ln * iter_number,
    // using the per-dimension (range_length, lo, up, st) tuples saved by init.
    for( i = 1; i < num_dims; ++i ) {
        kmp_int64 iter, ln;
        kmp_int32 j = i * 4;
        ln = pr_buf->th_doacross_info[j + 1];
        lo = pr_buf->th_doacross_info[j + 2];
        up = pr_buf->th_doacross_info[j + 3];
        st = pr_buf->th_doacross_info[j + 4];
        if( st == 1 ) {
            if( vec[i] < lo || vec[i] > up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = vec[i] - lo;
        } else if( st > 0 ) {
            if( vec[i] < lo || vec[i] > up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = (kmp_uint64)(vec[i] - lo) / st;
        } else { // st < 0
            if( vec[i] > lo || vec[i] < up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = (kmp_uint64)(lo - vec[i]) / (-st);
        }
        iter_number = iter + ln * iter_number;
    }
    shft = iter_number % 32; // use 32-bit granularity
    iter_number >>= 5;       // divided by 32
    flag = 1 << shft;
    while( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 ) {
        KMP_YIELD(TRUE);
    }
    KA_TRACE(20,("__kmpc_doacross_wait() exit: T#%d wait for iter %lld completed\n",
                 gtid, (iter_number<<5)+shft));
}

// Mark the iteration identified by 'vec' as completed: linearize it exactly as
// in __kmpc_doacross_wait() (no bounds checks needed here) and atomically set
// the corresponding bit in the shared completion bitmap.
void
__kmpc_doacross_post(ident_t *loc, int gtid, long long *vec)
{
    kmp_int32 shft, num_dims, i;
    kmp_uint32 flag;
    kmp_int64 iter_number; // iteration number of "collapsed" loop nest
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf;
    kmp_int64 lo, st;

    KA_TRACE(20,("__kmpc_doacross_post() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_post() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }

    // calculate sequential iteration number (same as in "wait" but no out-of-bounds checks)
    pr_buf = th->th.th_dispatch;
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    num_dims = pr_buf->th_doacross_info[0];
    lo = pr_buf->th_doacross_info[2];
    st = pr_buf->th_doacross_info[4];
    if( st == 1 ) { // most common case
        iter_number = vec[0] - lo;
    } else if( st > 0 ) {
        iter_number = (kmp_uint64)(vec[0] - lo) / st;
    } else { // negative increment
        iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
    }
    for( i = 1; i < num_dims; ++i ) {
        kmp_int64 iter, ln;
        kmp_int32 j = i * 4;
        ln = pr_buf->th_doacross_info[j + 1];
        lo = pr_buf->th_doacross_info[j + 2];
        st = pr_buf->th_doacross_info[j + 4];
        if( st == 1 ) {
            iter = vec[i] - lo;
        } else if( st > 0 ) {
            iter = (kmp_uint64)(vec[i] - lo) / st;
        } else { // st < 0
            iter = (kmp_uint64)(lo - vec[i]) / (-st);
        }
        iter_number = iter + ln * iter_number;
    }
    shft = iter_number % 32; // use 32-bit granularity
    iter_number >>= 5;       // divided by 32
    flag = 1 << shft;
    // Only do the atomic OR when the bit is not yet set; the test is a cheap
    // read that avoids contended atomics for already-posted iterations.
    if( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 )
        KMP_TEST_THEN_OR32( (kmp_int32*)&pr_buf->th_doacross_flags[iter_number], (kmp_int32)flag );
    KA_TRACE(20,("__kmpc_doacross_post() exit: T#%d iter %lld posted\n",
                 gtid, (iter_number<<5)+shft));
}

// Tear down doacross state for this thread. The last thread to arrive (tracked
// via the shared doacross_num_done counter, whose address was stashed in
// th_doacross_info[1]) also frees the shared flag bitmap and recycles the
// shared dispatch buffer. Every thread frees its private bounds buffer.
void
__kmpc_doacross_fini(ident_t *loc, int gtid)
{
    kmp_int64 num_done;
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf = th->th.th_dispatch;

    KA_TRACE(20,("__kmpc_doacross_fini() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_fini() exit: serialized team %p\n", team));
        return; // nothing to do
    }
    num_done = KMP_TEST_THEN_INC64((kmp_int64*)pr_buf->th_doacross_info[1]) + 1;
    if( num_done == th->th.th_team_nproc ) {
        // we are the last thread, need to free shared resources
        int idx = pr_buf->th_doacross_buf_idx - 1;
        dispatch_shared_info_t *sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];
        KMP_DEBUG_ASSERT(pr_buf->th_doacross_info[1] == (kmp_int64)&sh_buf->doacross_num_done);
        KMP_DEBUG_ASSERT(num_done == (kmp_int64)sh_buf->doacross_num_done);
        KMP_DEBUG_ASSERT(idx == sh_buf->doacross_buf_idx);
        __kmp_thread_free(th, (void*)sh_buf->doacross_flags);
        sh_buf->doacross_flags = NULL;
        sh_buf->doacross_num_done = 0;
        sh_buf->doacross_buf_idx += __kmp_dispatch_num_buffers; // free buffer for future re-use
    }
    // free private resources (need to keep buffer index forever)
    __kmp_thread_free(th, (void*)pr_buf->th_doacross_info);
    pr_buf->th_doacross_info = NULL;
    KA_TRACE(20,("__kmpc_doacross_fini() exit: T#%d\n", gtid));
}
#endif

// end of file //
Tutorial.h
//================================================================================================= /*! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // // Copyright (C) 2013 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // // BLAZE TUTORIAL // //================================================================================================= //**Mainpage*************************************************************************************** /*!\mainpage // // \image html blaze300x150.jpg // // This is the API for the \b Blaze high performance C++ math library. It gives a complete // overview of the individual features and sublibraries of \b Blaze. To get a first impression // on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards, // the following long tutorial covers the most important aspects of the \b Blaze math library. // The tabs at the top of the page allow a direct access to the individual modules, namespaces, // classes, and files of the \b Blaze library.\n\n // // \section table_of_content Table of Contents // // <ul> // <li> \ref configuration_and_installation </li> // <li> \ref getting_started </li> // <li> \ref vectors // <ul> // <li> \ref vector_types </li> // <li> \ref vector_operations </li> // </ul> // </li> // <li> \ref matrices // <ul> // <li> \ref matrix_types </li> // <li> \ref matrix_operations </li> // </ul> // </li> // <li> \ref adaptors // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices </li> // </ul> // </li> // <li> \ref views // <ul> // <li> \ref views_subvectors </li> // <li> \ref views_submatrices </li> // <li> \ref views_rows </li> // <li> \ref views_columns </li> // </ul> // </li> // <li> \ref arithmetic_operations // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // 
<li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // </li> // <li> \ref custom_operations </li> // <li> \ref shared_memory_parallelization // <ul> // <li> \ref openmp_parallelization </li> // <li> \ref cpp_threads_parallelization </li> // <li> \ref boost_threads_parallelization </li> // <li> \ref serial_execution </li> // </ul> // </li> // <li> \ref serialization // <ul> // <li> \ref vector_serialization </li> // <li> \ref matrix_serialization </li> // </ul> // </li> // <li> \ref blas_functions </li> // <li> \ref lapack_functions </li> // <li> \ref configuration_files </li> // <li> \ref custom_data_types </li> // <li> \ref error_reporting_customization </li> // <li> \ref intra_statement_optimization </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation***************************************************************** /*!\page configuration_and_installation Configuration and Installation // // Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system // is a fairly easy two step process. In the following, this two step process is explained in // detail, preceded only by a short summary of the requirements. // // // \n \section requirements Requirements // <hr> // // In order for \b Blaze to work properly, the Boost library must be installed on the system. It // is recommended to use the newest Boost library available, but \b Blaze requires at minimum the // Boost version 1.54.0. If you don't have Boost installed on your system, you can download it for // free from 'http://www.boost.org'. 
// // Additionally, for maximum performance \b Blaze expects you to have a BLAS library installed // (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a href="http://math-atlas.sourceforge.net">Atlas</a>, // <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't // have a BLAS library installed on your system, \b Blaze will still work and will not be reduced // in functionality, but performance may be limited. Thus it is strongly recommended to install a // BLAS library. // // Furthermore, for computing the determinant of a dense matrix and for the dense matrix inversion // \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either of // these features is used it is necessary to link the LAPACK library to the final executable. If // no LAPACK library is available the use of these features will result in a linker error. // // // \n \section step_1_installation Step 1: Installation // <hr> // // \subsection step_1_installation_unix Linux/MacOSX User // // The first step is the installation of the header files. Since \b Blaze only consists of header // files, the <tt>./blaze</tt> subdirectory can be simply copied to a standard include directory // (note that this requires root privileges): \code cp -r ./blaze /usr/local/include \endcode // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the // \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be // searched after any directories specified on the command line with the option \c -I and // before the standard default directories (such as \c /usr/local/include and \c /usr/include). 
// Assuming a user named 'Jon', the environment variable can be set as follows: \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH \endcode // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the // command line. The following example demonstrates this by means of the GNU C++ compiler: \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode // \n \subsection step_1_installation_windows Windows User // // Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be // copied to any other directory or simply left in the default \b Blaze directory. However, the // chosen include directory has to be explicitly specified as include path. In Visual Studio, // this is done via the project property pages, configuration properties, C/C++, General settings. // Here the additional include directories can be specified. // // // \n \section step_2_configuration Step 2: Configuration // <hr> // // The second step is the configuration and customization of the \b Blaze library. Many aspects of // \b Blaze can be adapted to specific requirements, environments and architectures by customizing // the header files in the <tt>./blaze/config/</tt> subdirectory. Since the default settings are // reasonable for most systems this step can also be skipped. However, in order to achieve maximum // performance a customization of at least the following configuration files is required: // // - <b><tt>./blaze/config/BLAS.h</tt></b>: Via this configuration file \b Blaze can be enabled // to use a third-party BLAS library for several basic linear algebra functions (such as for // instance dense matrix multiplications). In case no BLAS library is used, all linear algebra // functions use the default implementations of the \b Blaze library and therefore BLAS is not a // requirement for the compilation process. However, please note that performance may be limited. 
// - <b><tt>./blaze/config/CacheSize.h</tt></b>: This file contains the hardware specific cache // settings. \b Blaze uses this information to optimize its cache usage. For maximum performance // it is recommended to adapt these setting to a specific target architecture. // - <b><tt>./blaze/config/Thresholds.h</tt></b>: This file contains all thresholds for the // customization of the \b Blaze compute kernels. In order to tune the kernels for a specific // architecture and to maximize performance it can be necessary to adjust the thresholds, // especially for a parallel execution (see \ref shared_memory_parallelization). // // For an overview of other customization options and more details, please see the section // \ref configuration_files. // // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started******************************************************************************** /*!\page getting_started Getting Started // // This short tutorial serves the purpose to give a quick overview of the way mathematical // expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following // long tutorial covers the most important aspects of the \b Blaze math library. // // // \n \section getting_started_vector_example A First Example // // \b Blaze is written such that using mathematical expressions is as close to mathematical // textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly // easiest solution is the right solution and most users experience no problems when trying to // use \b Blaze in the most natural way. The following example gives a first impression of the // formulation of a vector addition in \b Blaze: \code #include <iostream> #include <blaze/Math.h> using blaze::StaticVector; using blaze::DynamicVector; // Instantiation of a static 3D column vector. 
The vector is directly initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; // Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = 2; b[1] = 5; b[2] = -3; // Adding the vectors a and b DynamicVector<int> c = a + b; // Printing the result of the vector addition std::cout << "c =\n" << c << "\n"; \endcode // Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header // file. Alternatively, the entire \b Blaze library, including both the math and the entire // utility module, can be included via the \c blaze/Blaze.h header file. Also note that all // classes and functions of \b Blaze are contained in the blaze namespace.\n\n // // Assuming that this program resides in a source file called \c FirstExample.cpp, it can be // compiled for instance via the GNU C++ compiler: \code g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode // Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum // performance, it is necessary to compile the program in release mode, which deactivates // all debugging functionality inside \b Blaze. It is also strongly recommended to specify // the available architecture specific instruction set (as for instance the AVX instruction // set, which if available can be activated via the \c -mavx flag). This allows \b Blaze // to optimize computations via vectorization.\n\n // // When running the resulting executable \c FirstExample, the output of the last line of // this small program is \code c = 6 3 2 \endcode // \n \section getting_started_matrix_example An Example Involving Matrices // // Similarly easy and intuitive are expressions involving matrices: \code #include <blaze/Math.h> using namespace blaze; // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. 
Via the function call
// operator three values of the matrix are explicitly set to get the matrix
// ( 1 0 4 )
// ( 0 -2 0 )
DynamicMatrix<int> A( 2UL, 3UL, 0 );
A(0,0) = 1; A(0,2) = 4; A(1,1) = -2;

// Performing a matrix/vector multiplication
DynamicVector<int> y = A * x;

// Printing the resulting vector
std::cout << "y =\n" << y << "\n";

// Instantiating a static column-major matrix. The matrix is directly initialized as
// ( 3 -1 )
// ( 0 2 )
// ( -1 0 )
StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } };

// Performing a matrix/matrix multiplication
DynamicMatrix<int> C = A * B;

// Printing the resulting matrix
std::cout << "C =\n" << C << "\n";
\endcode

// The output of this program is

\code
y =
16
2

C =
( -1 -1 )
( 0 -4 )
\endcode

// \n \section getting_started_complex_example A Complex Example
//
// The following example is much more sophisticated. It shows the implementation of the Conjugate
// Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the
// \b Blaze library:
//
// \image html cg.jpg
//
// In this example it is not important to understand the CG algorithm itself, but to see the
// advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a
// sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$
// unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical
// formulation and therefore has huge advantages in terms of readability and maintainability,
// while the performance of the code is close to the expected theoretical peak performance:

\code
const size_t NN( N*N );

blaze::CompressedMatrix<double,rowMajor> A( NN, NN );
blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN );
double alpha, beta, delta;

// ...
Initializing the sparse matrix A // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = beta; } \endcode // \n Hopefully this short tutorial gives a good first impression of how mathematical expressions // are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types, // will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and // matrix types, all possible operations on vectors and matrices, and of course all possible // mathematical expressions. // // \n Previous: \ref configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors**************************************************************************************** /*!\page vectors Vectors // // \tableofcontents // // // \n \section vectors_general General Concepts // <hr> // // The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector, // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector) // and one sparse vector type (\ref vector_types_compressed_vector). 
All vectors can be specified // as either column vectors or row vectors: \code using blaze::DynamicVector; using blaze::columnVector; using blaze::rowVector; // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode // Per default, all vectors in \b Blaze are column vectors: \code // Instantiation of a 3-dimensional column vector blaze::DynamicVector<int> c( 3UL ); \endcode // \n \section vectors_details Vector Details // <hr> // // - \ref vector_types // - \ref vector_operations // // // \n \section vectors_examples Examples // <hr> \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowVector; using blaze::columnVector; StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector // ... Resizing and initialization c = a + trans( b ); \endcode // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types*********************************************************************************** /*!\page vector_types Vector Types // // \tableofcontents // // // \n \section vector_types_static_vector StaticVector // <hr> // // The blaze::StaticVector class template is the representation of a fixed size vector with // statically allocated elements of arbitrary type. 
It can be included via the header file \code #include <blaze/math/StaticVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class StaticVector; \endcode // - \c Type: specifies the type of the vector elements. StaticVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the total number of vector elements. It is expected that StaticVector is // only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at // compile time: \code // Definition of a 3-dimensional integral column vector blaze::StaticVector<int,3UL> a; // Definition of a 4-dimensional single precision column vector blaze::StaticVector<float,4UL,blaze::columnVector> b; // Definition of a 6-dimensional double precision row vector blaze::StaticVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_dynamic_vector DynamicVector // <hr> // // The blaze::DynamicVector class template is the representation of an arbitrary sized vector // with dynamically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/DynamicVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class DynamicVector; \endcode // - \c Type: specifies the type of the vector elements. DynamicVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. 
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best // choice for medium to large vectors. Its size can be modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::DynamicVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::DynamicVector<double,blaze::rowVector> c; \endcode // \n \section vector_types_hybrid_vector HybridVector // <hr> // // The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and // the blaze::DynamicVector class templates. It represents a fixed size vector with statically // allocated elements, but still can be dynamically resized (within the bounds of the available // memory). It can be included via the header file \code #include <blaze/math/HybridVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class HybridVector; \endcode // - \c Type: specifies the type of the vector elements. HybridVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the maximum number of vector elements. It is expected that HybridVector // is only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not // known at compile time or not fixed at runtime, but whose maximum size is known at compile // time: \code // Definition of a 3-dimensional integral column vector with a maximum size of 6 blaze::HybridVector<int,6UL> a( 3UL ); // Definition of a 4-dimensional single precision column vector with a maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 and a maximum size of 6 blaze::HybridVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_custom_vector CustomVector // <hr> // // The blaze::CustomVector class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data // structure. Thus in contrast to all other dense vector types a custom vector does not perform // any kind of memory allocation by itself, but it is provided with an existing array of element // during construction. A custom vector can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomVector.h> \endcode // The type of the elements, the properties of the given array of elements and the transpose // flag of the vector can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool TF > class CustomVector; \endcode // - Type: specifies the type of the vector elements. blaze::CustomVector can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specified whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. 
// - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::CustomVector is the right choice if any external array needs to be represented as // a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomVector; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays typedef CustomVector<int,unaligned,unpadded,columnVector> UnalignedUnpadded; std::vector<int> vec( 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); // Definition of a managed custom column vector for unaligned but padded 'float' arrays typedef CustomVector<float,unaligned,padded,columnVector> UnalignedPadded; UnalignedPadded b( new float[16], 9UL, 16UL, blaze::ArrayDelete() ); // Definition of a managed custom row vector for aligned, unpadded 'double' arrays typedef CustomVector<double,aligned,unpadded,rowVector> AlignedUnpadded; AlignedUnpadded c( blaze::allocate<double>( 7UL ), 7UL, blaze::Deallocate() ); // Definition of a managed custom row vector for aligned, padded 'complex<double>' arrays typedef CustomVector<complex<double>,aligned,padded,columnVector> AlignedPadded; AlignedPadded d( allocate< complex<double> >( 8UL ), 5UL, 8UL, blaze::Deallocate() ); \endcode // In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several // special characteristics. All of these result from the fact that a custom vector is not // performing any kind of memory allocation, but instead is given an existing array of elements. 
// The following sections discuss all of these characteristics: // // -# <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref vector_types_custom_vector_copy_operations</b> // -# <b>\ref vector_types_custom_vector_alignment</b> // -# <b>\ref vector_types_custom_vector_padding</b> // // \n \subsection vector_types_custom_vector_memory_management Memory Management // // The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // vector data structure. However, this flexibility comes with the price that the user of a custom // vector is responsible for the resource management. // // When constructing a custom vector there are two choices: Either a user manually manages the // array of elements outside the custom vector, or alternatively passes the responsibility for // the memory management to an instance of CustomVector. In the second case the CustomVector // class employs shared ownership between all copies of the custom vector, which reference the // same array. // // The following examples give an impression of several possible types of custom vectors: \code using blaze::CustomVector; using blaze::ArrayDelete; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::columnVector; using blaze::rowVector; // Definition of a 3-dimensional custom vector with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); // Definition of a custom row vector with size 3 for unaligned, unpadded integer arrays. 
// The responsibility for the memory management is passed to the custom vector by // providing a deleter of type 'blaze::ArrayDelete' that is used during the destruction // of the custom vector. CustomVector<int,unaligned,unpadded,rowVector> b( new int[3], 3UL, ArrayDelete() ); // Definition of a custom vector with size 3 and capacity 16 with aligned and padded // integer array. The memory management is passed to the custom vector by providing a // deleter of type 'blaze::Deallocate'. CustomVector<int,aligned,padded> c( allocate<int>( 16UL ), 3UL, 16UL, Deallocate() ); \endcode // It is possible to pass any type of deleter to the constructor. The deleter is only required // to provide a function call operator that can be passed the pointer to the managed array. As // an example the following code snipped shows the implementation of two native \b Blaze deleters // blaze::ArrayDelete and blaze::Deallocate: \code namespace blaze { struct ArrayDelete { template< typename Type > inline void operator()( Type ptr ) const { boost::checked_array_delete( ptr ); } }; struct Deallocate { template< typename Type > inline void operator()( Type ptr ) const { deallocate( ptr ); } }; } // namespace blaze \endcode // \n \subsection vector_types_custom_vector_copy_operations Copy Operations // // As with all dense vectors it is possible to copy construct a custom vector: \code using blaze::CustomVector; using blaze::unaligned; using blaze::unpadded; typedef CustomVector<int,unaligned,unpadded> CustomType; std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10 CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector a[1] = 20; // Also modifies the std::vector CustomType b( a ); // Creating a copy of vector a b[2] = 20; // Also affect vector a and the std::vector \endcode // It is important to note that a custom vector acts as a reference to the specified array. 
Thus // the result of the copy constructor is a new custom vector that is referencing and representing // the same array as the original custom vector. In case a deleter has been provided to the first // custom vector, both vectors share the responsibility to destroy the array when the last vector // goes out of scope. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom vector, but modifies the values of the array: \code std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4 CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector a = c; // Copy assignment: Set all values of vector a and b to 4. \endcode // \n \subsection vector_types_custom_vector_alignment Alignment // // In case the custom vector is specified as \c aligned the passed array must be guaranteed to // be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For // instance, if AVX is active an array of integers must be 32-bit aligned: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::aligned; using blaze::unpadded; int* array = blaze::allocate<int>( 5UL ); // Needs to be 32-bit aligned CustomVector<int,aligned,unpadded> a( array, 5UL, Deallocate() ); \endcode // In case the alignment requirements are violated, a \c std::invalid_argument exception is // thrown. // // \n \subsection vector_types_custom_vector_padding Padding // // Adding padding elements to the end of an array can have a significant impact on the performance. 
// For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors // of double precision values can be added via a single SIMD addition operation: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; typedef CustomVector<double,aligned,padded> CustomType; // Creating padded custom vectors of size 3 and a capacity of 4 CustomType a( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); CustomType b( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); CustomType c( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); // ... Initialization c = a + b; // AVX-based vector addition \endcode // In this example, maximum performance is possible. However, in case no padding elements are // inserted, a scalar addition has to be used: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; typedef CustomVector<double,aligned,unpadded> CustomType; // Creating unpadded custom vector of size 3 CustomType a( allocate<double>( 3UL ), 3UL, Deallocate() ); CustomType b( allocate<double>( 3UL ), 3UL, Deallocate() ); CustomType c( allocate<double>( 3UL ), 3UL, Deallocate() ); // ... Initialization c = a + b; // Scalar vector addition \endcode // Note the different number of constructor parameters for unpadded and padded custom vectors: // In contrast to unpadded vectors, where during the construction only the size of the array // has to be specified, during the construction of a padded custom vector it is additionally // necessary to explicitly specify the capacity of the array. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom vector the added padding elements must // guarantee that the capacity is a multiple of the SIMD vector width. 
In case of unaligned // padded vectors \f$ N-1 \f$ additional padding elements are required, where \f$ N \f$ is // the SIMD vector width. In case the padding is insufficient with respect to the available // instruction set, a \c std::invalid_argument exception is thrown. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \section vector_types_compressed_vector CompressedVector // <hr> // // The blaze::CompressedVector class is the representation of an arbitrarily sized sparse // vector, which stores only non-zero elements of arbitrary type. It can be included via the // header file \code #include <blaze/math/CompressedVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class CompressedVector; \endcode // - \c Type: specifies the type of the vector elements. CompressedVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::CompressedVector is the right choice for all kinds of sparse vectors: \code // Definition of a 3-dimensional integral column vector blaze::CompressedVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL ); // Definition of a double precision row vector with size 0 blaze::CompressedVector<double,blaze::rowVector> c; \endcode // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations****************************************************************************** /*!\page vector_operations Vector Operations // // \tableofcontents // // // \n \section vector_operations_constructors Constructors // <hr> // // Instantiating and setting up a vector is very easy and intuitive. However, there are a few // rules to take care of: // - In case the last template parameter (the transpose flag) is omitted, the vector is per // default a column vector. // - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection vector_operations_default_construction Default Construction \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; // All vectors can be default constructed. Whereas the size // of StaticVectors is fixed via the second template parameter, // the initial size of a default constructed DynamicVector or // CompressedVector is 0. StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector. 
// All elements are initialized to 0. StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector. // Again, all elements are initialized to 0L. DynamicVector<float> v3; // Instantiation of a dynamic single precision column // vector of size 0. DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row // vector of size 0. CompressedVector<int> v5; // Instantiation of a compressed integer column // vector of size 0. CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row // vector of size 0. \endcode // \n \subsection vector_operations_size_construction Construction with Specific Size // // The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that // allows to immediately give the vector the required size. Whereas both dense vectors (i.e. // \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector // elements, \c CompressedVector merely acquires the size but remains empty. \code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector // of size 9. The elements are NOT initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single // precision complex values. The elements are // default constructed. CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with // size 10. Initially, the vector provides no // capacity for non-zero elements. \endcode // \n \subsection vector_operations_initialization_constructors Initialization Constructors // // All dense vector classes offer a constructor that allows for a direct, homogeneous initialization // of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements // can be specified \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector. 
// All elements are initialized to 2. DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision // column vector of size 3. All elements are // set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column // vector of size 15, which provides enough // space for at least 3 non-zero elements. \endcode // \n \subsection vector_operations_array_construction Array Construction // // Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic // or static array. If the vector is initialized from a dynamic array, the constructor expects the // actual size of the array as first argument, the array as second argument. In case of a static // array, the fixed size of the array is used: \code const unique_ptr<double[]> array1( new double[2] ); // ... Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( 2UL, array1.get() ); int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); \endcode // \n \subsection vector_operations_initializer_list_construction Initializer List Construction // // In addition, all dense vector classes can be directly initialized by means of an initializer // list: \code blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F }; \endcode // \n \subsection vector_operations_copy_construction Copy Construction // // All dense and sparse vectors can be created as the copy of any other dense or sparse vector // with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector). \code StaticVector<int,9UL,columnVector> v16( v7 ); // Instantiation of the dense column vector v16 // as copy of the dense column vector v7. DynamicVector<int,rowVector> v17( v9 ); // Instantiation of the dense row vector v17 as // copy of the sparse row vector v9. CompressedVector<int,columnVector> v18( v1 ); // Instantiation of the sparse column vector v18 // as copy of the dense column vector v1. 
CompressedVector<float,rowVector> v19( v12 ); // Instantiation of the sparse row vector v19 as // copy of the row vector v12. \endcode // Note that it is not possible to create a \c StaticVector as a copy of a vector with a different // size: \code StaticVector<int,5UL,columnVector> v23( v7 ); // Runtime error: Size does not match! StaticVector<int,4UL,rowVector> v24( v10 ); // Compile time error: Size does not match! \endcode // \n \section vector_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse vectors: // \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment, // \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment. // // \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment // // Sometimes it may be necessary to assign the same value to all elements of a dense vector. // For this purpose, the assignment operator can be used: \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; // Setting all integer elements of the StaticVector to 2 v1 = 2; // Setting all double precision elements of the DynamicVector to 5.0 v2 = 5.0; \endcode // \n \subsection vector_operations_array_assignment Array Assignment // // Dense vectors can also be assigned a static array: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; float array1[2] = { 1.0F, 2.0F }; double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 }; v1 = array1; v2 = array2; \endcode // \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense vector: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; v1 = { 1.0F, 2.0F }; v2 = { 2.1, 4.0, -1.7, 8.6, -7.2 }; \endcode // \n \subsection vector_operations_copy_assignment Copy Assignment // // For all vector types it is generally possible 
to assign another vector with the same transpose // flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the // assigned vector is required to have the same size as the \c StaticVector since the size of a // \c StaticVector cannot be adapted! \code blaze::StaticVector<int,3UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 3UL ); blaze::DynamicVector<float,columnVector> v3( 5UL ); blaze::CompressedVector<int,columnVector> v4( 3UL ); blaze::CompressedVector<float,rowVector> v5( 3UL ); // ... Initialization of the vectors v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign a row vector to a column vector \endcode // \n \subsection vector_operations_compound_assignment Compound Assignment // // Next to plain assignment, it is also possible to use addition assignment, subtraction // assignment, and multiplication assignment. Note however, that in contrast to plain assignment // the size and the transpose flag of the vectors has be to equal in order to able to perform a // compound assignment. \code blaze::StaticVector<int,5UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 5UL ); blaze::CompressedVector<float,columnVector> v3( 7UL ); blaze::DynamicVector<float,rowVector> v4( 7UL ); blaze::CompressedVector<float,rowVector> v5( 7UL ); // ... 
Initialization of the vectors v1 += v2; // OK: Addition assignment between two column vectors of the same size v1 += v3; // Runtime error: No compound assignment between vectors of different size v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size \endcode // \n \section vector_operations_element_access Element Access // <hr> // // The easiest and most intuitive way to access a dense or sparse vector is via the subscript // operator. The indices to access a vector are zero-based: \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; \endcode // Whereas using the subscript operator on a dense vector only accesses the already existing // element, accessing an element of a sparse vector via the subscript operator potentially // inserts the element into the vector and may therefore be more expensive. Consider the // following example: \code blaze::CompressedVector<int> v1( 10UL ); for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode // Although the compressed vector is only used for read access within the for loop, using the // subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore, all // vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end(), and \c cend() functions to traverse the currently contained elements by iterators. // In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a // manipulation of the non-zero value, in case of a constant vector or in case \c cbegin() or // \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedVector; CompressedVector<int> v1( 10UL ); // ... 
Initialization of the vector // Traversing the vector by Iterator for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } // Traversing the vector by ConstIterator for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) { // ... } for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) { // ... } \endcode // \n \section vector_operations_element_insertion Element Insertion // <hr> // // In contrast to dense vectors, that store all elements independent of their value and that // offer direct access to all elements, spares vectors only store the non-zero elements contained // in the vector. Therefore it is necessary to explicitly add elements to the vector. The first // option to add elements to a sparse vector is the subscript operator: \code using blaze::CompressedVector; CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode // In case the element at the given index is not yet contained in the vector, it is automatically // inserted. Otherwise the old value is replaced by the new value 2. 
The operator returns a // reference to the sparse vector element.\n // An alternative is the \c set() function: In case the element is not yet contained in the vector // the element is inserted, else the element's value is modified: \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode // However, insertion of elements can be better controlled via the \c insert() function. In contrast // to the subscript operator and the \c set() function it emits an exception in case the element is // already contained in the vector. In order to check for this case, the \c find() function can be // used: \code // In case the element at index 4 is not yet contained in the matrix it is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) v1.insert( 4, 6 ); \endcode // Although the \c insert() function is very flexible, due to performance reasons it is not suited // for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill // a sparse vector is the \c append() function. It requires the sparse vector to provide enough // capacity to insert a new element. Additionally, the index of the new element must be larger // than the index of the previous element. Violating these conditions results in undefined // behavior! \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, 4 ); // Appending the element 4 at index 6 // ... 
\endcode // \n \section vector_operations_member_functions Member Functions // <hr> // // \subsection vector_operations_size .size() // // Via the \c size() member function, the current size of a dense or sparse vector can be queried: \code // Instantiating a dynamic vector with size 10 blaze::DynamicVector<int> v1( 10UL ); v1.size(); // Returns 10 // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements blaze::CompressedVector<double> v2( 12UL, 3UL ); v2.size(); // Returns 12 \endcode // Alternatively, the free function \c size() can be used to query to current size of a vector. // In contrast to the member function, the free function can also be used to query the size of // vector expressions: \code size( v1 ); // Returns 10, i.e. has the same effect as the member function size( v2 ); // Returns 12, i.e. has the same effect as the member function blaze::DynamicMatrix<int> A( 15UL, 12UL ); size( A * v2 ); // Returns 15, i.e. the size of the resulting vector \endcode // \n \subsection vector_operations_capacity .capacity() // // Via the \c capacity() (member) function the internal capacity of a dense or sparse vector // can be queried. Note that the capacity of a vector doesn't have to be equal to the size // of a vector. In case of a dense vector the capacity will always be greater or equal than // the size of the vector, in case of a sparse vector the capacity may even be less than // the size. \code v1.capacity(); // Returns at least 10 \endcode // For symmetry reasons, there is also a free function /c capacity() available that can be used // to query the capacity: \code capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function \endcode // Note, however, that it is not possible to query the capacity of a vector expression: \code capacity( A * v1 ); // Compilation error! 
\endcode // \n \subsection vector_operations_nonzeros .nonZeros() // // For both dense and sparse vectors the number of non-zero elements can be determined via the // \c nonZeros() member function. Sparse vectors directly return their number of non-zero // elements, dense vectors traverse their elements and count the number of non-zero elements. \code v1.nonZeros(); // Returns the number of non-zero elements in the dense vector v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector \endcode // There is also a free function \c nonZeros() available to query the current number of non-zero // elements: \code nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in // a vector expression. However, the result is not the exact number of non-zero elements, but // may be a rough estimation: \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression \endcode // \n \subsection vector_operations_resize_reserve .resize() / .reserve() // // The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector // cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c CompressedVectors can be changed via the \c resize() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); v2[1] = -2; v2[3] = 11; // Adapting the size of the dynamic and compressed vectors. The (optional) second parameter // specifies whether the existing elements should be preserved. Per default, the existing // elements are not preserved. v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain // uninitialized, elements of class type are default constructed. 
v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the // new elements are NOT initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved. v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost. \endcode // Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors) // on the vector: \code typedef blaze::DynamicVector<int,rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v1( 10UL ); // Creating a dynamic vector of size 10 SubvectorType sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6] v1.resize( 6UL ); // Resizing the vector invalidates the view \endcode // When the internal capacity of a vector is no longer sufficient, the allocation of a larger // junk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // Returns 0 v1.capacity(); // Returns at least 100 \endcode // Note that the size of the vector remains unchanged, but only the internal capacity is set // according to the specified value! // // // \n \section vector_operations_free_functions Free Functions // <hr> // // \subsection vector_operations_reset_clear reset() / clear() // // In order to reset all elements of a vector, the \c reset() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); // Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged. reset( v1 ); // Resetting all elements v1.size(); // Returns 3: size and capacity remain unchanged \endcode // In order to return a vector to its default state (i.e. 
the state of a default constructed // vector), the \c clear() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); // Resetting the entire vector. clear( v1 ); // Resetting the entire vector v1.size(); // Returns 0: size is reset, but capacity remains unchanged \endcode // Note that resetting or clearing both dense and sparse vectors does not change the capacity // of the vectors. // // // \n \subsection vector_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse vector for non-a-number // elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode // If at least one element of the vector is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for vectors with floating point // elements. The attempt to use it for a vector with a non-floating point element type results in // a compile time error. // // // \n \subsection vector_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse vector is in default state: \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( isDefault( a ) ) { ... } \endcode // A vector is in default state if it appears to just have been default constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are // in default state if its size is equal to zero. A non-resizable vector (\c StaticVector, all // subvectors, rows, and columns) is in default state if all its elements are in default state. 
// For instance, in case the vector is instantiated for a built-in integral or floating point data // type, the function returns \c true in case all vector elements are 0 and \c false in case any // vector element is not 0. // // // \n \subsection vector_operations_isUniform isUniform() // // In order to check if all vector elements are identical, the \c isUniform function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isUniform( a ) ) { ... } \endcode // Note that in case of sparse vectors also the zero elements are also taken into account! // // // \n \subsection vector_operations_min_max min() / max() // // The \c min() and the \c max() functions return the smallest and largest element of the given // dense or sparse vector, respectively: \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, 4 }; blaze::StaticVector<int,4UL,rowVector> b{ -5, 2, -7, -4 }; min( a ); // Returns -5 min( b ); // Returns -7 max( a ); // Returns 7 max( b ); // Returns 2 \endcode // In case the vector currently has a size of 0, both functions return 0. Additionally, in case // a given sparse vector is not completely filled, the zero elements are taken into account. For // example: the following compressed vector has only 2 non-zero elements. However, the minimum // of this vector is 0: \code blaze::CompressedVector<int> c( 4UL, 2UL ); c[0] = 1; c[2] = 3; min( c ); // Returns 0 \endcode // Also note that the \c min() and \c max() functions can be used to compute the smallest and // largest element of a vector expression: \code min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector \endcode // \n \subsection vector_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a vector. 
// For instance, the following computation \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ // \n \subsection vector_operators_floor_ceil floor() / ceil() // // The \c floor() and \c ceil() functions can be used to round down/up each element of a vector, // respectively: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = floor( a ); // Rounding down each element of the vector b = ceil( a ); // Rounding up each element of the vector \endcode // \n \subsection vector_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse vector to compute the complex // conjugate of each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode // Additionally, vectors can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicVector<cplx> c( 5UL ); conjugate( c ); // In-place conjugate operation. 
c = conj( c ); // Same as above \endcode // \n \subsection vector_operators_real real() // // The \c real() function can be used on a dense or sparse vector to extract the real part of // each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the real part of each vector element // ( -2 ) // ( 1 ) StaticVector<double,2UL> b; b = real( a ); \endcode // \n \subsection vector_operators_imag imag() // // The \c imag() function can be used on a dense or sparse vector to extract the imaginary part // of each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the imaginary part of each vector element // ( -1 ) // ( 1 ) StaticVector<double,2UL> b; b = imag( a ); \endcode // \n \subsection vector_operations_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // vector can be computed: \code blaze::DynamicVector<double> a, b, c; b = sqrt( a ); // Computes the square root of each element c = invsqrt( a ); // Computes the inverse square root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the the (inverse) cubic root // of each element of a vector: \code blaze::HybridVector<double,3UL> a, b, c; b = cbrt( a ); // Computes the cubic root of each element c = invcbrt( a ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = pow( a, 1.2 ); // Computes the exponential value of each element \endcode // \n \subsection vector_operations_exp exp() // // \c exp() computes the base e exponential of each element of a vector: \code blaze::DynamicVector<double> a, b; b = exp( a ); // Computes the base e exponential of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_log log() / log10() // // The \c log() and \c log10() functions can be used to compute the natural and common logarithm // of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = log( a ); // Computes the natural logarithm of each element b = log10( a ); // Computes the common logarithm of each element \endcode // \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sin( a ); // Computes the sine of each element of the vector b = cos( a ); // Computes the cosine of each element of the vector b = tan( a ); // Computes the tangent of each element of the vector b = asin( a ); // Computes the inverse sine of each element of the vector b = acos( a ); // Computes the inverse cosine of each element of the vector b = atan( a ); // Computes the inverse tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sinh( a ); // Computes the hyperbolic sine of each element of the vector b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a vector: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = erf( a ); // Computes the error function of each element b = erfc( a ); // Computes the complementary error function of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_foreach forEach() // // Via the \c forEach() function it is possible to execute custom operations on dense and sparse // vectors. For instance, the following example demonstrates a custom square root computation via // a lambda: \code blaze::DynamicVector<double> a, b; b = forEach( a, []( double d ) { return std::sqrt( d ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. 
// // // \n \subsection vector_operations_length length() / sqrLength() // // In order to calculate the length of a vector, both the \c length() and \c sqrLength() function // can be used: \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; const float len = length ( v ); // Computes the current length of the vector const float sqrlen = sqrLength( v ); // Computes the square length of the vector \endcode // Note that both functions can only be used for vectors with built-in or complex element type! // // // \n \subsection vector_operations_vector_transpose trans() // // As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors // (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However, // vectors can be transposed via the \c trans() function: \code blaze::DynamicVector<int,columnVector> v1( 4UL ); blaze::CompressedVector<int,rowVector> v2( 4UL ); v1 = v2; // Compilation error: Cannot assign a row vector to a column vector v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it // to the column vector v1 v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column vectors \endcode // \n \subsection vector_operations_conjugate_transpose ctrans() // // It is also possible to compute the conjugate transpose of a vector. 
This operation is available // via the \c ctrans() function: \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector \endcode // \n \subsection vector_operations_normalize normalize() // // The \c normalize() function can be used to scale any non-zero vector to a length of 1. In // case the vector does not contain a single non-zero element (i.e. is a zero vector), the // \c normalize() function returns a zero vector. \code blaze::DynamicVector<float,columnVector> v1( 10UL ); blaze::CompressedVector<double,columnVector> v2( 12UL ); v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 (or 0 in case of a zero vector) \endcode // Note that the \c normalize() function only works for floating point vectors. The attempt to // use it for an integral vector results in a compile time error. 
// // \n \subsection vector_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two vectors of // the same type: \code blaze::DynamicVector<int,columnVector> v1( 10UL ); blaze::DynamicVector<int,columnVector> v2( 20UL ); swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices*************************************************************************************** /*!\page matrices Matrices // // \tableofcontents // // // \n \section matrices_general General Concepts // <hr> // // The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix, // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix) // and one sparse matrix type (\ref matrix_types_compressed_matrix). All matrices can either be // stored as row-major matrices or column-major matrices: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, { 3, 6 } }; \endcode // Per default, all matrices in \b Blaze are row-major matrices: \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( 3UL, 3UL ); \endcode // \n \section matrices_details Matrix Details // <hr> // // - \ref matrix_types // - \ref matrix_operations // // // \n \section matrices_examples Examples // <hr> \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 
row-major static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix // ... Resizing and initialization C = A * B; \endcode // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types*********************************************************************************** /*!\page matrix_types Matrix Types // // \tableofcontents // // // \n \section matrix_types_static_matrix StaticMatrix // <hr> // // The blaze::StaticMatrix class template is the representation of a fixed size matrix with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticMatrix.h> \endcode // The type of the elements, the number of rows and columns, and the storage order of the matrix // can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class StaticMatrix; \endcode // - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c M : specifies the total number of rows of the matrix. // - \c N : specifies the total number of columns of the matrix. Note that it is expected // that StaticMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. 
// // The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are // known at compile time: \code // Definition of a 3x4 integral row-major matrix blaze::StaticMatrix<int,3UL,4UL> A; // Definition of a 4x6 single precision row-major matrix blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; // Definition of a 6x4 double precision column-major matrix blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_dynamic_matrix DynamicMatrix // <hr> // // The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/DynamicMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class DynamicMatrix; \endcode // - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best // choice for medium to large matrices. 
The number of rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::DynamicMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_hybrid_matrix HybridMatrix // <hr> // // The HybridMatrix class template combines the flexibility of a dynamically sized matrix with // the efficiency and performance of a fixed size matrix. It is implemented as a crossing between // the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static // matrix it uses static stack memory instead of dynamically allocated memory and similar to the // dynamic matrix it can be resized (within the extend of the static memory). It can be included // via the header file \code #include <blaze/math/HybridMatrix.h> \endcode // The type of the elements, the maximum number of rows and columns and the storage order of the // matrix can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class HybridMatrix; \endcode // - Type: specifies the type of the matrix elements. HybridMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - M : specifies the maximum number of rows of the matrix. // - N : specifies the maximum number of columns of the matrix. Note that it is expected // that HybridMatrix is only used for tiny and small matrices. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. 
// // The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions // are not known at compile time or not fixed at runtime, but whose maximum dimensions are known // at compile time: \code // Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a 0x0 double precision column-major matrix and maximum dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_custom_matrix CustomMatrix // <hr> // // The blaze::CustomMatrix class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data // structure. Thus in contrast to all other dense matrix types a custom matrix does not perform // any kind of memory allocation by itself, but it is provided with an existing array of element // during construction. A custom matrix can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomMatrix.h> \endcode // The type of the elements, the properties of the given array of elements and the storage order // of the matrix can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool SO > class CustomMatrix; \endcode // - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. 
// - PF : specified whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CustomMatrix is the right choice if any external array needs to be represented as // a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomMatrix; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays typedef CustomMatrix<int,unaligned,unpadded,rowMajor> UnalignedUnpadded; std::vector<int> vec( 12UL ) UnalignedUnpadded A( &vec[0], 3UL, 4UL ); // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays typedef CustomMatrix<float,unaligned,padded,columnMajor> UnalignedPadded; UnalignedPadded B( new float[40], 5UL, 6UL, 8UL, blaze::ArrayDelete() ); // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays typedef CustomMatrix<double,aligned,unpadded,rowMajor> AlignedUnpadded; AlignedUnpadded C( blaze::allocate<double>( 192UL ), 12UL, 13UL, 16UL, blaze::Deallocate ); // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays typedef CustomMatrix<complex<double>,aligned,padded,columnMajor> AlignedPadded; AlignedPadded D( blaze::allocate<double>( 112UL ), 7UL, 14UL, 16UL, blaze::Deallocate() ); \endcode // In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several // special characteristics. All of these result from the fact that a custom matrix is not // performing any kind of memory allocation, but instead is given an existing array of elements. 
// The following sections discuss all of these characteristics: // // -# <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref matrix_types_custom_matrix_alignment</b> // -# <b>\ref matrix_types_custom_matrix_padding</b> // // \n \subsection matrix_types_custom_matrix_memory_management Memory Management // // The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // matrix data structure. However, this flexibility comes with the price that the user of a custom // matrix is responsible for the resource management. // // When constructing a custom matrix there are two choices: Either a user manually manages the // array of elements outside the custom matrix, or alternatively passes the responsibility for // the memory management to an instance of CustomMatrix. In the second case the CustomMatrix // class employs shared ownership between all copies of the custom matrix, which reference the // same array. // // The following examples give an impression of several possible types of custom matrices: \code using blaze::CustomMatrix; using blaze::ArrayDelete; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); // Definition of a 3x4 custom row-major matrix for unaligned, unpadded integer arrays. 
// The responsibility for the memory management is passed to the custom matrix by // providing a deleter of type 'blaze::ArrayDelete' that is used during the destruction // of the custom matrix. CustomMatrix<int,unaligned,unpadded,rowMajor> B( new int[12], 3UL, 4UL, ArrayDelete() ); // Definition of a custom 8x12 matrix for an aligned and padded integer array of // capacity 128 (including 8 padding elements per row). The memory management is passed // to the custom matrix by providing a deleter of type 'blaze::Deallocate'. CustomMatrix<int,aligned,padded> C( allocate<int>( 128UL ), 8UL, 12UL, 16UL, Deallocate() ); \endcode // It is possible to pass any type of deleter to the constructor. The deleter is only required // to provide a function call operator that can be passed the pointer to the managed array. As // an example the following code snipped shows the implementation of two native \b Blaze deleters // blaze::ArrayDelete and blaze::Deallocate: \code namespace blaze { struct ArrayDelete { template< typename Type > inline void operator()( Type ptr ) const { boost::checked_array_delete( ptr ); } }; struct Deallocate { template< typename Type > inline void operator()( Type ptr ) const { deallocate( ptr ); } }; } // namespace blaze \endcode // \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations // // As with all dense matrices it is possible to copy construct a custom matrix: \code using blaze::CustomMatrix; using blaze::unaligned; using blaze::unpadded; typedef CustomMatrix<int,unaligned,unpadded> CustomType; std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix a[1] = 20; // Also modifies the std::vector CustomType B( a ); // Creating a copy of vector a b[2] = 20; // Also affect matrix A and the std::vector \endcode // It is important to note that a custom matrix acts as a reference to the specified array. 
Thus // the result of the copy constructor is a new custom matrix that is referencing and representing // the same array as the original custom matrix. In case a deleter has been provided to the first // custom matrix, both matrices share the responsibility to destroy the array when the last matrix // goes out of scope. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom matrices, but modifies the values of the array: \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode // \n \subsection matrix_types_custom_matrix_alignment Alignment // // In case the custom matrix is specified as \c aligned the passed array must adhere to some // alignment restrictions based on the alignment requirements of the used data type and the // used instruction set (SSE, AVX, ...). The restriction applies to the first element of each // row/column: In case of a row-major matrix the first element of each row must be properly // aligned, in case of a column-major matrix the first element of each column must be properly // aligned. For instance, if a row-major matrix is used and AVX is active the first element of // each row must be 32-bit aligned: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::aligned; using blaze::padded; using blaze::rowMajor; int* array = blaze::allocate<int>( 40UL ); // Is guaranteed to be 32-bit aligned CustomMatrix<int,aligned,padded,rowMajor> A( array, 5UL, 6UL, 8UL, Deallocate() ); \endcode // In the example, the row-major matrix has six columns. However, since with AVX eight integer // values are loaded together the matrix is padded with two additional elements. This guarantees // that the first element of each row is 32-bit aligned. 
In case the alignment requirements are // violated, a \c std::invalid_argument exception is thrown. // // \n \subsection matrix_types_custom_matrix_padding Padding // // Adding padding elements to the end of each row/column can have a significant impact on the // performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double // precision matrices can be added via three SIMD addition operations: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; typedef CustomMatrix<double,aligned,padded> CustomType; // Creating padded custom 3x3 matrix with an additional padding element in each row CustomType A( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType B( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType C( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); // ... Initialization C = A + B; // AVX-based matrix addition \endcode // In this example, maximum performance is possible. However, in case no padding elements are // inserted a scalar addition has to be used: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; typedef CustomMatrix<double,aligned,unpadded> CustomType; // Creating unpadded custom 3x3 matrix CustomType A( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType B( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType C( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); // ... Initialization C = A + B; // Scalar matrix addition \endcode // Note that the construction of padded and unpadded aligned matrices looks identical. However, // in case of padded matrices, \b Blaze will zero initialize the padding element and use them // in all computations in order to achieve maximum performance. 
In case of an unpadded matrix // \b Blaze will ignore the elements with the downside that it is not possible to load a complete // row to an AVX register, which makes it necessary to fall back to a scalar addition. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom matrix the added padding elements must // guarantee that the total number of elements in each row/column is a multiple of the SIMD // vector width. In case of an unaligned padded matrix the number of padding elements can be // greater or equal the number of padding elements of an aligned padded custom matrix. In case // the padding is insufficient with respect to the available instruction set, a // \c std::invalid_argument exception is thrown. // // // \n \section matrix_types_compressed_matrix CompressedMatrix // <hr> // // The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be // included via the header file \code #include <blaze/math/CompressedMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class CompressedMatrix; \endcode // - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. 
// // The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices: \code // Definition of a 3x4 integral row-major matrix blaze::CompressedMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations****************************************************************************** /*!\page matrix_operations Matrix Operations // // \tableofcontents // // // \n \section matrix_operations_constructors Constructors // <hr> // // Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules // to be aware of: // - In case the last template parameter (the storage order) is omitted, the matrix is per // default stored in row-major order. // - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection matrix_operations_default_construction Default Construction \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; // All matrices can be default constructed. Whereas the size of // a StaticMatrix is fixed via the second and third template // parameter, the initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major // matrix. All elements are initialized to 0. 
DynamicMatrix<float> M2; // Instantiation of a single precision dynamic // row-major matrix with 0 rows and 0 columns. DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic // column-major matrix with 0 rows and 0 columns. CompressedMatrix<int> M4; // Instantiation of a compressed integer // row-major matrix of size 0x0. CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision // column-major matrix of size 0x0. \endcode // \n \subsection matrix_operations_size_construction Construction with Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor // that allows to immediately give the matrices a specific number of rows and columns: \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major // matrix. The elements are not initialized. HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major // matrix. The elements are not initialized. CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed // column-major matrix. \endcode // Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately // allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this // example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory. // // // \n \subsection matrix_operations_initialization_constructors Initialization Constructors // // All dense matrix classes offer a constructor for a direct, homogeneous initialization of all // matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements // can be specified. \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major // matrix. All elements are initialized to 7. 
DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major // matrix. All elements are initialized to 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. \endcode // \n \subsection matrix_operations_array_construction Array Construction // // Alternatively, all dense matrix classes offer a constructor for an initialization with a // dynamic or static array. If the matrix is initialized from a dynamic array, the constructor // expects the dimensions of the values provided by the array as first and second argument, the // array as third argument. In case of a static array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[6] ); // ... Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() ); int array2[2][2] = { { 4, -5 }, { -6, 7 } }; blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); \endcode // \n \subsection matrix_operations_initializer_list_construction Initializer List Construction // // In addition, all dense matrix classes can be directly initialized by means of an initializer // list: \code blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } }; \endcode // \n \subsection matrix_operations_copy_construction Copy Construction // // All dense and sparse matrices can be created as a copy of another dense or sparse matrix. \code StaticMatrix<int,5UL,4UL,rowMajor> M15( M6 ); // Instantiation of the dense row-major matrix M15 // as copy of the dense row-major matrix M6. DynamicMatrix<float,columnMajor> M16( M8 ); // Instantiation of the dense column-major matrix M16 // as copy of the sparse column-major matrix M8. CompressedMatrix<double,columnMajor> M17( M7 ); // Instantiation of the compressed column-major matrix // M17 as copy of the dense row-major matrix M7.
CompressedMatrix<float,rowMajor> M18( M8 ); // Instantiation of the compressed row-major matrix // M18 as copy of the compressed column-major matrix M8. \endcode // Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different // number of rows and/or columns: \code StaticMatrix<int,4UL,5UL,rowMajor> M19( M6 ); // Runtime error: Number of rows and columns // does not match! StaticMatrix<int,4UL,4UL,columnMajor> M20( M9 ); // Compile time error: Number of columns does // not match! \endcode // \n \section matrix_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse matrices: // \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment, // \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment. // // // \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment // // It is possible to assign the same value to all elements of a dense matrix. 
All dense matrix // classes provide a corresponding assignment operator: \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; // Setting all integer elements of the StaticMatrix to 4 M1 = 4; // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5; \endcode // \n \subsection matrix_operations_array_assignment Array Assignment // // Dense matrices can also be assigned a static array: \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; blaze::DynamicMatrix<double> M3; int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M1 = array1; M2 = array1; M3 = array2; \endcode // Note that the dimensions of the static array have to match the size of a \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the array dimensions: \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 \\ \end{array}\right)\f$ // \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense matrix: \code blaze::DynamicMatrix<double> M; M = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; \endcode // \n \subsection matrix_operations_copy_assignment Copy Assignment // // All kinds of matrices can be assigned to each other. The only restriction is that since a // \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of // rows and in the number of columns. \code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); // ...
Initialization of the matrices M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix \endcode // \n \subsection matrix_operations_compound_assignment Compound Assignment // // Compound assignment is also available for matrices: addition assignment, subtraction assignment, // and multiplication assignment. In contrast to plain assignment, however, the number of rows // and columns of the two operands have to match according to the arithmetic operation. \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); // ... 
Initialization of the matrices M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix M1 += M4; // Runtime error: No compound assignment between matrices of different size M1 -= M5; // Compilation error: No compound assignment between matrices of different size M2 *= M6; // OK: Multiplication assignment between two row-major matrices \endcode // Note that the multiplication assignment potentially changes the number of columns of the // target matrix: \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ \end{array}\right)\f$ // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a // multiplication assignment with other square matrices of the same dimensions. // // // \n \section matrix_operations_element_access Element Access // <hr> // // The easiest way to access a specific dense or sparse matrix element is via the function call // operator. The indices to access a matrix are zero-based: \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // ... blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = -6.3; \endcode // Since dense matrices allocate enough memory for all contained elements, using the function // call operator on a dense matrix directly returns a reference to the accessed value. In case // of a sparse matrix, if the accessed value is currently not contained in the matrix, the // value is inserted into the matrix prior to returning a reference to the value, which can // be much more expensive than the direct access to a dense matrix.
Consider the following // example: \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); ++j ) { ... = M1(i,j); } } \endcode // Although the compressed matrix is only used for read access within the for loop, using the // function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore, // all matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end() and \c cend() functions to traverse all contained elements by iterator. Note that // it is not possible to traverse all elements of the matrix, but that it is only possible to // traverse elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and // \c end() return an \c Iterator, which allows a manipulation of the non-zero value, in case of // a constant matrix or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 4UL, 6UL ); // Traversing the matrix by Iterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } // Traversing the matrix by ConstIterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ...
= it->index(); // OK: Read access to the index of the non-zero element. } } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) { // ... } } for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) { // ... } } \endcode // \n \section matrix_operations_element_insertion Element Insertion // <hr> // // Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse // matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements // to the matrix. The first possibility to add elements to a sparse matrix is the function call // operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element.\n // An alternative is the \c set() function: In case the element is not yet contained in the matrix // the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // However, insertion of elements can be better controlled via the \c insert() function. In // contrast to the function call operator and the \c set() function it emits an exception in case // the element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4.
if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices. A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row. Additionally, the // index of the new element must be larger than the index of the previous element in the same // row. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ... \endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.finalize( 0 ); // Finalizing row 0 M1.append( 1, 1, 2 ); // Appending the value 2 in row 1 with column index 1 M1.finalize( 1 ); // Finalizing row 1 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \n \section matrix_operations_member_functions Member Functions // <hr> // // \subsection matrix_operations_rows .rows() // // The current number of rows of a matrix can be acquired via the \c rows() member function: \code // Instantiating a dynamic matrix with 10 rows and 8 columns blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 // Instantiating a compressed matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 \endcode // Alternatively, the 
free functions \c rows() can be used to query the current number of rows of // a matrix. In contrast to the member function, the free function can also be used to query the // number of rows of a matrix expression: \code rows( M1 ); // Returns 10, i.e. has the same effect as the member function rows( M2 ); // Returns 8, i.e. has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix \endcode // \n \subsection matrix_operations_capacity .capacity() // // The \c capacity() member function returns the internal capacity of a dense or sparse matrix. // Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of // a dense matrix the capacity will always be greater or equal than the total number of elements // of the matrix. In case of a sparse matrix, the capacity will usually be much less than the // total number of elements. 
\code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least 35 M2.capacity(); // Returns at least 28 \endcode // There is also a free function \c capacity() available to query the capacity. However, please // note that this function cannot be used to query the capacity of a matrix expression: \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function capacity( M1 * M2 ); // Compilation error! \endcode // \n \subsection matrix_operations_nonzeros .nonZeros() // // For both dense and sparse matrices the current number of non-zero elements can be queried // via the \c nonZeros() member function. In case of matrices there are two flavors of the // \c nonZeros() function: One returns the total number of non-zero elements in the matrix, // the second returns the number of non-zero elements in a specific row (in case of a row-major // matrix) or column (in case of a column-major matrix). Sparse matrices directly return their // number of non-zero elements, dense matrices traverse their elements and count the number of // non-zero elements. \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); // ... Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression. 
However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_resize_reserve .resize() / .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template // parameter and a \c CustomMatrix cannot be resized. In contrast, the number or rows and columns // of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, 2UL ); // Adapting the number of rows and columns via the resize() function. The (optional) // third parameter specifies whether the existing elements should be preserved. M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type // remain uninitialized, elements of class type are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the // new elements are NOT initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost. \endcode // Note that resizing a matrix invalidates all existing views (see e.g. 
\ref views_submatrices) // on the matrix: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType M1( 10UL, 20UL ); // Creating a 10x20 matrix RowType row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view \endcode // When the internal capacity of a matrix is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \section matrix_operations_free_functions Free Functions // <hr> // // \subsection matrix_operations_reset_clear reset() / clear() // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix are preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F.
reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be reset: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and \ref views_columns). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Clearing the matrix, i.e. returning it to its default state. clear( M1 ); // Resetting the entire matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number // elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for matrices with floating point // elements. The attempt to use it for a matrix with a non-floating point element type results in // a compile time error.
// // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if its size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state. For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization if( isSquare( A ) ) { ... } \endcode // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix // is symmetric: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isSymmetric( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be symmetric! // // // \n \subsection matrix_operations_isUniform isUniform() // // In order to check if all matrix elements are identical, the \c isUniform function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isUniform( A ) ) { ... } \endcode // Note that in case of a sparse matrix also the zero elements are also taken into account! 
// // // \n \subsection matrix_operations_islower isLower() // // Via the \c isLower() function it is possible to check whether a dense or sparse matrix is // lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower triangular! // // // \n \subsection matrix_operations_isunilower isUniLower() // // Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is // lower unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower unitriangular! // // // \n \subsection matrix_operations_isstrictlylower isStrictlyLower() // // Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix // is strictly lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly lower triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // // Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is // upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper triangular! // // // \n \subsection matrix_operations_isuniupper isUniUpper() // // Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is // upper unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper unitriangular! 
// // // \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix // is strictly upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly upper triangular! // // // \n \subsection matrix_operations_isdiagonal isDiagonal() // // The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix, // i.e. if it has only elements on its diagonal and if the non-diagonal elements are default // elements: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isDiagonal( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be diagonal! // // // \n \subsection matrix_operations_isidentity isIdentity() // // The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix, // i.e. if all diagonal elements are 1 and all non-diagonal elements are 0: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isIdentity( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be identity matrices! // // // \n \subsection matrix_operations_min_max min() / max() // // The \c min() and the \c max() functions return the smallest and largest element of the given // dense or sparse matrix, respectively: \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -5, 2, 7 }, { 4, 0, 1 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B{ { -5, 2, -7 }, { -4, 0, -1 } }; min( A ); // Returns -5 min( B ); // Returns -7 max( A ); // Returns 7 max( B ); // Returns 2 \endcode // In case the matrix currently has 0 rows or 0 columns, both functions return 0. Additionally, in // case a given sparse matrix is not completely filled, the zero elements are taken into account. 
// For example: the following compressed matrix has only 2 non-zero elements. However, the minimum // of this matrix is 0: \code blaze::CompressedMatrix<int> C( 2UL, 3UL ); C(0,0) = 1; C(0,2) = 3; min( C ); // Returns 0 \endcode // Also note that the \c min() and \c max() functions can be used to compute the smallest and // largest element of a matrix expression: \code min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix \endcode // \n \subsection matrix_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a matrix. // For instance, the following computation \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_floor_ceil floor() / ceil() // // The \c floor() and \c ceil() functions can be used to round down/up each element of a matrix, // respectively: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = floor( A ); // Rounding down each element of the matrix B = ceil( A ); // Rounding up each element of the matrix \endcode // \n \subsection matrix_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse matrix to compute the complex // conjugate of each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode // Additionally, matrices can be conjugated in-place via the \c conjugate() 
function: \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); conjugate( C ); // In-place conjugate operation. C = conj( C ); // Same as above \endcode // \n \subsection matrix_operators_real real() // // The \c real() function can be used on a dense or sparse matrix to extract the real part of // each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode // \n \subsection matrix_operators_imag imag() // // The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part // of each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the imaginary part of each matrix element // ( 0 -1 ) // ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // matrix can be computed: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; B = sqrt( A ); // Computes the square root of each element C = invsqrt( A ); // Computes the inverse square root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! 
// // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = pow( A, 1.2 ); // Computes the exponential value of each element \endcode // \n \subsection matrix_operators_exp exp() // // \c exp() computes the base e exponential of each element of a matrix: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account!
// // // \n \subsection matrix_operators_log log() / log10() // // The \c log() and \c log10() functions can be used to compute the natural and common logarithm // of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = log( A ); // Computes the natural logarithm of each element B = log10( A ); // Computes the common logarithm of each element \endcode // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sin( A ); // Computes the sine of each element of the matrix B = cos( A ); // Computes the cosine of each element of the matrix B = tan( A ); // Computes the tangent of each element of the matrix B = asin( A ); // Computes the inverse sine of each element of the matrix B = acos( A ); // Computes the inverse cosine of each element of the matrix B = atan( A ); // Computes the inverse tangent of each element of the matrix \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! 
// // // \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix \endcode // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = erf( A ); // Computes the error function of each element B = erfc( A ); // Computes the complementary error function of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_foreach forEach() // // Via the \c forEach() function it is possible to execute custom operations on dense and sparse // matrices. For instance, the following example demonstrates a custom square root computation via // a lambda: \code blaze::DynamicMatrix<double> A, B; B = forEach( A, []( double d ) { return std::sqrt( d ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. 
// // // \n \subsection matrix_operations_matrix_transpose trans() // // Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into // a column-major matrix and vice versa: \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); M1 = M2; // Assigning a column-major matrix to a row-major matrix M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major matrices \endcode // Additionally, matrices can be transposed in-place via the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. 
// // // \n \subsection matrix_operations_conjugate_transpose ctrans() // // The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian // conjugate, or transjugate) can be computed via the \c ctrans() function: \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix \endcode // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note however that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_matrix_determinant det() // // The determinant of a square dense matrix can be computed by means of the \c det() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization double d = det( A ); // Compute the determinant of A \endcode // In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // \note The \c det() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. 
The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function depends on LAPACK kernels. Thus the function can only be used if the // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The inverse of a square dense matrix can be computed via the \c inv() function: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and initialization B = inv( A ); // Compute the inverse of A \endcode // Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert() // function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization invert( A ); // In-place matrix inversion \endcode // Both the \c inv() and the \c invert() functions will automatically select the most suited matrix // inversion algorithm depending on the size and type of the given matrix. For small matrices of // up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices // larger than 6x6 the inversion is performed by means of the most suited matrix decomposition // method: In case of a general or triangular matrix the LU decomposition is used, for symmetric // matrices the LDLT decomposition is applied and for Hermitian matrices the LDLH decomposition is // performed.
However, via the \c invert() function it is possible to explicitly specify the matrix // inversion algorithm: \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using blaze::byLLH; // In-place inversion with automatic selection of the inversion algorithm invert( A ); // In-place inversion of a general matrix by means of an LU decomposition invert<byLU>( A ); // In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLT>( A ); // In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLH>( A ); // In-place inversion of a positive definite matrix by means of a Cholesky decomposition invert<byLLH>( A ); \endcode // Whereas the inversion by means of an LU decomposition works for every general square matrix, // the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is // restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works // for Hermitian positive definite matrices. Please note that it is in the responsibility of the // function caller to guarantee that the selected algorithm is suited for the given matrix. In // case this precondition is violated the result can be wrong and might not represent the inverse // of the given matrix! // // For both the \c inv() and \c invert() function the matrix inversion fails if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. // // \note The matrix inversion can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! 
// // \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can // only be used if the fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \note It is not possible to use any kind of view on the expression object returned by the // \c inv() function. Also, it is not possible to access individual elements via the function call // operator on the expression object: \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression! inv( A )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The inversion functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the matrix may already have been modified. // // // \n \section matrix_operations_decomposition Matrix Decomposition // <hr> // // \note All decomposition functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can // only be used if the fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \subsection matrix_operations_decomposition_lu LU Decomposition // // The LU decomposition of a dense matrix can be computed via the \c lu() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a row-major matrix assert( A == L * U * P ); \endcode \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a column-major matrix assert( A == P * L * U ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the // three matrices \c A, \c L and \c U are required to have the same storage order. Also, please // note that the way the permutation matrix \c P needs to be applied differs between row-major and // column-major matrices, since the algorithm uses column interchanges for row-major matrices and // row interchanges for column-major matrices. // // Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates // the LU decomposition of a symmetric matrix into a lower and upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; blaze::DynamicMatrix<double,blaze::columnMajor> P; lu( A, L, U, P ); // LU decomposition of A \endcode // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition // // The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; llh( A, L ); // LLH decomposition of a row-major matrix assert( A == L * ctrans( L ) ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A // and \c L can have any storage order. // // Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates // the LLH decomposition of a symmetric matrix into a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; llh( A, L ); // Cholesky decomposition of A \endcode // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // The QR decomposition of a dense matrix can be computed via the \c qr() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> Q; blaze::DynamicMatrix<double,blaze::rowMajor> R; qr( A, Q, R ); // QR decomposition of a row-major matrix assert( A == Q * R ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c R can have any storage order. // // Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates // the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R; qr( A, Q, R ); // QR decomposition of A \endcode // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via // the \c rq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> R; blaze::DynamicMatrix<double,blaze::columnMajor> Q; rq( A, R, Q ); // RQ decomposition of a row-major matrix assert( A == R * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c R and \c Q can have any storage order. // // Also the \c rq() function can be used in combination with matrix adaptors. 
For instance, the // following example demonstrates the RQ decomposition of an Hermitian matrix into a general // matrix and an upper triangular matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; rq( A, R, Q ); // RQ decomposition of A \endcode // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // The QL decomposition of a dense matrix can be computed via the \c ql() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::DynamicMatrix<double,blaze::columnMajor> L; ql( A, Q, L ); // QL decomposition of a row-major matrix assert( A == Q * L ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c L can have any storage order. // // Also the \c ql() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the QL decomposition of a symmetric matrix into a general // matrix and a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; ql( A, Q, L ); // QL decomposition of A \endcode // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // The LQ decomposition of a dense matrix can be computed via the \c lq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; blaze::DynamicMatrix<double,blaze::columnMajor> Q; lq( A, L, Q ); // LQ decomposition of a row-major matrix assert( A == L * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c L and \c Q can have any storage order. // // Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates // the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; lq( A, L, Q ); // LQ decomposition of A \endcode // \n Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors*************************************************************************************** /*!\page adaptors Adaptors // // \tableofcontents // // // \section adaptors_general General Concepts // <hr> // // Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the // matrices such that certain invariants are preserved. Due to this adaptors can provide a compile // time guarantee of certain properties, which can be exploited for optimized performance. 
// // The \b Blaze library provides a total of 9 different adaptors: // // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices // <ul> // <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix </li> // <li> \ref adaptors_triangular_matrices_unilowermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_uppermatrix </li> // <li> \ref adaptors_triangular_matrices_uniuppermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Diagonal Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // </li> // </ul> // </li> // </ul> // // In combination with the general matrix types, \b Blaze provides a total of 40 different matrix // types that make it possible to exactly adapt the type of matrix to every specific problem. // // // \n \section adaptors_examples Examples // <hr> // // The following code examples give an impression on the use of adaptors. The first example shows // the multiplication between two lower matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. 
Thus the adaptor provides a significant // performance advantage in comparison to a general matrix multiplication, especially for large // matrices. // // The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse // vector multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. // // \n Previous: \ref matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices***************************************************************************** /*!\page adaptors_symmetric_matrices Symmetric Matrices // // \tableofcontents // // // \n \section adaptors_symmetric_matrices_general Symmetric Matrices // <hr> // // In contrast to general matrices, which have no restriction in their number of rows and columns // and whose elements can have any value, symmetric matrices provide the compile time guarantee // to be square matrices with pair-wise identical values. 
Mathematically, this means that a // symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can // be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze // library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it // by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its // transpose \f$ A = A^T \f$). It can be included via the header file \code #include <blaze/math/SymmetricMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class SymmetricMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible symmetric matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense symmetric matrix with static memory blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense symmetric matrix based on HybridMatrix blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision symmetric matrix blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E; \endcode // The storage order of a symmetric matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix // will also be a column-major matrix. // // // \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. 
However, there are // some important exceptions resulting from the symmetry constraint: // // -# <b>\ref adaptors_symmetric_matrices_square</b> // -# <b>\ref adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref adaptors_symmetric_matrices_initialization</b> // // \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 symmetric static matrix SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced! // // This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it is only possible to assign matrices that are // symmetric themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, row-major 3x3 symmetric compressed matrix SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); // Initializing three elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator *A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; symmetric invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a symmetric sparse matrix. 
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2) \endcode // The symmetry property is also enforced for symmetric custom matrices: In case the given array // of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomSymmetric; // Creating a 3x3 symmetric custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomSymmetric A( array, 3UL ); // OK // Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array CustomSymmetric B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the // symmetric matrix. 
The following example demonstrates that modifying the elements of an entire // row of the symmetric matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = 4; A(2,3) = 5; // Setting all elements in the 1st row to 0 results in the matrix // // ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) // row( A, 1 ) = 0; \endcode // The next example demonstrates the (compound) assignment to submatrices of symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the symmetric property of // dense symmetric matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A SymmetricMatrix matrix can participate in numerical operations in any way any other dense // or sparse matrix can participate. It can also be combined with any other dense or sparse vector // or matrix. The following code example gives an impression of the use of SymmetricMatrix within // arithmetic operations: \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; CompressedMatrix<float> E( 3, 3 ); // Empty row-major sparse single precision 3x3 matrix SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > F; SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > G; F = A + B; // Matrix addition and assignment to a row-major symmetric matrix G = A - C; // Matrix subtraction and assignment to a column-major symmetric matrix G = A * E; // Matrix multiplication between a dense and a sparse matrix A *= 2.0; // In-place scaling of matrix A F = 2.0 * B; // Scaling of matrix B G = E * 2.0; // Scaling of matrix E F += A - B; // Addition assignment G -= A + C; // Subtraction assignment G *= A * E; // Multiplication assignment \endcode // \n \section adaptors_symmetric_matrices_block_structured Block-Structured Symmetric Matrices // <hr> // // It is also possible to use block-structured symmetric matrices: \code using 
blaze::CompressedMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; // Definition of a 3x3 block-structured symmetric matrix based on CompressedMatrix SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 ); \endcode // Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and // guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ a_{ji} \f$: \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } ); // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode // \n \section adaptors_symmetric_matrices_performance Performance Considerations // <hr> // // When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor // instead of a general matrix can be a considerable performance advantage. The \b Blaze library // tries to exploit the properties of symmetric matrices whenever possible. However, there are // also situations when using a symmetric matrix introduces some overhead. The following examples // demonstrate several situations where symmetric matrices can positively or negatively impact // performance. // // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< CompressedMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... 
Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the // SymmetricMatrix adapter is obviously an advantage. // // \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. 
// // \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Row; using blaze::rowMajor; using blaze::columnMajor; typedef SymmetricMatrix< DynamicMatrix<double,columnMajor> > DynamicSymmetric; DynamicSymmetric A( 10UL ); Row<DynamicSymmetric> row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a symmetric matrix on the // left-hand side of an assignment (i.e. 
for write access) may introduce additional overhead when // it is assigned a general matrix, which is not symmetric at compile time: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the symmetric matrix; no performance penalty C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead \endcode // When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary // to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property // of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two symmetric matrices does not necessarily result in another symmetric matrix: \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; // Results in a symmetric matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead \endcode // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices***************************************************************************** /*!\page adaptors_hermitian_matrices Hermitian Matrices // // \tableofcontents // // // \n \section adaptors_hermitian_matrices_general Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b 
Blaze also provides an adaptor for Hermitian matrices. // Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise // conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal // to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have // a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze // library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // <hr> // // The HermitianMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to // its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file \code #include <blaze/math/HermitianMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class HermitianMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible Hermitian matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense Hermitian matrix with static memory blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; // Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision complex Hermitian matrix blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode // The storage order of a Hermitian matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix // will also be a column-major matrix. // // // \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits. // However, there are a couple of differences, both from a mathematical point of view as well as // from an implementation point of view. 
// // From a mathematical point of view, a matrix is called symmetric when it is equal to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two // conditions coincide, which means that symmetric matrices of real values are also Hermitian // and Hermitian matrices of real values are also symmetric. // // From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data // types (i.e. all integral types except \c bool, floating point and complex types), whereas // symmetric matrices can also be block structured (i.e. can have vector or matrix elements). // For built-in element types, the HermitianMatrix adaptor behaves exactly like the according // SymmetricMatrix implementation. For complex element types, however, the Hermitian property // is enforced (see also \ref adaptors_hermitian_matrices_hermitian). \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::HermitianMatrix; using blaze::SymmetricMatrix; // The following two matrices provide an identical experience (including performance) HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric // The following two matrices will behave differently HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric // Block-structured Hermitian matrices are not allowed HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Block-structured symmetric matrix \endcode // \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices // <hr> // // A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. 
// It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the Hermitian symmetry constraint: // // -# <b>\ref adaptors_hermitian_matrices_square</b> // -# <b>\ref adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref adaptors_hermitian_matrices_initialization</b> // // \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 Hermitian static matrix HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced! 
// // This means that the following properties of a Hermitian matrix are always guaranteed: // // - The diagonal elements are real numbers, i.e. the imaginary part is zero // - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its // counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that // are Hermitian themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; typedef std::complex<double> cplx; // Default constructed, row-major 3x3 Hermitian compressed matrix HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); // Initializing the matrix via the function call operator // // ( (1, 0) (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function // // ( (1, 0) (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator // // ( (1, 0) (8,1) (2, 1) ) // ( (8,-1) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // *A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function // // ( (0, 0) (8,1) (0, 0) ) // ( (8,-1) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) 
}, { cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; C = D; // Throws an exception; Hermitian invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a Hermitian sparse matrix. In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; typedef std::complex<double> cplx; // Setup of the Hermitian matrix // // ( (0, 0) (1,2) (3,-4) ) // A = ( (1,-2) (2,0) (0, 0) ) // ( (3, 4) (0,0) (0, 0) ) // HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0) A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1) A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2) \endcode // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array // of elements does not represent a Hermitian matrix, a \c std::invalid_argument 
exception is // thrown: \code using blaze::CustomMatrix; using blaze::HermitianMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomHermitian; // Creating a 3x3 Hermitian custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomHermitian A( array, 3UL ); // OK // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array CustomHermitian B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the // Hermitian matrix. The following example demonstrates that modifying the elements of an entire // row of the Hermitian matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; typedef std::complex<double> cplx; // Setup of the Hermitian matrix // // ( (0, 0) (1,-1) (0,0) (2, 1) ) // A = ( (1, 1) (3, 0) (4,2) (0, 0) ) // ( (0, 0) (4,-2) (0,0) (5,-3) ) // ( (2,-1) (0, 0) (5,3) (0, 0) ) // HermitianMatrix< DynamicMatrix<cplx> > A( 4 ); A(0,1) = cplx( 1.0, -1.0 ); A(0,3) = cplx( 2.0, 1.0 ); A(1,1) = cplx( 3.0, 0.0 ); A(1,2) = cplx( 4.0, 2.0 ); A(2,3) = cplx( 5.0, 3.0 ); // Setting all elements in the 1st row to 0 results in the matrix // // ( (0, 0) (0,0) (0,0) (2, 1) ) // A = ( (0, 0) (0,0) (0,0) (0, 0) ) // ( (0, 0) (0,0) (0,0) (5,-3) ) // ( (2,-1) (0,0) (5,3) (0, 0) ) // row( A, 1 ) = cplx( 0.0, 0.0 ); \endcode // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices. // Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian // symmetry of the matrix is preserved. 
Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; typedef std::complex<double> cplx; // Setup of two default 4x4 Hermitian matrices HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( (1,-1) (2, 5) ) // B = ( (3, 0) (4,-6) ) // ( (5, 0) (6, 0) ) // DynamicMatrix<cplx> B( 3UL, 2UL ); B(0,0) = cplx( 1.0, -1.0 ); B(0,1) = cplx( 2.0, 5.0 ); B(1,0) = cplx( 3.0, 0.0 ); B(1,1) = cplx( 4.0, -6.0 ); B(2,0) = cplx( 5.0, 0.0 ); B(2,1) = cplx( 6.0, 0.0 ); // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved // // ( (0, 0) (0, 0) (1,-1) (2, 5) ) // A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) ) // ( (1, 1) (3, 0) (5, 0) (6, 0) ) // ( (2,-5) (4, 6) (6, 0) (0, 0) ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved! // The elements marked with X cannot be assigned unambiguously! // // ( (0, 0) (1,-1) (2,5) (0,0) ) // A2 = ( (1, 1) (3, 0) (X,X) (0,0) ) // ( (2,-5) (X, X) (6,0) (0,0) ) // ( (0, 0) (0, 0) (0,0) (0,0) ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the Hermitian property of // dense Hermitian matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A HermitianMatrix can be used within all numerical operations in any way any other dense or // sparse matrix can be used. It can also be combined with any other dense or sparse vector or // matrix. The following code example gives an impression of the use of HermitianMatrix within // arithmetic operations: \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; typedef complex<float> cplx; DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, 3 ); HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 ); HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix F = A * D; // Matrix multiplication between a dense and a sparse matrix C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B F = C * 2.0; // Scaling of matrix C E += A - B; // Addition assignment F -= C + D; // Subtraction assignment F *= A * D; // Multiplication assignment \endcode // \n \section 
adaptors_hermitian_matrices_performance Performance Considerations // <hr> // // When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor // instead of a general matrix can be a considerable performance advantage. This is particularly // true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The // \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever // possible. However, there are also situations when using a Hermitian matrix introduces some // overhead. The following examples demonstrate several situations where Hermitian matrices can // positively or negatively impact performance. // // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a // symmetric matrix is obviously an advantage. 
// // \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::Row; using blaze::rowMajor; using blaze::columnMajor; typedef HermitianMatrix< DynamicMatrix<double,columnMajor> > DynamicHermitian; DynamicHermitian A( 10UL ); // Both Hermitian and symmetric Row<DynamicHermitian> row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. 
However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a Hermitian matrix on the // left-hand side of an assignment (i.e. for write access) may introduce additional overhead when // it is assigned a general matrix, which is not Hermitian at compile time: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; HermitianMatrix< DynamicMatrix< complex<double> > > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the Hermitian matrix; no performance penalty C = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead C = B; // Assignment of a general matrix to a Hermitian matrix; some runtime overhead \endcode // When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary // to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property // of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as // possible, if it is not, an exception is thrown. 
In order to prevent this runtime overhead it is // therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix: \code HermitianMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a Hermitian matrix; no runtime overhead C = A - B; // Results in a Hermitian matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a Hermitian matrix; some runtime overhead \endcode // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices**************************************************************************** /*!\page adaptors_triangular_matrices Triangular Matrices // // \tableofcontents // // // \n \section adaptors_triangular_matrices_general Triangular Matrices // <hr> // // Triangular matrices come in three flavors: Lower triangular matrices provide the compile time // guarantee to be square matrices and that the upper part of the matrix contains only default // elements that cannot be modified. Upper triangular matrices on the other hand provide the // compile time guarantee to be square and that the lower part of the matrix contains only fixed // default elements. Finally, diagonal matrices provide the compile time guarantee to be square // and that both the lower and upper part of the matrix contain only immutable default elements. // These properties can be exploited to gain higher performance and/or to save memory. 
Within the // \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized // by the following class templates: // // Lower triangular matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper triangular matrices: // - <b>\ref adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // <hr> // // The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower // triangular matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/LowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class LowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense lower matrix with static memory blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense lower matrix based on HybridMatrix blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense lower matrix based on DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense lower matrix based on CustomMatrix blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision lower matrix blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode // The storage order of a lower matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements above the diagonal are 0 (lower unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower unitriangular matrices: \code // Definition of a 3x3 row-major dense unilower matrix with static memory blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense unilower matrix based on HybridMatrix blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense unilower matrix based on DynamicMatrix blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision unilower matrix blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a lower unitriangular matrix is depending on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix. // Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the unilower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements above the diagonal are 0 (strictly lower triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly lower triangular matrices: \code // Definition of a 3x3 row-major dense strictly lower matrix with static memory blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly lower matrix blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly lower triangular matrix is depending on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix. // Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly lower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types. 
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper // triangular matrix): \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper matrices: \code // Definition of a 3x3 row-major dense upper matrix with static memory blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense upper matrix based on HybridMatrix blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense upper matrix based on DynamicMatrix blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision upper matrix blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. 
is specified // as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements below the diagonal are 0 (upper unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible upper unitriangular matrices: \code // Definition of a 3x3 row-major dense uniupper matrix with static memory blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense uniupper matrix based on HybridMatrix blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision uniupper matrix blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper unitriangular matrix is depending on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the uniupper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements below the diagonal are 0 (strictly upper triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly upper triangular matrices: \code // Definition of a 3x3 row-major dense strictly upper matrix with static memory blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly upper matrix blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly upper triangular matrix is depending on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly upper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all matrix elements above and below the diagonal // are 0 (diagonal matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/DiagonalMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class DiagonalMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible diagonal matrices: \code // Definition of a 3x3 row-major dense diagonal matrix with static memory blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense diagonal matrix based on HybridMatrix blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision diagonal matrix blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a diagonal matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. 
is specified // as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices // <hr> // // A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the triangular matrix constraint: // // -# <b>\ref adaptors_triangular_matrices_square</b> // -# <b>\ref adaptors_triangular_matrices_triangular</b> // -# <b>\ref adaptors_triangular_matrices_initialization</b> // -# <b>\ref adaptors_triangular_matrices_storage</b> // -# <b>\ref adaptors_triangular_matrices_scaling</b> // // \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square! 
// // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 lower dynamic matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 lower static matrix LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced! // // This means that it is only allowed to modify elements in the lower part or the diagonal of // a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix. // Unitriangular and strictly triangular matrices are even more restrictive and don't allow the // modification of diagonal elements. Also, triangular matrices can only be assigned matrices that // don't violate their triangular property. The following example demonstrates this restriction // by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types // see the according class documentations. 
\code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; typedef LowerMatrix< CompressedMatrix<double,rowMajor> > CompressedLower; // Default constructed, row-major 3x3 lower compressed matrix CompressedLower A( 3 ); // Initializing elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(2,0) = 2.0; // Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an exception; invalid modification of upper element // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element // Appending an element via the append() function A.reserve( 1, 3 ); // Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); *it = 6.0; // Modifies the lower element (1,0) ++it; *it = 9.0; // Modifies the diagonal element (1,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element (2,0) // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; lower matrix invariant would be violated! 
\endcode // The triangular property is also enforced during the construction of triangular custom matrices: // In case the given array of elements does not represent the according triangular matrix type, a // \c std::invalid_argument exception is thrown: \code using blaze::CustomMatrix; using blaze::LowerMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomLower; // Creating a 3x3 lower custom matrix from a properly initialized array double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; CustomLower A( array, 3UL ); // OK // Attempt to create a second 3x3 lower custom matrix from an uninitialized array CustomLower B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...) // on the triangular matrix. The following example demonstrates that modifying the elements of an // entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements. // Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types // see the according class documentations. \code using blaze::DynamicMatrix; using blaze::LowerMatrix; // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; // Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) // ( 4 0 5 0 ) // row( A, 2 ) = 9; // Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode // The next example demonstrates the (compound) assignment to rows/columns and submatrices of // triangular matrices. 
Since only lower/upper and potentially diagonal elements may be modified // the matrix to be assigned must be structured such that the triangular matrix invariant of the // matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowVector; // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK // Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element // marked with X cannot be assigned and triggers an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws an exception! // Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; B(2,1) = 9; // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be // preserved! The elements marked with X cannot be assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 = ( 0 7 X 0 ) // ( 0 8 8 X ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency during the creation of a dense lower or // upper matrix this initialization is important since otherwise the lower/upper matrix property // of dense lower matrices would not be guaranteed: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // 5x5 row-major lower dynamic matrix with default initialized upper matrix LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); // 7x7 column-major upper dynamic matrix with default initialized lower matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); // 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); \endcode // \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements! // // All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable // elements in the lower or upper part, respectively. Therefore dense triangular matrices don't // provide any kind of memory reduction! There are two main reasons for this: First, storing also // the zero elements guarantees maximum performance for many algorithms that perform vectorized // operations on the triangular matrices, which is especially true for small dense matrices. // Second, conceptually all triangular adaptors merely restrict the interface to the matrix type // \c MT and do not change the data layout or the underlying matrix type. // // This property matters most for diagonal matrices. 
In order to achieve the perfect combination // of performance and memory consumption for a diagonal matrix it is recommended to use dense // matrices for small diagonal matrices and sparse matrices for large diagonal matrices: \code // Recommendation 1: use dense matrices for small diagonal matrices typedef blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> > SmallDiagonalMatrix; // Recommendation 2: use sparse matrices for large diagonal matrices typedef blaze::DiagonalMatrix< blaze::CompressedMatrix<float> > LargeDiagonalMatrix; \endcode // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled! // // Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible // to self-scale such a matrix: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; UniLowerMatrix< DynamicMatrix<int> > A( 4 ); A *= 2; // Compilation error; Scale operation is not available on an unilower matrix A /= 2; // Compilation error; Scale operation is not available on an unilower matrix A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix \endcode // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A lower and upper triangular matrix can participate in numerical operations in any way any other // dense or sparse matrix can participate. It can also be combined with any other dense or sparse // vector or matrix. 
The following code example gives an impression of the use of blaze::LowerMatrix // and blaze::UpperMatrix within arithmetic operations: \code using blaze::LowerMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); UpperMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; UpperMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major lower matrix F = C - D; // Matrix subtraction and assignment to a column-major upper matrix F = A * D; // Matrix multiplication between a dense and a sparse matrix C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B F = C * 2.0; // Scaling of matrix C E += A - B; // Addition assignment F -= C + D; // Subtraction assignment F *= A * D; // Multiplication assignment \endcode // Note that diagonal, unitriangular and strictly triangular matrix types can be used in the same // way, but may pose some additional restrictions (see the according class documentations). // // // \n \section adaptors_triangular_matrices_block_structured Block-Structured Triangular Matrices // <hr> // // It is also possible to use block-structured triangular matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Definition of a 5x5 block-structured lower matrix based on DynamicMatrix LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Definition of a 7x7 block-structured upper matrix based on CompressedMatrix UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // Also in this case the triangular matrix invariant is enforced, i.e. 
it is not possible to // manipulate elements in the upper part (lower triangular matrix) or the lower part (upper // triangular matrix) of the matrix: \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } }; A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception \endcode // Note that unitriangular matrices are restricted to numeric element types and therefore cannot // be used for block-structured matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::UniLowerMatrix; using blaze::UniUpperMatrix; // Compilation error: lower unitriangular matrices are restricted to numeric element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Compilation error: upper unitriangular matrices are restricted to numeric element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // \n \section adaptors_triangular_matrices_performance Performance Considerations // <hr> // // The \b Blaze library tries to exploit the properties of lower and upper triangular matrices // whenever and wherever possible. Therefore using triangular matrices instead of a general // matrices can result in a considerable performance improvement. However, there are also // situations when using a triangular matrix introduces some overhead. The following examples // demonstrate several common situations where triangular matrices can positively or negatively // impact performance. // // \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. 
The following example demonstrates this by // means of a dense matrix/dense matrix multiplication with lower triangular matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // In comparison to a general matrix multiplication, the performance advantage is significant, // especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix // and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular, // respectively. Note however that the performance advantage is most pronounced for dense matrices // and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar performance improvement can be gained when using a triangular matrix in a matrix/vector // multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; LowerMatrix< DynamicMatrix<double,rowMajor> > A; DynamicVector<double,columnVector> x, y; // ... Resizing and initialization y = A * x; \endcode // In this example, \b Blaze also exploits the structure of the matrix and approx. halves the // runtime of the multiplication. Also in case of matrix/vector multiplications the performance // improvement is most pronounced for dense matrices and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for // read access), which introduces absolutely no performance penalty, using a triangular matrix // on the left-hand side of an assignment (i.e. 
for write access) may introduce additional // overhead when it is assigned a general matrix, which is not triangular at compile time: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; LowerMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the lower matrix; no performance penalty C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead \endcode // When assigning a general (potentially not lower triangular) matrix to a lower matrix or a // general (potentially not upper triangular) matrix to an upper matrix it is necessary to check // whether the matrix is lower or upper at runtime in order to guarantee the triangular property // of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as // efficiently as possible; if it is not, an exception is thrown. In order to prevent this runtime // overhead it is therefore generally advisable to assign lower or upper triangular matrices to // other lower or upper triangular matrices.\n // In this context it is especially noteworthy that the addition, subtraction, and multiplication // of two triangular matrices of the same structure always results in another triangular matrix: \code LowerMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a lower matrix; no runtime overhead C = A - B; // Results in a lower matrix; no runtime overhead C = A * B; // Results in a lower matrix; no runtime overhead \endcode \code UpperMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in an upper matrix; no runtime overhead C = A - B; // Results in an upper matrix; no runtime overhead C = A * B; // Results in an upper matrix; no runtime overhead \endcode // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views */ //************************************************************************************************* 
//**Views****************************************************************************************** /*!\page views Views // // \tableofcontents // // // \section views_general General Concepts // <hr> // // Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific // row or column of a matrix. As such, views act as a reference to a specific part of a vector // or matrix. This reference is valid and can be used in every way as any other vector or matrix // can be used as long as the referenced vector or matrix is not resized or entirely destroyed. // Views also act as aliases to the elements of the vector or matrix: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) via the view are immediately visible in // the vector or matrix and changes made via the vector or matrix are immediately visible in the // view. // // The \b Blaze library provides the following views on vectors and matrices: // // Vector views: // - \ref views_subvectors // // Matrix views: // - \ref views_submatrices // - \ref views_rows // - \ref views_columns // // // \n \section views_examples Examples \code using blaze::DynamicMatrix; using blaze::StaticVector; // Setup of the 3x5 row-major matrix // // ( 1 0 -2 3 0 ) // ( 0 2 5 -1 -1 ) // ( 1 0 0 2 1 ) // DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; // Setup of the 2-dimensional row vector // // ( 18 19 ) // StaticVector<int,2UL,rowVector> vec{ 18, 19 }; // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // subvector( row( A, 1UL ), 2UL, 2UL ) = vec; \endcode // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref views_subvectors */ //************************************************************************************************* //**Subvectors************************************************************************************* /*!\page 
views_subvectors Subvectors // // \tableofcontents // // // Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors // act as a reference to a specific range within a vector. This reference is valid and can be // used in every way any other dense or sparse vector can be used as long as the vector containing // the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the // vector elements in the specified range: Changes made to the elements (e.g. modifying values, // inserting or erasing elements) are immediately visible in the vector and changes made via the // vector are immediately visible in the subvector. // // // \n \section views_subvectors_class The Subvector Class Template // <hr> // // The blaze::Subvector class template represents a view on a specific subvector of a dense or // sparse vector primitive. It can be included via the header file \code #include <blaze/math/Subvector.h> \endcode // The type of the vector is specified via two template parameters: \code template< typename VT, bool AF > class Subvector; \endcode // - \c VT: specifies the type of the vector primitive. Subvector can be used with every vector // primitive or view, but does not work with any vector expression type. // - \c AF: the alignment flag specifies whether the subvector is aligned (blaze::aligned) or // unaligned (blaze::unaligned). The default value is blaze::unaligned. // // // \n \section views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense or sparse subvector can be created very conveniently via the \c subvector() // function. This view can be treated as any other vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. A subvector created from a row // vector can be used as any other row vector, a subvector created from a column vector can be // used as any other column vector. 
The view can also be used on both sides of an assignment: // The subvector can either be used as an alias to grant write access to a specific subvector // of a vector primitive on the left-hand side of an assignment or to grant read-access to a // specific subvector of a vector primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; typedef blaze::CompressedVector<int,blaze::rowVector> SparseVectorType; DenseVectorType d1, d2; SparseVectorType s1, s2; // ... Resizing and initialization // Creating a view on the first ten elements of the dense vector d1 blaze::Subvector<DenseVectorType> dsv = subvector( d1, 0UL, 10UL ); // Creating a view on the second ten elements of the sparse vector s1 blaze::Subvector<SparseVectorType> ssv = subvector( s1, 10UL, 10UL ); // Creating a view on the addition of d2 and s2 dsv = subvector( d2 + s2, 5UL, 10UL ); // Creating a view on the multiplication of d2 and s2 ssv = subvector( d2 * s2, 2UL, 10UL ); \endcode // The \c subvector() function can be used on any dense or sparse vector, including expressions, // as demonstrated in the example. Note however that a blaze::Subvector can only be instantiated // with a dense or sparse vector primitive, i.e. with types that can be written, and not with an // expression type. // // // \n \section views_subvectors_common_operations Common Operations // <hr> // // A subvector view can be used like any other dense or sparse vector. For instance, the current // number of elements can be obtained via the \c size() function, the current capacity via the // \c capacity() function, and the number of non-zero elements via the \c nonZeros() function. // However, since subvectors are references to a specific range of a vector, several operations // are not possible on views, such as resizing and swapping. 
The following example shows this by // means of a dense subvector view: \code typedef blaze::DynamicVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 42UL ); // ... Resizing and initialization // Creating a view on the range [5..15] of vector v SubvectorType sv = subvector( v, 5UL, 10UL ); sv.size(); // Returns the number of elements in the subvector sv.capacity(); // Returns the capacity of the subvector sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector SubvectorType sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_subvectors_element_access Element Access // <hr> // // The elements of a subvector can be directly accessed via the subscript operator: \code typedef blaze::DynamicVector<double,blaze::rowVector> VectorType; VectorType v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode \code typedef blaze::CompressedVector<double,blaze::rowVector> VectorType; VectorType v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode // The numbering of the subvector elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the specified size of the subvector. Alternatively, the elements of a subvector can // be traversed via iterators. 
Just as with vectors, in case of non-const subvectors, \c begin() // and \c end() return an Iterator, which allows a manipulation of the non-zero values, in case // of constant subvectors a ConstIterator is returned: \code typedef blaze::DynamicVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of the dense vector v SubvectorType sv = subvector( v, 16UL, 64UL ); for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // OK: Write access to the dense subvector value. ... = *it; // OK: Read access to the dense subvector value. } for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense subvector value. } \endcode \code typedef blaze::CompressedVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of the sparse vector v SubvectorType sv = subvector( v, 16UL, 64UL ); for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_subvectors_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse subvector can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedVector<double,blaze::rowVector> VectorType; VectorType v( 256UL ); // Non-initialized vector of size 256 typedef blaze::Subvector<VectorType> SubvectorType; SubvectorType sv( subvector( v, 10UL, 60UL ) ); // View on the range [10..69] of v // The subscript operator provides access to all possible elements of the sparse subvector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse subvector, the element is inserted into the // subvector. sv[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the vector it is inserted into the vector, if it is already contained // in the vector its value is modified. sv.set( 45UL, -1.2 ); // An alternative for inserting elements into the subvector is the insert() function. However, // it inserts the element only in case the element is not already contained in the subvector. sv.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. In // case of subvectors, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the subvector and that the subvector's // capacity is large enough to hold the new element. Note however that due to the nature of // a subvector, which may be an alias to the middle of a sparse vector, the append() function // does not work as efficiently for a subvector as it does for a vector. 
sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); \endcode // \n \section views_subvectors_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse subvectors can be used in all arithmetic operations that any other dense // or sparse vector can be used in. The following example gives an impression of the use of dense // subvectors within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with // fitting element types: \code typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType; DenseVectorType d1, d2, d3; SparseVectorType s1, s2; // ... Resizing and initialization typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; DenseMatrixType A; typedef blaze::Subvector<DenseVectorType> SubvectorType; SubvectorType dsv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1 dsv = d2; // Dense vector initialization of the range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19] d3 = dsv + d2; // Dense vector/dense vector addition s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition d2 = dsv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9] subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) *= dsv; // Multiplication assignment double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors \endcode // \n 
\section views_aligned_subvectors Aligned Subvectors // <hr> // // Usually subvectors can be defined anywhere within a vector. They may start at any position and // may have an arbitrary size (only restricted by the size of the underlying vector). However, in // contrast to vectors themselves, which are always properly aligned in memory and therefore can // provide maximum performance, this means that subvectors in general have to be considered to be // unaligned. This can be made explicit by the blaze::unaligned flag: \code using blaze::unaligned; typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; DenseVectorType x; // ... Resizing and initialization // Identical creations of an unaligned subvector in the range [8..23] blaze::Subvector<DenseVectorType> sv1 = subvector ( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType> sv2 = subvector<unaligned>( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType,unaligned> sv3 = subvector ( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType,unaligned> sv4 = subvector<unaligned>( x, 8UL, 16UL ); \endcode // All of these calls to the \c subvector() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide // full flexibility in the creation of subvectors, this might result in performance disadvantages // in comparison to vector primitives (even in case the specified subvector could be aligned). // Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a vector might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned subvectors. 
Aligned subvectors are identical to // unaligned subvectors in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying vector. Aligned subvectors are created by // explicitly specifying the blaze::aligned flag: \code using blaze::aligned; // Creating an aligned dense subvector in the range [8..23] blaze::Subvector<DenseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of the subvector must be aligned. The following source code gives some examples // for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double // values into a SIMD vector: \code using blaze::aligned; using blaze::columnVector; typedef blaze::DynamicVector<double,columnVector> VectorType; typedef blaze::Subvector<VectorType,aligned> SubvectorType; VectorType d( 17UL ); // ... Resizing and initialization // OK: Starts at the beginning, i.e. the first element is aligned SubvectorType dsv1 = subvector<aligned>( d, 0UL, 13UL ); // OK: Start index is a multiple of 4, i.e. the first element is aligned SubvectorType dsv2 = subvector<aligned>( d, 4UL, 7UL ); // OK: The start index is a multiple of 4 and the subvector includes the last element SubvectorType dsv3 = subvector<aligned>( d, 8UL, 9UL ); // Error: Start index is not a multiple of 4, i.e. the first element is not aligned SubvectorType dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense subvectors. // In contrast, aligned sparse subvectors at this time don't pose any additional restrictions. 
// Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case // the blaze::aligned flag is specified during setup, an aligned subvector is created: \code using blaze::aligned; typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType; SparseVectorType x; // ... Resizing and initialization // Creating an aligned subvector in the range [8..23] blaze::Subvector<SparseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL ); \endcode // \n \section views_subvectors_on_subvectors Subvectors on Subvectors // <hr> // // It is also possible to create a subvector view on another subvector. In this context it is // important to remember that the type returned by the \c subvector() function is the same type // as the type of the given subvector, not a nested subvector type, since the view on a subvector // is just another view on the underlying vector: \code typedef blaze::DynamicVector<double,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType d1; // ... Resizing and initialization // Creating a subvector view on the dense vector d1 SubvectorType sv1 = subvector( d1, 5UL, 10UL ); // Creating a subvector view on the dense subvector sv1 SubvectorType sv2 = subvector( sv1, 1UL, 5UL ); \endcode // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_submatrices */ //************************************************************************************************* //**Submatrices************************************************************************************ /*!\page views_submatrices Submatrices // // \tableofcontents // // // Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors // provide views on specific parts of vectors. As such, submatrices act as a reference to a // specific block within a matrix. 
This reference is valid and can be used in every way any // other dense or sparse matrix can be used as long as the matrix containing the submatrix is // not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements // in the specified block: Changes made to the elements (e.g. modifying values, inserting or // erasing elements) are immediately visible in the matrix and changes made via the matrix are // immediately visible in the submatrix. // // // \n \section views_submatrices_class The Submatrix Class Template // <hr> // // The blaze::Submatrix class template represents a view on a specific submatrix of a dense or // sparse matrix primitive. It can be included via the header file \code #include <blaze/math/Submatrix.h> \endcode // The type of the matrix is specified via two template parameters: \code template< typename MT, bool AF > class Submatrix; \endcode // - \c MT: specifies the type of the matrix primitive. Submatrix can be used with every matrix // primitive, but does not work with any matrix expression type. // - \c AF: the alignment flag specifies whether the submatrix is aligned (blaze::aligned) or // unaligned (blaze::unaligned). The default value is blaze::unaligned. // // // \n \section views_submatrices_setup Setup of Submatrices // <hr> // // A view on a submatrix can be created very conveniently via the \c submatrix() function. // This view can be treated as any other matrix, i.e. it can be assigned to, it can be copied // from, and it can be used in arithmetic operations. A submatrix created from a row-major // matrix will itself be a row-major matrix, a submatrix created from a column-major matrix // will be a column-major matrix. 
The view can also be used on both sides of an assignment: // The submatrix can either be used as an alias to grant write access to a specific submatrix // of a matrix primitive on the left-hand side of an assignment or to grant read-access to // a specific submatrix of a matrix primitive or expression on the right-hand side of an // assignment. The following example demonstrates this in detail: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<int,blaze::columnMajor> SparseMatrixType; DenseMatrixType D1, D2; SparseMatrixType S1, S2; // ... Resizing and initialization // Creating a view on the first 8x16 block of the dense matrix D1 blaze::Submatrix<DenseMatrixType> dsm = submatrix( D1, 0UL, 0UL, 8UL, 16UL ); // Creating a view on the second 8x16 block of the sparse matrix S1 blaze::Submatrix<SparseMatrixType> ssm = submatrix( S1, 0UL, 16UL, 8UL, 16UL ); // Creating a view on the addition of D2 and S2 dsm = submatrix( D2 + S2, 5UL, 10UL, 8UL, 16UL ); // Creating a view on the multiplication of D2 and S2 ssm = submatrix( D2 * S2, 7UL, 13UL, 8UL, 16UL ); \endcode // \n \section views_submatrices_common_operations Common Operations // <hr> // // The current size of the matrix, i.e. the number of rows or columns can be obtained via the // \c rows() and \c columns() functions, the current total capacity via the \c capacity() function, // and the number of non-zero elements via the \c nonZeros() function. However, since submatrices // are views on a specific submatrix of a matrix, several operations are not possible on views, // such as resizing and swapping: \code typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A; // ... 
Resizing and initialization // Creating a view on an 8x12 submatrix of matrix A SubmatrixType sm = submatrix( A, 0UL, 0UL, 8UL, 12UL ); sm.rows(); // Returns the number of rows of the submatrix sm.columns(); // Returns the number of columns of the submatrix sm.capacity(); // Returns the capacity of the submatrix sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix SubmatrixType sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_submatrices_element_access Element Access // <hr> // // The elements of a submatrix can be directly accessed with the function call operator: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType; MatrixType A; // ... Resizing and initialization // Creating a 8x8 submatrix, starting from position (4,4) blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A; // ... Resizing and initialization // Creating a 8x8 submatrix, starting from position (4,4) blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode // Alternatively, the elements of a submatrix can be traversed via (const) iterators. 
Just as // with matrices, in case of non-const submatrices, \c begin() and \c end() return an Iterator, // which allows a manipulation of the non-zero values, in case of constant submatrices a // ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of the dense matrix A SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) { *it = ...; // OK: Write access to the dense submatrix value. ... = *it; // OK: Read access to the dense submatrix value. } // Traversing the elements of the 1st row via iterators to const elements for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense submatrix value. } \endcode \code typedef blaze::CompressedMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of the sparse matrix A SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. 
} // Traversing the elements of the 1st row via iterators to const elements for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_submatrices_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 typedef blaze::Submatrix<MatrixType> SubmatrixType; SubmatrixType sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A // The function call operator provides access to all possible elements of the sparse submatrix, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse submatrix, the element is inserted into the // submatrix. sm(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the submatrix it is inserted into the submatrix, if it is already contained // in the submatrix its value is modified. sm.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the submatrix is the insert() function. However, // it inserts the element only in case the element is not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); // Just as in case of sparse matrices, elements can also be inserted via the append() function. 
// In case of submatrices, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index in the according row or column of the // submatrix and that the according row's or column's capacity is large enough to hold the new // element. Note however that due to the nature of a submatrix, which may be an alias to the // middle of a sparse matrix, the append() function does not work as efficiently for a // submatrix as it does for a matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_submatrices_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse submatrices can be used in all arithmetic operations that any other dense // or sparse matrix can be used in. The following example gives an impression of the use of dense // submatrices within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse matrices with // fitting element types: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType; DenseMatrixType D1, D2, D3; SparseMatrixType S1, S2; typedef blaze::CompressedVector<double,blaze::columnVector> SparseVectorType; SparseVectorType a, b; // ... 
Resizing and initialization typedef blaze::Submatrix<DenseMatrixType> SubmatrixType; SubmatrixType sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1 // starting from row 0 and column 0 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix // starting in row 0 and column 8 sm = S1; // Sparse matrix initialization of the second 8x8 submatrix D3 = sm + D2; // Dense matrix/dense matrix addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1 D2 = 2.0 * sm; // Scaling of a submatrix of D1 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // Usually submatrices can be defined anywhere within a matrix. They may start at any position and // may have an arbitrary extension (only restricted by the extension of the underlying matrix). // However, in contrast to matrices themselves, which are always properly aligned in memory and // therefore can provide maximum performance, this means that submatrices in general have to be // considered to be unaligned. This can be made explicit by the blaze::unaligned flag: \code using blaze::unaligned; typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; DenseMatrixType A; // ... 
Resizing and initialization // Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<DenseMatrixType> sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType> sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType,unaligned> sm3 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType,unaligned> sm4 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // All of these calls to the \c submatrix() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide // full flexibility in the creation of submatrices, this might result in performance disadvantages // in comparison to matrix primitives (even in case the specified submatrix could be aligned). // Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a matrix might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned submatrices. Aligned submatrices are identical to // unaligned submatrices in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying matrix. Aligned submatrices are created by // explicitly specifying the blaze::aligned flag: \code using blaze::aligned; // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<DenseMatrixType,aligned> sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). 
In order to be properly aligned the // first element of each row/column of the submatrix must be aligned. The following source code // gives some examples for a double precision row-major dynamic matrix, assuming that padding is // enabled and that AVX is available, which packs 4 \c double values into a SIMD vector: \code using blaze::aligned; using blaze::rowMajor; typedef blaze::DynamicMatrix<double,rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType,aligned> SubmatrixType; MatrixType D( 13UL, 17UL ); // ... Resizing and initialization // OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding) SubmatrixType dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL ); // OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding) SubmatrixType dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL ); // OK: First column is a multiple of 4 and the submatrix includes the last row and column SubmatrixType dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); // Error: First column is not a multiple of 4, i.e. the first element is not aligned SubmatrixType dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense submatrices. // In contrast, aligned sparse submatrices at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case // the blaze::aligned flag is specified during setup, an aligned submatrix is created: \code using blaze::aligned; typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType; SparseMatrixType A; // ... 
Resizing and initialization // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<SparseMatrixType,aligned> sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // \n \section views_submatrices_on_submatrices Submatrices on Submatrices // <hr> // // It is also possible to create a submatrix view on another submatrix. In this context it is // important to remember that the type returned by the \c submatrix() function is the same type // as the type of the given submatrix, since the view on a submatrix is just another view on the // underlying matrix: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType D1; // ... Resizing and initialization // Creating a submatrix view on the dense matrix D1 SubmatrixType sm1 = submatrix( D1, 4UL, 4UL, 8UL, 16UL ); // Creating a submatrix view on the dense submatrix sm1 SubmatrixType sm2 = submatrix( sm1, 1UL, 1UL, 4UL, 8UL ); \endcode // \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices // // Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Submatrix; typedef SymmetricMatrix< DynamicMatrix<int> > SymmetricDynamicType; typedef Submatrix< SymmetricDynamicType > SubmatrixType; // Setup of a 16x16 symmetric matrix SymmetricDynamicType A( 16UL ); // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 SubmatrixType sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode // It is important to note, however, that (compound) assignments to such submatrices have a // special restriction: The symmetry of the underlying symmetric matrix must not be broken! 
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref views_rows */ //************************************************************************************************* //**Rows******************************************************************************************* /*!\page views_rows Rows // // \tableofcontents // // // Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a // reference to a specific row. This reference is valid and can be used in every way any other // row vector can be used as long as the matrix containing the row is not resized or entirely // destroyed. The row also acts as an alias to the row elements: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix // and changes made via the matrix are immediately visible in the row. 
// // // \n \section views_rows_class The Row Class Template // <hr> // // The blaze::Row class template represents a reference to a specific row of a dense or sparse // matrix primitive. It can be included via the header file \code #include <blaze/math/Row.h> \endcode // The type of the matrix is specified via template parameter: \code template< typename MT > class Row; \endcode // \c MT specifies the type of the matrix primitive. Row can be used with every matrix primitive, // but does not work with any matrix expression type. // // // \n \section views_rows_setup Setup of Rows // <hr> // // A reference to a dense or sparse row can be created very conveniently via the \c row() function. // This reference can be treated as any other row vector, i.e. it can be assigned to, it can be // copied from, and it can be used in arithmetic operations. The reference can also be used on // both sides of an assignment: The row can either be used as an alias to grant write access to a // specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access // to a specific row of a matrix primitive or expression on the right-hand side of an assignment. // The following two examples demonstrate this for dense and sparse matrices: \code typedef blaze::DynamicVector<double,rowVector> DenseVectorType; typedef blaze::CompressedVector<double,rowVector> SparseVectorType; typedef blaze::DynamicMatrix<double,rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,rowMajor> SparseMatrixType; DenseVectorType x; SparseVectorType y; DenseMatrixType A, B; SparseMatrixType C, D; // ... 
Resizing and initialization // Setting the 2nd row of matrix A to x blaze::Row<DenseMatrixType> row2 = row( A, 2UL ); row2 = x; // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; // Setting x to the 4th row of the result of the matrix multiplication x = row( A * B, 4UL ); // Setting y to the 2nd row of the result of the sparse matrix multiplication y = row( C * D, 2UL ); \endcode // The \c row() function can be used on any dense or sparse matrix, including expressions, as // illustrated by the source code example. However, rows cannot be instantiated for expression // types, but only for matrix primitives, respectively, i.e. for matrix types that offer write // access. // // // \n \section views_rows_common_operations Common Operations // <hr> // // A row view can be used like any other row vector. For instance, the current number of elements // can be obtained via the \c size() function, the current capacity via the \c capacity() function, // and the number of non-zero elements via the \c nonZeros() function. However, since rows are // references to specific rows of a matrix, several operations are not possible on views, such // as resizing and swapping. The following example shows this by means of a dense row view: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a reference to the 2nd row of matrix A RowType row2 = row( A, 2UL ); row2.size(); // Returns the number of elements in the row row2.capacity(); // Returns the capacity of the row row2.nonZeros(); // Returns the number of non-zero elements contained in the row row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix RowType row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_rows_element_access Element Access // <hr> // // The elements of the row can be directly accessed with the subscript operator. The numbering // of the row elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of columns of the referenced matrix. Alternatively, the elements of // a row can be traversed via iterators. Just as with vectors, in case of non-const rows, // \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero // value, in case of a constant row a ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A RowType row31 = row( A, 31UL ); for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // OK; Write access to the dense row value ... = *it; // OK: Read access to the dense row value. } for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense row value. } \endcode \code typedef blaze::CompressedMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 128UL, 256UL ); // ... 
Resizing and initialization // Creating a reference to the 31st row of matrix A RowType row31 = row( A, 31UL ); for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_rows_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse row can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A( 10UL, 100UL ); // Non-initialized 10x100 matrix typedef blaze::Row<MatrixType> RowType; RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A // The subscript operator provides access to all possible elements of the sparse row, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse row, the element is inserted into the row. row0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the row it is inserted into the row, if it is already contained in // the row its value is modified. row0.set( 45UL, -1.2 ); // An alternative for inserting elements into the row is the insert() function. 
However, // it inserts the element only in case the element is not already contained in the row. row0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse row is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the row and that the row's capacity is large // enough to hold the new element. row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse rows can be used in all arithmetic operations that any other dense or // sparse row vector can be used in. The following example gives an impression of the use of // dense rows within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse rows with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrix; DenseMatrix A( 4UL, 2UL ); // Non-initialized 4x2 matrix typedef blaze::Row<DenseMatrix> RowType; RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A b = row0 + a; // Dense vector/dense vector addition b = c + row( A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, 2UL ); // Component-wise vector multiplication row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling of the 1st row row( A, 2UL ) += a; // Addition 
assignment row( A, 2UL ) -= c; // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors A = trans( c ) * row( A, 1UL ); // Outer product between two vectors \endcode // \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code typedef blaze::CompressedMatrix<int,columnMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 31st row of a column-major matrix A RowType row1 = row( A, 1UL ); for( RowType::Iterator it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode // However, please note that creating a row view on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a view on a matrix with // a fitting storage orientation. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices CompressedMatrix<double,columnMajor> A( 128UL, 128UL ); CompressedMatrix<double,columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th row of the multiplication between A and B ... CompressedVector<double,rowVector> x = row( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th row of the column-major matrix A with B. 
CompressedVector<double,rowVector> x = row( A, 15UL ) * B; \endcode // Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible // using a row-major storage order for matrix A would result in a more efficient evaluation. // // \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns**************************************************************************************** /*!\page views_columns Columns // // \tableofcontents // // // Just as rows provide a view on a specific row of a matrix, columns provide views on a specific // column of a dense or sparse matrix. As such, columns act as a reference to a specific column. // This reference is valid an can be used in every way any other column vector can be used as long // as the matrix containing the column is not resized or entirely destroyed. Changes made to the // elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the // matrix and changes made via the matrix are immediately visible in the column. // // // \n \section views_columns_class The Column Class Template // <hr> // // The blaze::Column class template represents a reference to a specific column of a dense or // sparse matrix primitive. It can be included via the header file \code #include <blaze/math/Column.h> \endcode // The type of the matrix is specified via template parameter: \code template< typename MT > class Column; \endcode // \c MT specifies the type of the matrix primitive. Column can be used with every matrix // primitive, but does not work with any matrix expression type. // // // \n \section views_colums_setup Setup of Columns // <hr> // // Similar to the setup of a row, a reference to a dense or sparse column can be created very // conveniently via the \c column() function. 
This reference can be treated as any other column // vector, i.e. it can be assigned to, copied from, and be used in arithmetic operations. The // column can either be used as an alias to grant write access to a specific column of a matrix // primitive on the left-hand side of an assignment or to grant read-access to a specific column // of a matrix primitive or expression on the right-hand side of an assignment. The following // two examples demonstrate this for dense and sparse matrices: \code typedef blaze::DynamicVector<double,columnVector> DenseVectorType; typedef blaze::CompressedVector<double,columnVector> SparseVectorType; typedef blaze::DynamicMatrix<double,columnMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,columnMajor> SparseMatrixType; DenseVectorType x; SparseVectorType y; DenseMatrixType A, B; SparseMatrixType C, D; // ... Resizing and initialization // Setting the 1st column of matrix A to x blaze::Column<DenseMatrixType> col1 = column( A, 1UL ); col1 = x; // Setting the 4th column of matrix B to y column( B, 4UL ) = y; // Setting x to the 2nd column of the result of the matrix multiplication x = column( A * B, 2UL ); // Setting y to the 2nd column of the result of the sparse matrix multiplication y = column( C * D, 2UL ); \endcode // The \c column() function can be used on any dense or sparse matrix, including expressions, as // illustrated by the source code example. However, columns cannot be instantiated for expression // types, but only for matrix primitives, respectively, i.e. for matrix types that offer write // access. // // // \n \section views_columns_common_operations Common Operations // <hr> // // A column view can be used like any other column vector. For instance, the current number of // elements can be obtained via the \c size() function, the current capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. 
However, since // columns are references to specific columns of a matrix, several operations are not possible on // views, such as resizing and swapping. The following example shows this by means of a dense // column view: \code typedef blaze::DynamicMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd column of matrix A ColumnType col2 = column( A, 2UL ); col2.size(); // Returns the number of elements in the column col2.capacity(); // Returns the capacity of the column col2.nonZeros(); // Returns the number of non-zero elements contained in the column col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix ColumnType col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_columns_element_access Element Access // <hr> // // The elements of the column can be directly accessed with the subscript operator. The numbering // of the column elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of rows of the referenced matrix. Alternatively, the elements of // a column can be traversed via iterators. Just as with vectors, in case of non-const columns, // \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero // value, in case of a constant column a ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A ColumnType col31 = column( A, 31UL ); for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // OK; Write access to the dense column value ... = *it; // OK: Read access to the dense column value. 
} for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense column value. } \endcode \code typedef blaze::CompressedMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A ColumnType col31 = column( A, 31UL ); for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_columns_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse column can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::columnMajor> MatrixType; MatrixType A( 100UL, 10UL ); // Non-initialized 10x100 matrix typedef blaze::Column<MatrixType> ColumnType; ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A // The subscript operator provides access to all possible elements of the sparse column, // including the zero elements. 
In case the subscript operator is used to access an element // that is currently not stored in the sparse column, the element is inserted into the column. col0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the column it is inserted into the column, if it is already contained // in the column its value is modified. col0.set( 45UL, -1.2 ); // An alternative for inserting elements into the column is the insert() function. However, // it inserts the element only in case the element is not already contained in the column. col0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse column is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the column and that the column's capacity is // large enough to hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); \endcode // \n \section views_columns_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse columns can be used in all arithmetic operations that any other dense or // sparse column vector can be used in. The following example gives an impression of the use of // dense columns within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse columns with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; typedef blaze::DynamicMatrix<double,blaze::columnMajor> MatrixType; MatrixType A( 2UL, 4UL ); // Non-initialized 2x4 matrix typedef blaze::Column<DenseMatrix> ColumnType; ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A col0[0] = 0.0; // Manual initialization of the 0th column of A col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A b = col0 + a; // Dense vector/dense vector addition b = c + column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * column( A, 2UL ); // Component-wise vector multiplication column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, 1UL ); // Scaling of the 1st column column( A, 2UL ) += a; // Addition assignment column( A, 2UL ) -= c; // Subtraction assignment column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors A = column( A, 1UL ) * trans( c ); // Outer product between two vectors \endcode // \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that column views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. 
For // instance: \code typedef blaze::CompressedMatrix<int,rowMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 31st column of a row-major matrix A ColumnType col1 = column( A, 1UL ); for( ColumnType::Iterator it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode // However, please note that creating a column view on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a view on a matrix with // a fitting storage orientation. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two row-major matrices CompressedMatrix<double,rowMajor> A( 128UL, 128UL ); CompressedMatrix<double,rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th column of the multiplication between A and B ... CompressedVector<double,columnVector> x = column( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th column of the row-major matrix B with A. CompressedVector<double,columnVector> x = A * column( B, 15UL ); \endcode // Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible // using a column-major storage order for matrix B would result in a more efficient evaluation. 
// // \n Previous: \ref views_rows &nbsp; &nbsp; Next: \ref arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations************************************************************************** /*!\page arithmetic_operations Arithmetic Operations // // \tableofcontents // // // \b Blaze provides the following arithmetic operations for vectors and matrices: // // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // // \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* //**Addition*************************************************************************************** /*!\page addition Addition // // The addition of vectors and matrices is as intuitive as the addition of scalar values. For both // the vector addition as well as the matrix addition the addition operator can be used. It even // enables the addition of dense and sparse vectors as well as the addition of dense and sparse // matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 + v2; // Addition of a two column vectors of different data type \endcode \code blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... 
Initializing the matrices M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // add vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 + v2; // Compilation error: Cannot add a column vector and a row vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode // In case of matrices, however, it is possible to add row-major and column-major matrices. Note // however that in favor of performance the addition of two matrices with the same storage order // is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 + v2; // Vectorized addition of two double precision vectors \endcode \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices \endcode // \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction */ //************************************************************************************************* //**Subtraction************************************************************************************ /*!\page subtraction Subtraction // // The subtraction of vectors and matrices works exactly as intuitive as the addition, but with // the subtraction operator. For both the vector subtraction as well as the matrix subtraction // the subtraction operator can be used. 
It also enables the subtraction of dense and sparse // vectors as well as the subtraction of dense and sparse matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 - v2; // Subtraction of a two column vectors of different data type blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // subtract vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector v1 - trans( v2 ); // OK: Subtraction of two column vectors \endcode // In case of matrices, however, it is possible to subtract row-major and column-major matrices. // Note however that in favor of performance the subtraction of two matrices with the same storage // order is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 - v2; // Vectorized subtraction of two double precision vectors blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... 
Initialization of the matrices M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices \endcode // \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication */ //************************************************************************************************* //**Scalar Multiplication************************************************************************** /*!\page scalar_multiplication Scalar Multiplication // // The scalar multiplication is the multiplication of a scalar value with a vector or a matrix. // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Additionally, it is possible to use std::complex values with the same built-in data // types as element type. \code blaze::StaticVector<int,3UL> v1{ 1, 2, 3 }; blaze::DynamicVector<double> v2 = v1 * 1.2; blaze::CompressedVector<float> v3 = -0.3F * v1; \endcode \code blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; blaze::DynamicMatrix<double> M2 = M1 * 1.2; blaze::CompressedMatrix<float> M3 = -0.3F * M1; \endcode // Vectors and matrices cannot be used for as scalar value for scalar multiplications (see the // following example). 
However, each vector and matrix provides the \c scale() function, which // can be used to scale a vector or matrix element-wise with arbitrary scalar data types: \code blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1; blaze::StaticMatrix<int,3UL,3UL> scalar; M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication M1.scale( scalar ); // Scalar multiplication \endcode // \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication */ //************************************************************************************************* //**Vector/Vector Multiplication******************************************************************* /*!\page vector_vector_multiplication Vector/Vector Multiplication // // \n \section componentwise_multiplication Componentwise Multiplication // <hr> // // Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or // blaze::rowVector) via the multiplication operator results in a componentwise multiplication // of the two vectors: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and // a dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row // vectors. The result is a dense row vector. 
\endcode // \n \section inner_product Inner Product / Scalar Product / Dot Product // <hr> // // The multiplication between a row vector and a column vector results in an inner product between // the two vectors: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; int result = v1 * v2; // Results in the value 15 \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; int result = v1 * trans( v2 ); // Also results in the value 15 \endcode // Alternatively, either the \c dot() function or the comma operator can be used for any combination // of vectors (row or column vectors) to perform an inner product: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; int result = dot( v1, v2 ); // Inner product between two row vectors \endcode \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,columnVector> v2{ -1, 3, -2 }; int result = (v1,v2); // Inner product between two column vectors \endcode // When using the comma operator, please note the brackets embracing the inner product expression. // Due to the low precedence of the comma operator (lower even than the assignment operator) these // brackets are strictly required for a correct evaluation of the inner product. 
// // // \n \section outer_product Outer Product // <hr> // // The multiplication between a column vector and a row vector results in the outer product of // the two vectors: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = v1 * v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2; \endcode // Alternatively, the \c outer() function can be used for any combination of vectors (row or column // vectors) to perform an outer product: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 ); // Outer product between two row vectors \endcode // \n \section cross_product Cross Product // <hr> // // Two vectors with the same transpose flag can be multiplied via the cross product. The cross // product between two vectors \f$ a \f$ and \f$ b \f$ is defined as \f[ \left(\begin{array}{*{1}{c}} c_0 \\ c_1 \\ c_2 \\ \end{array}\right) = \left(\begin{array}{*{1}{c}} a_1 b_2 - a_2 b_1 \\ a_2 b_0 - a_0 b_2 \\ a_0 b_1 - a_1 b_0 \\ \end{array}\right). \f] // Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is // realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%) // can be used in case infix notation is required: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) ); blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 ); \endcode // Please note that the cross product is restricted to three dimensional (dense and sparse) // column vectors. 
// // \n Previous: \ref scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division */ //************************************************************************************************* //**Vector/Vector Division************************************************************************* /*!\page vector_vector_division Vector/Vector Division // // \n \section componentwise_division Componentwise Division // <hr> // // Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector // or blaze::rowVector) via the division operator results in a componentwise division: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a // dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row // vectors. The result is a dense row vector. \endcode // Note that all values of the divisor must be non-zero and that no checks are performed to assert // this precondition! // // \n Previous: \ref vector_vector_multiplication &nbsp; &nbsp; Next: \ref matrix_vector_multiplication */ //************************************************************************************************* //**Matrix/Vector Multiplication******************************************************************* /*!\page matrix_vector_multiplication Matrix/Vector Multiplication // // In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical // textbooks. 
Just as in textbooks there are two different multiplications between a matrix and // a vector: a matrix/column vector multiplication and a row vector/matrix multiplication: \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::DynamicMatrix; DynamicMatrix<int> M1( 39UL, 12UL ); StaticVector<int,12UL,columnVector> v1; // ... Initialization of the matrix and the vector DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication \endcode // Note that the storage order of the matrix poses no restrictions on the operation. Also note, // that the highest performance for a multiplication between a dense matrix and a dense vector can // be achieved if both the matrix and the vector have the same scalar element type. // // \n Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref matrix_matrix_multiplication */ //************************************************************************************************* //**Matrix/Matrix Multiplication******************************************************************* /*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication // // The matrix/matrix multiplication can be formulated exactly as in mathematical textbooks: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 45UL, 85UL ); CompressedMatrix<float> M2( 85UL, 37UL ); // ... Initialization of the matrices DynamicMatrix<double> M3 = M1 * M2; \endcode // The storage order of the two matrices poses no restrictions on the operation, all variations // are possible. Note however that the highest performance for a multiplication between two dense // matrices can be expected for two matrices with the same scalar element type. 
// // \n Previous: \ref matrix_vector_multiplication &nbsp; &nbsp; Next: \ref custom_operations */ //************************************************************************************************* //**Custom Operations****************************************************************************** /*!\page custom_operations Custom Operations // // In addition to the provided operations on vectors and matrices it is possible to define custom // operations. For this purpose, \b Blaze provides the \c forEach() function, which allows to pass // the required operation via functor or lambda: \code blaze::DynamicMatrix<double> A, B; B = forEach( A, []( double d ){ return std::sqrt( d ); } ); \endcode // This example demonstrates the most convenient way of defining a custom operation by passing a // lambda to the \c forEach() function. The lambda is executed on each single element of a dense // vector or matrix or each non-zero element of a sparse vector or matrix. // // Alternatively, it is possible to pass a custom functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } }; B = forEach( A, Sqrt() ); \endcode // In order for the functor to work in a call to \c forEach() it must define a function call // operator, which accepts arguments of the type of the according vector or matrix elements. // // Although the operation is automatically parallelized depending on the size of the vector or // matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load() // function can be added to the functor, which handles the vectorized computation. 
Depending on // the data type this function is passed one of the following \b Blaze SIMD data types: // // <ul> // <li>SIMD data types for fundamental data types // <ul> // <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li> // <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li> // <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li> // <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li> // <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li> // </ul> // </li> // <li>SIMD data types for complex data types // <ul> // <li>\c blaze::cint8: Packed SIMD type for complex 8-bit signed integral data types</li> // <li>\c blaze::cuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li> // <li>\c blaze::cint16: Packed SIMD type for complex 16-bit signed integral data types</li> // <li>\c blaze::cuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li> // <li>\c blaze::cint32: Packed SIMD type for complex 32-bit signed integral data types</li> // <li>\c blaze::cuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li> // <li>\c blaze::cint64: Packed SIMD type for complex 64-bit signed integral data types</li> // <li>\c blaze::cuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li> // <li>\c blaze::cfloat: Packed SIMD type for complex single precision floating point data</li> // <li>\c 
blaze::cdouble: Packed SIMD type for complex double precision floating point data</li> // </ul> // </li> // </ul> // // All SIMD types provide the \c value data member for a direct access to the underlying intrinsic // data element. In the following example, this intrinsic element is passed to the AVX function // \c _mm256_sqrt_pd(): \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } simd_double_t load( simd_double_t a ) const { return _mm256_sqrt_pd( a.value ); } }; \endcode // In this example, whenever vectorization is generally applicable, the \c load() function is // called instead of the function call operator for as long as the number of remaining elements // is larger-or-equal to the width of the packed SIMD type. In all other cases (which also // includes peel-off and remainder loops) the scalar operation is used. // // Please note that this example has two drawbacks: First, it will only compile in case the // intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the // availability of AVX is not taken into account. The first drawback can be alleviated by making // the \c load() function a function template. The second drawback can be dealt with by adding a // \c simdEnabled() function template to the functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } template< typename T > T load( T a ) const { return _mm256_sqrt_pd( a.value ); } template< typename T > static constexpr bool simdEnabled() { #if defined(__AVX__) return true; #else return false; #endif } }; \endcode // The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether // or not vectorization is available for the given data type \c T. In case the function returns // \c true, the \c load() function is used for a vectorized evaluation, in case the function // returns \c false, \c load() is not called. 
// // Note that this is a simplified example that is only working when used for dense vectors and // matrices with double precision floating point elements. The following code shows the complete // implementation of the according functor that is used within the \b Blaze library. The \b Blaze // \c Sqrt functor is working for all data types that are providing a square root operation: \code namespace blaze { struct Sqrt { template< typename T > BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const -> decltype( sqrt( a ) ) { return sqrt( a ); } template< typename T > static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; } template< typename T > BLAZE_ALWAYS_INLINE auto load( const T& a ) const -> decltype( sqrt( a ) ) { BLAZE_CONSTRAINT_MUST_BE_SIMD_TYPE( T ); return sqrt( a ); } }; } // namespace blaze \endcode // For more information on the available \b Blaze SIMD data types and functions, please see the // SIMD module in the complete \b Blaze documentation. // // \n Previous: \ref matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref shared_memory_parallelization */ //************************************************************************************************* //**Shared Memory Parallelization****************************************************************** /*!\page shared_memory_parallelization Shared Memory Parallelization // // One of the main motivations of the \b Blaze 1.x releases was to achieve maximum performance // on a single CPU core for all possible operations. However, today's CPUs are not single core // anymore, but provide several (homogeneous or heterogeneous) compute cores. In order to fully // exploit the performance potential of a multicore CPU, computations have to be parallelized // across all available cores of a CPU. 
For this purpose, \b Blaze provides three different // shared memory parallelization techniques: // // - \ref openmp_parallelization // - \ref cpp_threads_parallelization // - \ref boost_threads_parallelization // // In addition, \b Blaze provides means to enforce the serial execution of specific operations: // // - \ref serial_execution // // \n Previous: \ref custom_operations &nbsp; &nbsp; Next: \ref openmp_parallelization */ //************************************************************************************************* //**OpenMP Parallelization************************************************************************* /*!\page openmp_parallelization OpenMP Parallelization // // \tableofcontents // // // \n \section openmp_setup OpenMP Setup // <hr> // // To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify // the use of OpenMP on the command line: \code -fopenmp // GNU C++ compiler -openmp // Intel C++ compiler /openmp // Visual Studio \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of threads. // // As common for OpenMP, the number of threads can be specified either via an environment variable \code export OMP_NUM_THREADS=4 // Unix systems set OMP_NUM_THREADS=4 // Windows systems \endcode // or via an explicit call to the \c omp_set_num_threads() function: \code omp_set_num_threads( 4 ); \endcode // Alternatively, the number of threads can also be specified via the \c setNumThreads() function // provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. 
// // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of OpenMP, the function returns the maximum number of threads OpenMP will use // within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function. // // // \n \section openmp_configuration OpenMP Configuration // <hr> // // Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze // deems the parallel execution as counterproductive for the overall performance, the operation // is executed serially. One of the main reasons for not executing an operation in parallel is // the size of the operands. For instance, a vector addition is only executed in parallel if the // size of both vector operands exceeds a certain threshold. Otherwise, the performance could // seriously decrease due to the overhead caused by the thread setup. However, in order to be // able to adjust the \b Blaze library to a specific system, it is possible to configure these // thresholds manually. All shared memory thresholds are contained within the configuration file // <tt>./blaze/config/Thresholds.h</tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique (see also \ref cpp_threads_parallelization and // \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum // performance for all possible situations and configurations. They merely provide a reasonable // standard for the current CPU generation. // // // \n \section openmp_first_touch First Touch Policy // <hr> // // So far the \b Blaze library does not (yet) automatically initialize dynamic memory according // to the first touch principle. 
Consider for instance the following vector triad example: \code using blaze::columnVector; const size_t N( 1000000UL ); blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N ); // Initialization of the vectors b, c, and d for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // Performing a vector triad a = b + c * d; \endcode // If this code, which is prototypical for many OpenMP applications that have not been optimized // for ccNUMA architectures, is run across several locality domains (LD), it will not scale // beyond the maximum performance achievable on a single LD if the working set does not fit into // the cache. This is because the initialization loop is executed by a single thread, writing to // \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will // be mapped into a single LD. // // As mentioned above, this problem can be solved by performing vector initialization in parallel: \code // ... // Initialization of the vectors b, c, and d #pragma omp parallel for for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // ... \endcode // This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for // instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in // order to achieve the maximum possible performance, it is imperative to initialize the memory // according to the later use of the data structures. // // // \n \section openmp_limitations Limitations of the OpenMP Parallelization // <hr> // // There are a few important limitations to the current \b Blaze OpenMP parallelization. The first // one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the // other one the OpenMP \c sections directive (see \ref openmp_sections). 
// // // \n \subsection openmp_parallel The Parallel Directive // // In OpenMP threads are explicitly spawned via an OpenMP parallel directive: \code // Serial region, executed by a single thread #pragma omp parallel { // Parallel region, executed by the specified number of threads } // Serial region, executed by a single thread \endcode // Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a // parallel directive is encountered. Therefore, from a performance point of view, it seems to be // beneficial to use a single OpenMP parallel directive for several operations: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; #pragma omp parallel { y1 = A * x; y2 = B * x; } \endcode // Unfortunately, this optimization approach is not allowed within the \b Blaze library. More // explicitly, it is not allowed to put an operation into a parallel region. The reason is that // the entire code contained within a parallel region is executed by all threads. Although this // appears to just comprise the contained computations, a computation (or more specifically the // assignment of an expression to a vector or matrix) can contain additional logic that must not // be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.). // Therefore it is not possible to manually start a parallel region for several operations, but // \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand // and the given operands. // // \n \subsection openmp_sections The Sections Directive // // OpenMP provides several work-sharing constructs to distribute work among threads. One of these // constructs is the \c sections directive: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization #pragma omp sections { #pragma omp section y1 = A * x; #pragma omp section y2 = B * x; } \endcode // In this example, two threads are used to compute two distinct matrix/vector multiplications // concurrently. Thereby each of the \c sections is executed by exactly one thread. // // Unfortunately \b Blaze does not support concurrent parallel computations and therefore this // approach does not work with any of the \b Blaze parallelization techniques. All techniques // (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization // and \ref boost_threads_parallelization) are optimized for the parallel computation of an // operation within a single thread of execution. This means that \b Blaze tries to use all // available threads to compute the result of a single operation as efficiently as possible. // Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations // and to let \b Blaze compute all operations within a \c sections directive in serial. This can // be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution) // or by selectively serializing all operations within a \c sections directive via the \c serial() // function: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = serial( A * x ); #pragma omp section y2 = serial( B * x ); } \endcode // Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does // NOT work in this context! 
// // \n Previous: \ref shared_memory_parallelization &nbsp; &nbsp; Next: \ref cpp_threads_parallelization */ //************************************************************************************************* //**C++11 Thread Parallelization******************************************************************* /*!\page cpp_threads_parallelization C++11 Thread Parallelization // // \tableofcontents // // // In addition to the OpenMP-based shared memory parallelization, starting with \b Blaze 2.1, // \b Blaze also provides a shared memory parallelization based on C++11 threads. // // // \n \section cpp_threads_setup C++11 Thread Setup // <hr> // // In order to enable the C++11 thread-based parallelization, first the according C++11-specific // compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument // has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the // compiler flags have to be extended by \code ... -std=c++11 -DBLAZE_USE_CPP_THREADS ... \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of C++11 threads. Note that in case both OpenMP and C++11 // threads are enabled on the command line, the OpenMP-based parallelization has priority and // is preferred. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. 
// // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of C++11 threads, the function will return the previously specified number of // threads. // // // \n \section cpp_threads_configuration C++11 Thread Configuration // <hr> // // As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an // operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for // the overall performance, the operation is executed serially. One of the main reasons for not // executing an operation in parallel is the size of the operands. For instance, a vector addition // is only executed in parallel if the size of both vector operands exceeds a certain threshold. // Otherwise, the performance could seriously decrease due to the overhead caused by the thread // setup. However, in order to be able to adjust the \b Blaze library to a specific system, it // is possible to configure these thresholds manually. All thresholds are contained within the // configuration file <tt>./blaze/config/Thresholds.h</tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaptation for // the C++11 thread parallelization. 
// // // \n \section cpp_threads_known_issues Known Issues // <hr> // // There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang // if their destructor is executed after the \c main() function: // // http://connect.microsoft.com/VisualStudio/feedback/details/747145 // // Unfortunately, the C++11 parallelization of the \b Blaze library is affected from this bug. // In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function, // which can be used to manually destroy all threads at the end of the \c main() function: \code int main() { // ... Using the C++11 thread parallelization of Blaze shutDownThreads(); } \endcode // Please note that this function may only be used at the end of the \c main() function. After // this function no further computation may be executed! Also note that this function has an // effect for Visual Studio compilers only and doesn't need to be used with any other compiler. // // \n Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref boost_threads_parallelization */ //************************************************************************************************* //**Boost Thread Parallelization******************************************************************* /*!\page boost_threads_parallelization Boost Thread Parallelization // // \tableofcontents // // // The third available shared memory parallelization provided with \b Blaze is based on Boost // threads. // // // \n \section boost_threads_setup Boost Thread Setup // <hr> // // In order to enable the Boost thread-based parallelization, two steps have to be taken: First, // the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_BOOST_THREADS ... \endcode // Second, the according Boost libraries have to be linked. 
These two simple actions will cause // the \b Blaze library to automatically try to run all operations in parallel with the specified // number of Boost threads. Note that the OpenMP-based and C++11 thread-based parallelizations // have priority, i.e. are preferred in case either is enabled in combination with the Boost // thread parallelization. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of Boost threads, the function will return the previously specified number of // threads. // // // \n \section boost_threads_configuration Boost Thread Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization). // All thresholds related to the Boost thread parallelization are also contained within the // configuration file <tt>./blaze/config/Thresholds.h</tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. 
They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the Boost thread parallelization. // // \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution */ //************************************************************************************************* //**Serial Execution******************************************************************************* /*!\page serial_execution Serial Execution // // Sometimes it may be necessary to enforce the serial execution of specific operations. For this // purpose, the \b Blaze library offers three possible options: the serialization of a single // expression via the \c serial() function, the serialization of a block of expressions via the // \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution. // // // \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression // <hr> // // The first option is the serialization of a specific operation via the \c serial() function: \code blaze::DynamicMatrix<double> A, B, C; // ... Resizing and initialization C = serial( A + B ); \endcode // \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any // kind of dense or sparse vector or matrix expression. // // // \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions // <hr> // // The second option is the temporary and local enforcement of a serial execution via the // \c BLAZE_SERIAL_SECTION: \code using blaze::rowMajor; using blaze::columnVector; blaze::DynamicMatrix<double,rowMajor> A; blaze::DynamicVector<double,columnVector> b, c, d, x, y, z; // ... Resizing and initialization // Parallel execution // If possible and beneficial for performance the following operation is executed in parallel. 
x = A * b; // Serial execution // All operations executed within the serial section are guaranteed to be executed in // serial (even if a parallel execution would be possible and/or beneficial). BLAZE_SERIAL_SECTION { y = A * c; z = A * d; } // Parallel execution continued // ... \endcode // Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial. // Outside the scope of the serial section, all operations are run in parallel (if beneficial for // the performance). // // Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution. // The use of the serial section within several concurrent threads will result in undefined behavior! // // // \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution // <hr> // // The third option is the general deactivation of the parallel execution (even in case OpenMP is // enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION // switch in the <tt>./blaze/config/SMP.h</tt> configuration file: \code #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1 \endcode // In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory // parallelization is deactivated altogether. // // \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serialization */ //************************************************************************************************* //**Serialization********************************************************************************** /*!\page serialization Serialization // // Sometimes it is necessary to store vectors and/or matrices on disk, for instance for storing // results or for sharing specific setups with other people. 
The \b Blaze math serialization // module provides the corresponding functionality to create platform independent, portable, binary // representations of vectors and matrices that can be used to store the \b Blaze data structures // without loss of precision and to reliably transfer them from one machine to another. // // The following two pages explain how to serialize vectors and matrices: // // - \ref vector_serialization // - \ref matrix_serialization // // \n Previous: \ref serial_execution &nbsp; &nbsp; Next: \ref vector_serialization */ //************************************************************************************************* //**Vector Serialization*************************************************************************** /*!\page vector_serialization Vector Serialization // // The following example demonstrates the (de-)serialization of dense and sparse vectors: \code using blaze::columnVector; using blaze::rowVector; // Serialization of both vectors { blaze::StaticVector<double,5UL,rowVector> d; blaze::CompressedVector<int,columnVector> s; // ... Resizing and initialization // Creating an archive that writes into the file "vectors.blaze" blaze::Archive<std::ofstream> archive( "vectors.blaze" ); // Serialization of both vectors into the same archive. Note that d lies before s! archive << d << s; } // Reconstitution of both vectors { blaze::DynamicVector<double,rowVector> d1; blaze::DynamicVector<int,rowVector> d2; // Creating an archive that reads from the file "vectors.blaze" blaze::Archive<std::ifstream> archive( "vectors.blaze" ); // Reconstituting the former d vector into d1. Note that it is possible to reconstitute // the vector into a different kind of vector (StaticVector -> DynamicVector), but that // the type of elements has to be the same. archive >> d1; // Reconstituting the former s vector into d2.
Note that it is even possible to reconstitute // a sparse vector as a dense vector (also the reverse is possible) and that a column vector // can be reconstituted as row vector (and vice versa). Note however that also in this case // the type of elements is the same! archive >> d2; } \endcode // The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can // also be used for vectors with vector or matrix element type: \code // Serialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // ... Resizing and initialization // Creating an archive that writes into the file "vector.blaze" blaze::Archive<std::ofstream> archive( "vector.blaze" ); // Serialization of the vector into the archive archive << vec; } // Deserialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // Creating an archive that reads from the file "vector.blaze" blaze::Archive<std::ifstream> archive( "vector.blaze" ); // Reconstitution of the vector from the archive archive >> vec; } \endcode // As the examples demonstrate, the vector serialization offers an enormous flexibility. However, // several actions result in errors: // // - vectors cannot be reconstituted as matrices (and vice versa) // - the element type of the serialized and reconstituted vector must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticVector, its size must match the size of the serialized vector // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown.
// // \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization */ //************************************************************************************************* //**Matrix Serialization*************************************************************************** /*!\page matrix_serialization Matrix Serialization // // The serialization of matrices works in the same manner as the serialization of vectors. The // following example demonstrates the (de-)serialization of dense and sparse matrices: \code using blaze::rowMajor; using blaze::columnMajor; // Serialization of both matrices { blaze::StaticMatrix<double,3UL,5UL,rowMajor> D; blaze::CompressedMatrix<int,columnMajor> S; // ... Resizing and initialization // Creating an archive that writes into the file "matrices.blaze" blaze::Archive<std::ofstream> archive( "matrices.blaze" ); // Serialization of both matrices into the same archive. Note that D lies before S! archive << D << S; } // Reconstitution of both matrices { blaze::DynamicMatrix<double,rowMajor> D1; blaze::DynamicMatrix<int,rowMajor> D2; // Creating an archive that reads from the file "matrices.blaze" blaze::Archive<std::ifstream> archive( "matrices.blaze" ); // Reconstituting the former D matrix into D1. Note that it is possible to reconstitute // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that // the type of elements has to be the same. archive >> D1; // Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major // matrix can be reconstituted as row-major matrix (and vice versa). Note however that also // in this case the type of elements is the same!
archive >> D2; } \endcode // Note that also in case of matrices it is possible to (de-)serialize matrices with vector or // matrix elements: \code // Serialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // ... Resizing and initialization // Creating an archive that writes into the file "matrix.blaze" blaze::Archive<std::ofstream> archive( "matrix.blaze" ); // Serialization of the matrix into the archive archive << mat; } // Deserialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // Creating an archive that reads from the file "matrix.blaze" blaze::Archive<std::ifstream> archive( "matrix.blaze" ); // Reconstitution of the matrix from the archive archive >> mat; } \endcode // Note that just as the vector serialization, the matrix serialization is restricted by a // few important rules: // // - matrices cannot be reconstituted as vectors (and vice versa) // - the element type of the serialized and reconstituted matrix must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticMatrix, the number of rows and columns must match those // of the serialized matrix // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown. // // \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref blas_functions \n */ //************************************************************************************************* //**BLAS Functions********************************************************************************* /*!\page blas_functions BLAS Functions // // \tableofcontents // // // For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices // \b Blaze relies on the efficiency of BLAS libraries.
For this purpose, \b Blaze implements // several convenient C++ wrapper functions for several BLAS functions. The following sections // give a complete overview of all available BLAS level 1, 2 and 3 functions. // // // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection blas_level_1_dot Dot Product (dot) // // The following wrapper functions provide a generic interface for the BLAS functions for the // dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()): \code namespace blaze { float dot( const int n, const float* x, const int incX, const float* y, const int incY ); double dot( const int n, const double* x, const int incX, const double* y, const int incY ); complex<float> dot( const int n, const complex<float>* x, const int incX, const complex<float>* y, const int incY ); complex<double> dot( const int n, const complex<double>* x, const int incX, const complex<double>* y, const int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dot( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()): \code namespace blaze { void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha, const float* A, int lda, const float* x, int incX, float beta, float* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha, const double* A, int lda, const double* x, int incX, double beta, double* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* x, int incX, complex<float> beta, 
complex<float>* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* x, int incX, complex<double> beta, complex<double>* y, int incY ); template< typename VT1, typename MT1, bool SO, typename VT2, typename ST > void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A, const DenseVector<VT2,false>& x, ST alpha, ST beta ); template< typename VT1, typename VT2, typename MT1, bool SO, typename ST > void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x, const DenseMatrix<MT1,SO>& A, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(), // and \c ztrmv()): \code namespace blaze { void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const float* A, int lda, float* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const double* A, int lda, double* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); } // namespace blaze \endcode // \n \section blas_level_3 BLAS Level 3 // <hr> // // \subsection blas_level_3_gemm General Matrix/Matrix 
Multiplication (gemm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()): \code namespace blaze { void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, double alpha, const double* A, int lda, const double* B, int ldb, double beta, double* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST > void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and // \c ztrmm()): \code namespace blaze { void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda,
double* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n \subsection blas_level_3_trsm Triangular System Solver (trsm) // // The following wrapper functions provide a generic interface for the BLAS functions for solving // a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()): \code namespace blaze { void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT, bool SO, typename VT, bool TF, typename ST > void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trsm( const DenseMatrix<MT1,SO1>& A, 
DenseMatrix<MT2,SO2>& B, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions******************************************************************************* /*!\page lapack_functions LAPACK Functions // // \tableofcontents // // // The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks // (including the decomposition, inversion and the computation of the determinant of dense matrices). // For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required // LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper // functions. For more details on the individual LAPACK functions see the \b Blaze function // documentation or the LAPACK online documentation browser: // // http://www.netlib.org/lapack/explore-html/ // // \note All functions only work for general, non-adapted matrices with \c float, \c double, // \c complex<float>, or \c complex<double> element type. The attempt to call the function with // adaptors or matrices of any other element type results in a compile time error! // // \note All functions can only be used if the fitting LAPACK library is available and linked to // the final executable. Otherwise a call to this function will result in a linker error. // // \note For performance reasons all functions do only provide the basic exception safety guarantee, // i.e. in case an exception is thrown the given matrix may already have been modified. // // // \n \section lapack_decomposition Matrix Decomposition // <hr> // // The following functions decompose/factorize the given dense matrix. Based on this decomposition // the matrix can be inverted or used to solve a linear system of equations. 
// // // \n \subsection lapack_lu_decomposition LU Decomposition // // The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix: \code namespace blaze { void getrf( int m, int n, float* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, double* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info ); template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = P \cdot L \cdot U, \f]\n // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper // triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major // matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit // diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is // transposed. // // \note The LU decomposition will never fail, even for singular matrices. However, in case of a // singular matrix the resulting decomposition cannot be used for a matrix inversion or solving // a linear system of equations. 
// // // \n \subsection lapack_ldlt_decomposition LDLT Decomposition // // The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given // symmetric indefinite matrix: \code namespace blaze { void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info ); void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. 
// // // \n \subsection lapack_ldlh_decomposition LDLH Decomposition // // The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(), // which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix: \code namespace blaze { void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations.
// // // \n \subsection lapack_llh_decomposition Cholesky Decomposition // // The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(), // \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given // positive definite matrix: \code namespace blaze { void potrf( char uplo, int n, float* A, int lda, int* info ); void potrf( char uplo, int n, double* A, int lda, int* info ); void potrf( char uplo, int n, complex<float>* A, int lda, int* info ); void potrf( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potrf( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky // decomposition fails if the given matrix \a A is not a positive definite matrix. In this case // a \c std::invalid_argument exception is thrown.
// // // \n \subsection lapack_qr_decomposition QR Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix: \code namespace blaze { void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot R, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the // min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n); // the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c cungqr(), and \c zungqr(), which reconstruct the \c Q matrix from a QR decomposition: \code namespace blaze { void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from // a QR decomposition: \code namespace blaze { void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( DenseMatrix<MT1,SO1>& C, const
DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_rq_decomposition RQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix: \code namespace blaze { void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = R \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // and \c tau in \c tau(i). 
Thus in case \a m <= \a n, the upper triangle of the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case // \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau // represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // \c cungrq(), and \c zungrq(), which reconstruct the \c Q matrix from an RQ decomposition: \code namespace blaze { void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from // an RQ decomposition: \code namespace blaze { void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n,
int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_ql_decomposition QL Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix: \code namespace blaze { void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot L, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>. 
<tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n, // the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(), // \c cungql(), and \c zungql(), which reconstruct the \c Q matrix from a QL decomposition: \code namespace blaze { void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from // a QL decomposition: \code namespace blaze { void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int
ldc, double* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_lq_decomposition LQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix: \code namespace blaze { void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = L \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . 
H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n); // the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(), // \c cunglq(), and \c zunglq(), which reconstruct the \c Q matrix from an LQ decomposition: \code namespace blaze { void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from // an LQ decomposition: \code namespace blaze { void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void
ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to invert // the matrix in-place. // // // \n \subsection lapack_lu_inversion LU-based Inversion // // The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by // an \ref lapack_lu_decomposition : \code namespace blaze { void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info ); void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info ); void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info ); void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... 
the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_inversion LDLT-based Inversion // // The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been // decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info ); void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info ); void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlh_inversion LDLH-based Inversion // // The following functions provide an interface for the LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by // an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the third function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_inversion Cholesky-based Inversion // // The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been // decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potri( char uplo, int n, float* A, int lda, int* info ); void potri( char uplo, int n, double* A, int lda, int* info ); void potri( char uplo, int n, complex<float>* A, int lda, int* info ); void potri( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given matrix is singular and not invertible.
// // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place: \code namespace blaze { void trtri( char uplo, char diag, int n, float* A, int lda, int* info ); void trtri( char uplo, char diag, int n, double* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \section lapack_substitution Substitution // <hr> // // Given a matrix that has already been decomposed the following functions can be used to perform // the forward/backward substitution step to compute the solution to a system of linear equations. 
// Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. // // // \n \subsection lapack_lu_substitution LU-based Substitution // // The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has // already been decomposed by an \ref lapack_lu_decomposition : \code namespace blaze { void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv ); } // namespace
blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_substitution LDLT-based Substitution // // The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite // matrix that has already been decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... 
the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // The following functions provide an interface for the LAPACK functions \c chetrs(), and \c zhetrs(), // which perform the substitution step for an Hermitian indefinite matrix that has already been // decomposed by an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_llh_substitution Cholesky-based Substitution // // The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix // that has already been decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix: \code namespace blaze { void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \section lapack_linear_system_solver Linear System Solver // <hr> // // The following functions represent compound functions that perform both the decomposition step // as well as the substitution step to compute the solution to a system of linear equations. Note // that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. 
// // // \subsection lapack_lu_linear_system_solver LU-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according // \ref lapack_lu_substitution : \code namespace blaze { void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info ); void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_lu_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according // \ref lapack_ldlt_substitution : \code namespace blaze { void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlt_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c chesv() and // \c zhesv(), which combine an \ref lapack_ldlh_decomposition and the according // \ref lapack_ldlh_substitution : \code namespace blaze { void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according // \ref lapack_llh_substitution : \code namespace blaze { void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_llh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(), // \c ctrsv(), and \c ztrsv(): \code namespace blaze { void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename MT, bool SO, typename VT, bool TF > void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b contains the // solution of the linear system of equations. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'. // // The last function throws a \a std::invalid_argument exception in case of an error. Note that // none of the functions performs any test for singularity or near-singularity. Such tests // must be performed prior to calling this function! // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: \ref configuration_files \n */ //************************************************************************************************* //**Configuration Files**************************************************************************** /*!\page configuration_files Configuration Files // // \tableofcontents // // // Sometimes it might be necessary to adapt \b Blaze to specific requirements. 
For this purpose // \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample opportunity to customize internal settings, behavior, and thresholds. // This chapter explains the most important of these configuration files. // // // \n \section transpose_flag Default Vector Storage // <hr> // // The \b Blaze default is that all vectors are created as column vectors (if not specified // explicitly): \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector \endcode // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default // vector storage (i.e. the default transpose flag of the vectors). Via the \c defaultTransposeFlag // value the default transpose flag for all vector of the \b Blaze library can be specified: \code constexpr bool defaultTransposeFlag = columnVector; \endcode // Valid settings for the \c defaultTransposeFlag are blaze::rowVector and blaze::columnVector. // // // \n \section storage_order Default Matrix Storage // <hr> // // Matrices are by default created as row-major matrices: \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix \endcode // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default // matrix storage order. Via the \c defaultStorageOrder value the default storage order for all // matrices of the \b Blaze library can be specified. \code constexpr bool defaultStorageOrder = rowMajor; \endcode // Valid settings for the \c defaultStorageOrder are blaze::rowMajor and blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can // be configured to use a BLAS library. 
Via the following compilation switch in the configuration // file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: \code #define BLAZE_BLAS_MODE 1 \endcode // In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze from parallelizing on its own: \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode // In case no BLAS library is available, \b Blaze will still work and will not be reduced in // functionality, but performance may be limited. // // // \n \section cache_size Cache Size // <hr> // // The optimization of several \b Blaze compute kernels depends on the cache size of the target // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal // speed the exact cache size of the system should be provided via the \c cacheSize value in the // <tt>./blaze/config/CacheSize.h</tt> configuration file: \code constexpr size_t cacheSize = 3145728UL; \endcode // \n \section vectorization Vectorization // <hr> // // In order to achieve maximum performance and to exploit the compute power of a target platform // the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or // MIC intrinsics, depending on which instruction set is available. However, it is possible to // disable the vectorization entirely by the compile time switch in the configuration file // <tt>./blaze/config/Vectorization.h</tt>: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is // disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for // the operations. Note that deactivating the vectorization may pose a severe performance // limitation for a large number of operations! 
// // // \n \section thresholds Thresholds // <hr> // // \b Blaze provides several thresholds that can be adapted to the characteristics of the target // platform. For instance, the \c DMATDVECMULT_THRESHOLD specifies the threshold between the // application of the custom \b Blaze kernels for small dense matrix/dense vector multiplications // and the BLAS kernels for large multiplications. All thresholds, including the thresholds for // the OpenMP-based parallelization, are contained within the configuration file // <tt>./blaze/config/Thresholds.h</tt>. // // // \n \section padding Padding // <hr> // // By default the \b Blaze library uses padding for all dense vectors and matrices in order to // achieve maximum performance in all operations. Due to padding, the proper alignment of data // elements can be guaranteed and the need for remainder loops is minimized. However, on the // downside padding introduces an additional memory overhead, which can be large depending on // the used data type. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code constexpr bool usePadding = true; \endcode // If \c usePadding is set to \c true padding is enabled for all dense vectors and matrices, if // it is set to \c false padding is disabled. Note however that disabling padding can considerably // reduce the performance of all dense vector and matrix operations! // // // \n \section streaming Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide // a significant performance advantage of about 20%. However, this advantage is only in effect in // case the memory bandwidth of the target architecture is maxed out. If the target architecture's // memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance // instead of increasing it. 
// // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate streaming: \code constexpr bool useStreaming = true; \endcode // If \c useStreaming is set to \c true streaming is enabled, if it is set to \c false streaming // is disabled. It is recommended to consult the target architecture's white papers to decide // whether streaming is beneficial or hurtful for performance. // // // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref custom_data_types \n */ //************************************************************************************************* //**Custom Data Types****************************************************************************** /*!\page custom_data_types Custom Data Types // // // The \b Blaze library tries hard to make the use of custom data types as convenient, easy and // intuitive as possible. However, unfortunately it is not possible to meet the requirements of // all possible data types. Thus it might be necessary to provide \b Blaze with some additional // information about the data type. The following sections give an overview of the necessary steps // to enable the use of the hypothetical custom data type \c custom::double_t for vector and // matrix operations. For example: \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and initialization c = a + b; \endcode // The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+() // for additions, \c operator-() for subtractions, \c operator*() for multiplications and // \c operator/() for divisions. If any of these functions is missing it is necessary to implement // the operator to perform the according operation. 
For this example we assume that the custom // data type provides the four following functions instead of operators: \code namespace custom { double_t add ( const double_t& a, const double_t b ); double_t sub ( const double_t& a, const double_t b ); double_t mult( const double_t& a, const double_t b ); double_t div ( const double_t& a, const double_t b ); } // namespace custom \endcode // The following implementations will satisfy the requirements of the \b Blaze library: \code inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b ) { return add( a, b ); } inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b ) { return sub( a, b ); } inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b ) { return mult( a, b ); } inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b ) { return div( a, b ); } \endcode // \b Blaze will use all the information provided with these functions (for instance the return // type) to properly handle the operations. In the rare case that the return type cannot be // automatically determined from the operator it might be additionally necessary to provide a // specialization of the following four \b Blaze class templates: \code namespace blaze { template<> struct AddTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct SubTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct MultTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct DivTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; } // namespace blaze \endcode // The same steps are necessary if several custom data types need to be combined (as for instance // \c custom::double_t and \c custom::float_t). 
Note that in this case both permutations need to // be taken into account: \code custom::double_t operator+( const custom::double_t& a, const custom::float_t& b ); custom::double_t operator+( const custom::float_t& a, const custom::double_t& b ); // ... \endcode // Please note that only built-in data types apply for vectorization and thus custom data types // cannot achieve maximum performance! // // // \n Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref error_reporting_customization \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism************************************************* /*!\page error_reporting_customization Customization of the Error Reporting Mechanism // // \tableofcontents // // // \n \section error_reporting_background Background // <hr> // // The default way of \b Blaze to report errors of any kind is to throw a standard exception. // However, although in general this approach works well, in certain environments and under // special circumstances exceptions may not be the mechanism of choice and a different error // reporting mechanism may be desirable. For this reason, \b Blaze provides several macros, // which enable the customization of the error reporting mechanism. Via these macros it is // possible to replace the standard exceptions by some other exception type or a completely // different approach to report errors. // // // \n \section error_reporting_general_customization Customization of the Reporting Mechanism // <hr> // // In some cases it might be necessary to adapt the entire error reporting mechanism and to // replace it by some other means to signal failure. The primary macro for this purpose is the // \c BLAZE_THROW macro: \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode // This macro represents the default mechanism of the \b Blaze library to report errors of any // kind. 
In order to customize the error reporting mechanism all that needs to be done is to // define the macro prior to including any \b Blaze header file. This will cause the \b Blaze // specific mechanism to be overridden. The following example demonstrates this by replacing // exceptions by a call to a \c log() function and a direct call to abort: \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() #include <blaze/Blaze.h> \endcode // Doing this will trigger a call to \c log() and an abort instead of throwing an exception // whenever an error (such as an invalid argument) is detected. // // \note It is possible to execute several statements instead of executing a single statement to // throw an exception. Also note that it is recommended to define the macro such that a subsequent // semicolon is required! // // \warning This macro is provided with the intention to assist in adapting \b Blaze to special // conditions and environments. However, the customization of the error reporting mechanism via // this macro can have a significant effect on the library. Thus be advised to use the macro // with due care! // // // \n \section error_reporting_exception_customization Customization of the Type of Exceptions // <hr> // // In addition to the customization of the entire error reporting mechanism it is also possible // to customize the type of exceptions being thrown. 
This can be achieved by customizing any // number of the following macros: \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( MESSAGE ) ) #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( std::invalid_argument( MESSAGE ) ) #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( MESSAGE ) ) #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( MESSAGE ) ) #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( std::runtime_error( MESSAGE ) ) \endcode // In order to customize the type of exception the according macro has to be defined prior to // including any \b Blaze header file. This will override the \b Blaze default behavior. The // following example demonstrates this by replacing \c std::invalid_argument by a custom // exception type: \code class InvalidArgument { public: InvalidArgument(); explicit InvalidArgument( const std::string& message ); // ... }; #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( InvalidArgument( MESSAGE ) ) #include <blaze/Blaze.h> \endcode // By manually defining the macro, an \c InvalidArgument exception is thrown instead of a // \c std::invalid_argument exception. Note that it is recommended to define the macro such // that a subsequent semicolon is required! // // \warning These macros are provided with the intention to assist in adapting \b Blaze to // special conditions and environments. However, the customization of the type of an exception // via this macro may have an effect on the library. Thus be advised to use the macro with due // care! // // // \n \section error_reporting_special_errors Customization of Special Errors // <hr> // // Last but not least it is possible to customize the error reporting for special kinds of errors. 
// This can be achieved by customizing any number of the following macros: \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \endcode // As explained in the previous sections, in order to customize the handling of special errors // the according macro has to be defined prior to including any \b Blaze header file. This will // override the \b Blaze default behavior. // // // \n Previous: \ref custom_data_types &nbsp; &nbsp; Next: \ref intra_statement_optimization \n */ //************************************************************************************************* //**Intra-Statement Optimization******************************************************************* /*!\page intra_statement_optimization Intra-Statement Optimization // // One of the prime features of the \b Blaze library is the automatic intra-statement optimization. // In order to optimize the overall performance of every single statement \b Blaze attempts to // rearrange the operands based on their types. For instance, the following addition of dense and // sparse vectors \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1 + d2; \endcode // is automatically rearranged and evaluated as \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged \endcode // This order of operands is highly favorable for the overall performance since the addition of // the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized // fashion. // // This intra-statement optimization can have a tremendous effect on the performance of a statement. // Consider for instance the following computation: \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... 
Resizing and initialization y = A * B * x; \endcode // Since multiplications are evaluated from left to right, this statement would result in a // matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the // right subexpression is evaluated first, the performance can be dramatically improved since the // matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication. // The \b Blaze library exploits this by automatically restructuring the expression such that the // right multiplication is evaluated first: \code // ... y = A * ( B * x ); \endcode // Note however that although this intra-statement optimization may result in a measurable or // even significant performance improvement, this behavior may be undesirable for several reasons, // for instance because of numerical stability. Therefore, in case the order of evaluation matters, // the best solution is to be explicit and to separate a statement into several statements: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... d3 += d2; // ... and afterwards add the second dense vector \endcode \code // ... blaze::DynamicMatrix<double> A, B, C; blaze::DynamicVector<double> x, y; // ... Resizing and initialization C = A * B; // Compute the left-hand side matrix-matrix multiplication first ... y = C * x; // ... before the right-hand side matrix-vector multiplication \endcode // Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + eval( s1 + d2 ); \endcode \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... 
Resizing and initialization y = eval( A * B ) * x; \endcode // \n Previous: \ref error_reporting_customization */ //************************************************************************************************* #endif
GB_unaryop__identity_int16_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__identity_int16_fp64
// op(A') function: GB_tran__identity_int16_fp64

// C type:  int16_t
// A type:  double
// cast:    int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop: cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity, so z receives the already-cast value unchanged
#define GB_OP(z, x) \
    z = x ;

// casting: declare an int16_t z and cast the double aij to it.
// GB_CAST_SIGNED is defined in GB.h; presumably it handles NaN and
// out-of-range doubles — confirm its definition there.
#define GB_CASTING(z, aij) \
    int16_t z ; GB_CAST_SIGNED(z,aij,16) ;

// cij = op (cast (aij)) ; aij and z are declared locally by the macros above
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    GB_GETA (aij, Ax, pA) ;             \
    /* Cx [pC] = op (cast (aij)) */     \
    GB_CASTING (z, aij) ;               \
    GB_OP (GB_CX (pC), z) ;             \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cast each of the anz entries of Ax (double) to int16_t, storing the result
// in Cx.  Returns GrB_NO_VALUE when this operator/type combination has been
// disabled at compile time (see GB_DISABLE above); otherwise GrB_SUCCESS.

GrB_Info GB_unop__identity_int16_fp64
(
    int16_t *Cx,            // Cx and Ax may be aliased
    double *Ax,             // input values
    int64_t anz,            // number of entries in Ax and Cx
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (int16_t) Ax [p], via the GB_CAST_OP macro above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose/typecast loops live in the included template file, which
// expands the GB_* macros defined above.

GrB_Info GB_tran__identity_int16_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__bclr_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):    GB (_AaddB__bclr_uint16)
// A.*B function (eWiseMult):  GB (_AemultB_08__bclr_uint16)
// A.*B function (eWiseMult):  GB (_AemultB_02__bclr_uint16)
// A.*B function (eWiseMult):  GB (_AemultB_04__bclr_uint16)
// A.*B function (eWiseMult):  GB (_AemultB_bitmap__bclr_uint16)
// A*D function (colscale):    GB ((none))
// D*A function (rowscale):    GB ((none))
// C+=B function (dense accum):   GB (_Cdense_accumB__bclr_uint16)
// C+=b function (dense accum):   GB (_Cdense_accumb__bclr_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__bclr_uint16)
// C=scalar+B     GB (_bind1st__bclr_uint16)
// C=scalar+B'    GB (_bind1st_tran__bclr_uint16)
// C=A+scalar     GB (_bind2nd__bclr_uint16)
// C=A'+scalar    GB (_bind2nd_tran__bclr_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint16_t, 16)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = BITCLR (x, y).  GB_BITCLR is defined in GB.h;
// presumably it clears bit y of x — confirm the exact semantics there.
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITCLR (x, y, uint16_t, 16) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_UINT16 || GxB_NO_BCLR_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// BITCLR is not in that list, so this kernel is compiled out (#if 0).

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The loop body is supplied by the included template, which expands the
// GB_* macros defined above.

GrB_Info GB (_Cdense_ewise3_noaccum__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bclr_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returns.  Benign
    // artifact of the code generator; left as-is (auto-generated file).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bclr_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces consumed by the add template; GB_FREE_WORK presumably
    // releases what GB_WERK_DECLARE sets up (both defined elsewhere)
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bclr_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bclr_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bclr_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present per the bitmap Bb (GBB is defined in GB.h)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITCLR (x, bij, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bclr_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present per the bitmap Ab (GBB is defined in GB.h)
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITCLR (aij, y, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    uint16_t aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = GB_BITCLR (x, aij, uint16_t, 16) ;    \
}

GrB_Info GB (_bind1st_tran__bclr_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (aij, y, uint16_t, 16) ; \ } GrB_Info GB (_bind2nd_tran__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
opencl_dmg_fmt_plug.c
/* * DMG cracker patch for JtR. Hacked together during August of 2012 * by Dhiru Kholia <dhiru.kholia at gmail.com> * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * Copyright (c) 2015, magnum * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ /* * Debug levels: * 1 show what "test" hits * 2 dump printables from the decrypted blocks * 3 dump hex from the decrypted blocks * 4 dump decrypted blocks to files (will overwrite with no mercy): * dmg.debug.main main block * dmg.debug alternate block (if present, this is the start block) */ //#define DMG_DEBUG 2 #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_dmg; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_dmg); #else #include <string.h> #include <openssl/des.h> #include "aes.h" #include "hmac_sha.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef DMG_DEBUG #define NEED_OS_FLOCK #include "os.h" #endif #include "arch.h" #include "formats.h" #include "common.h" #include "stdint.h" #include "options.h" #include "jumbo.h" #include "common-opencl.h" #define FORMAT_LABEL "dmg-opencl" #define FORMAT_NAME "Apple DMG" #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES/AES" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1001 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #undef HTONL #define HTONL(n) (((((unsigned long)(n) & 0xFF)) << 24) | \ ((((unsigned long)(n) & 0xFF00)) << 8) | \ ((((unsigned long)(n) & 0xFF0000)) >> 8) | \ ((((unsigned long)(n) & 0xFF000000)) >> 24)) #ifdef DMG_DEBUG extern volatile int bench_running; #endif typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } dmg_password; typedef struct { uint32_t v[32/4]; } dmg_hash; typedef struct { int 
iterations; int outlen; uint8_t length; uint8_t salt[20]; } dmg_salt; static int *cracked; static int any_cracked; static struct custom_salt { unsigned int saltlen; unsigned char salt[20]; unsigned int ivlen; unsigned char iv[32]; int headerver; unsigned char chunk[8192]; uint32_t encrypted_keyblob_size; uint8_t encrypted_keyblob[128]; unsigned int len_wrapped_aes_key; unsigned char wrapped_aes_key[296]; unsigned int len_hmac_sha1_key; unsigned char wrapped_hmac_sha1_key[300]; char scp; /* start chunk present */ unsigned char zchunk[4096]; /* chunk #0 */ int cno; int data_size; unsigned int iterations; } *cur_salt; static cl_int cl_error; static dmg_password *inbuffer; static dmg_hash *outbuffer; static dmg_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; size_t insize, outsize, settingsize, cracked_size; static struct fmt_tests dmg_tests[] = { // testimage.AES-256.64k.header_v2.dmg {"$dmg$2*20*fd70ac1e078f01fce55a2e56145a2494446db32a*32*9110b1778f09b1a7000000000000000000000000000000000000000000000000*64*68a32866b0e67515f35dc67c4d6747a8561a9f4f6a6718a894b0a77a47c452471e04ecef9bf56f0d83d1201a509a374e00000000000000000000000000000000*14*8192*70ebe6f1d387e33e3d1093cca2e94c9a32e2c9ba47d461d737d49a7dc1b1f69407b7dbc16f7671689ea4a4641652b3f976b6f1c73c551a0a407d5a335caa169db4a6a25bbd27fbbc38fc71b29ee9b1eae349b0d8a21d57959ecca6bf74bc26ccaee69cfee4999b55374605491af6d0b9066c26995209cd1b71925bcb45a8ef5727a6c20338f08de4357d4cb42cb65ecdc2344a5d7387633c913258ba40699ea5f88804b5e562bf973096337b17b4fc1236d3c8a80b9b48aed63c5a0eae3ae924a883e948f374771bba46923658f225fd2795ce0e795269f589e0ffc81615585e1224cddde654d689a3260e69683c6198bdfcd87507c23cefe36d72f8878cb27bbe5dce868752a7cce067f5a3110f20ebd31ecd53840103e0b2d44385656398edc487bf6d1a5ec3a56af54f9d4254fd20988df41eb85e366f13da1270a3f42c6672ad5faf00fa21e9ba3691bde78ab2c267a142f275467d5b853a107dbf1d75839f0e87b3b4f1d2cec88cc02a26bc4a63aa6836b0c43c5dbb44a832050385a48d46968361ebb053c2416c02458b76c95e
50970922556d40b100967340a32824e6b6e44c0c1e0da7ce989d9d5ad91560156" "ed39666cbfbea71f28797a5a7a40e77665612e977ecb8b7fe71d500eafc29d9a0ec1d0ff1723fea7c405bc181ea93c0df42f5bf886eace3cfeee8b0dba52ba8cd2ae009e75d8845264d12dd632ca3236bc1b643437881b270183d2e2bd20808ae73d32bfe88347e33bef4921fcfac9646b74f116be1f04fc353d2222499d5247fa842d0d0f00fc9642ea7524adb65c18fff87b6efd060ec850d7de6f59869387b3d4cc8e38014d52d94ead07d16b8d94327fe5533941497c9be2dd6c04142ba57e29daaeef96d0f2d109522651d797715f4bc5f4cc3fb69fa92623b5ea3e08ff78dc59913993c877f4e2c8964dffd2c8cde6c6b6738da2883505486df5b633aaa8c66acbc2886107f3dd61b1df29f54a13ef27a7d2785c02153375240885e5c54297d88827403320799e05213761549eedc1c159c922087983410d2abadf9ef8ae460d018c278a9ea724f52b866e3d7ff2374496103b5137297100c970d195fca8c1286a8f9d3859ee12c84bdaa4b56ca91e307580b61dbe435ce4021007e4a2a8085976549cf1d195f439bb6e642567f91a0224e98796614d9ea6bfab8f6d13f91b7a80a54e538a1a785cd07b5d7ed2b7e45a0658b5722b5f8844f5139cff3b33ce244946757c020c54c8b5e43324023ed11001201213ffe4829e37135686a8bec1837b35fb234049570868dc5ba9c84cef6890d9ec400a794b1723eb209a60758ba9ae9abd23a7ea9f94fc6b73d29a560e24973c9160f195fbe82376c81dfeec1a7f912a8c22c067a26786a22f0b7db298" 
"3631400f120010706c78acc36ddcc29c7055fe82105f770e2dadf131ab49af93539fb5186d32dbe4a4df6cb0fdf6840c0609c8769fe242cc60d87e04e6e3be1a7884a05d9fb96c3bc1bbc769d96bbcc0413492eefc5502e9c1ac7c3f237b9851dc453b5bfa899b7b68e5e3b92711e7c92945feb6f6e452d6216e154a952cc28a3740925554d9fd44acedc8a44b0c25bbb6aa637fe9560437c08b17992c74de38fe1fb8fd5f66c2933c2d573ddc914f68f42d6cb350f126a51f607a2dd23b63e6382ec1e6ae434f47cfcd1e7d96c8293ef2994f850a27ef2d8210a0df0c219eadd2376ce36a22db56827d92a90d5e2fa55a4154c39061bd5490ba29f8309cf3e2056f761762dff56803bbe0607faef510d023b249663368977fede0577944f2ff05ead4b432bbb07a7d90148ebd1e30bf1204cd9069725d9fdbb850d3d6fde5044da1b9ffa222d99061c8ae217bc5b249960db545e6fece3ea2faeefa7702f065764b326ae0e62f3b8745cb73f35bea1bb9f6ed4fcda591f4d84da0415a0552306f6691a64a1d0efc8ac93559a79e57e357b63df48506c12dde74f6ea8fc5eeb1846c394fb8fd0fd40df26a42e53692db51bb36403305c1aff797e20adb6f8f1721e316705dcf8fe6e6989a5c3da253fdc6cb5de426f1c018161d72e34e6791d73023c5df69c0f83d3ea1d097f3a7ff37720a66868f40d3b87755bdaf508086c7e478ac1efc0dc421987af6db9b2f096a7270de91f5b3b84ee6d1d268d581718d3c534eeffbe2889388e9930cb051b5752c1a" 
"b1faf1e367866af7d4b37ba25c15a030d9a5f32bb8912ce853fe7988dc62aa61264e3c5a29d18c5121a605558b15004c817cb0ab1646138cbf6375f1a179852bc22d80b83891edfd38e25efcc0dbb78062f479a9dc792e5822e09ba3e0b8ef71c62ad7747dba8cc97707f31383baa93108d5c7253dce2395fa24d77c42cbf3559b5dc0235c0ce49ef9e3cc816598698c8f8c5b32abfaeb44f3c35a01a4f47421a166d5aa893aaba80e57eb576b838c95ed6f9d5b3d389a8f86b97fe629408ec7c7ba7fd95d7625e950c7324fdd35989570b24f2e1e24d52b65ed6116e728dc3a1004d3d8fbfeeaea1c7dc5d3dc7a029f97f8dc7f740e2386eb27e9793680d959821031fda08c7146f46e8ee47ec28c7d25574eb690de09849725e490c39e524b74aecfc68ff0d760d115b4d0a126609cef83b6c80731dd17f4a307331464953c6b41875b6e5fea328fd59f275e2fabd25717781cf9d5cc52286246ebc92527eeac7acc6e2652c6fcff405e7b4a78b8f9475f46bb82a68a6e44037d61de0df58a8b7a81f407aaa260f3a49c4a2641776404fc15bfb77573dc8728573a1872e7e093663842d9368e74cbe3ae547355fa101daeaa0f97dc0a63927e54ae59fe13aac4f488e938fa67a12876d103b4a56b6eb88ff0104330e5cdc7c6886b46545d523bfbfc88f40f9654fcd0f8c4f443a225b50b44af9674166d3de36b6ac63a150fbcda2e2511ae2a42fbe51c08f7238366aada5c6be8eeb41963c6a5374a94b332012e860d6cfbc1b8a4d5a9825b88a90c9a5f" 
"5615ca503698ad00df2cd93467b66d9b15876bc49895a081959132bad2e63757aa4e5ff77c6f25dd2581a3e9bb8e213c9313ceca0fcf5f8416882849fbee576d8ffb9dc057eb96bf6b81db60a82b0e6f315a13dd31706c0e36f4f21b9ce977ff6700cd77db603120d59ad8088e121cc3c502e37774b098eee7c8244f9bbe0d4a9d0deba3ec22e5abfea69ab72cdb75a001bb53672fe12b4fdbdf7e82c0bb2608de5d8e1961fb4524dd1acc890361923fb691bc5ea436246428a70b5021f9eee2c637eeab574babde4c0d55f57925e511ff623af5c4224d3ccb9c8572179e2610b4b79817ca18ddcb5302151f9facffca96269ff5fbb11e48209e20145bdd70d72bae54f6fbb89a3396bdaaa3d45413e3c5bc672ab98dfbeb3274156096f641494c1c946baab7c388a16c71ce5009b32f45dbbe37998906570045027950bd758b7ab2f72c243eccf9551d539946a99779848b16cddf9f163fcefe1e1ebee3ba7d5240b92698ad56a036274ca798eae19b0dbcf39a1c0ea1a58b29dc0e3de89def08e6c5800c94db47b7eaef5514c002d687b4d99b00fbd44137f56557830d63156f43bf73db8b330bca0ebb4ea5d50941b758929722aaa5452cd4a4e00640165dfc35fd35daaf929997adeb4c4f7611d66befb80809dc7bc6c763879c3bcd8dd0fe6b621898717fd095fb7eb403b07591b931a8e16ab488b01acd636bf4f1e71d5460532b8a3b00d7353e84c071de5cfa25de685cb85b569e08d2f177727cda11f196b040d25c97ccb83e355db98c2bc14844" 
"1ca95b5f612020bc53a81184ccd0c5f14bf6d9fd6318ec28bafe8d668cb3c98c56ad416007bef4a3ed9e12eafe8f9e7d87fbb02d1f557b497db1a2c0fe40ec3f23ea88332513c68f724cc8a8af6636c9f332a8e55c2d41fd81a23e92e9ffacd3ef14cda669e7dbe31ca08a5238c7fbfe7020933087bf2ce0a7489fd5a3becce5de09628234f60c833002aa8e9c9ec51f57c8e4ba095c1d054750d46d64041bb1f567a82d63bb5e88fb70bdddad0ed7572229e56b90e74dd88ca829f1ce8424bd24a0bbfe3dc3f77d244ee59f364b36a4b05fb511b5b0d7f876c65ab4233803543b0a68b9d2d6d45d292f91eb4700c2dbf431e40c77a4fcc3ac3fdf3a2bae3df35b6417b8f1eedfe84cc65a07c426780871d16ec5ed3201ea4eaa778b71f04cc1999587bb4645bbc43e365395e9188c85bd024f758304aee979f8e67d07636fea251423e920e2b7258580d1918fce772bf02ee66926fc5f9a3dd6a8c89e6ce7e4fc03d4784296df1a9152a1fc66050983a287e3520bf3e04d900d25316c8bd5ab489bf97a2f31f4061f895111caff9968ecb22d75cb9e5400ca1d0fb044acb4fb9cccaa4766cf6c63ae5a7a3f9af90d1b225067f671d85cdb4e2e21d2850f351d995d54520fdcbb8cb30bfa82190ab2071eb8bf350f984408b206597371736110114d12d79da4027f9a58c8fede63cf16fa552d2a956ae2a49c83b0afca3056f87f1e27bdeb9d14a7e5cf30550017a3233c4f386769021a853b971746aa28aa69ca980bb02979779c5bd29259c84911e2b252" 
"61b92be669e8a731dd74edce66b6f3ab5944695efd57c0004ff637eabfbc02ae346528fedbf2ae80d420580adc4d571a37fa1397fc2b85ec458d5262c15620c88f2dca0eb1bae4ec39d67fef56ecbdf89703919e5a6767d0f77bf6f0f60ba21003d033c9dc3057df18d855a5801110fa9a29a42ce10a44a39ed883df249ccddef8aaf832387e70048d9ad6014cc17f9a2bf7146696ee4eed388d06a45f7bd7696e57500ecfada9e9eb17926b16bbd90146e406e281141f0a918c320cacc9d1f045ac1bba87ce8d1d45cb6303988d5228da6ad33df6d2a5bd7f265b8f610078e9db5fa3db0e08286e500063f0fd6860a11d9985226ad382a95bc3c3941d43378ea1bf28fc85749f616092d77e7c292e311337168b52eba08ffc0f76582710a1a7d33c55162b3c7fbf227a324e1f4579e035ae0fa17fafb1ea964aa977490b5a3fc16c75e1fc50a6d17e193345b71369df804c61a71bf60be4281c3d1f945c690368c23caab006f9dfc913dbe6119d6fe8349cdd424db7074726e8bdd0ae99e2bfb9b800ddb965c06e0587cd10108c9b431cad4fd10d3654a22ceac73553a6b2b2218ed6526c362df46cfa776e2caea0de61b9d5c0c74e03e299ceb2221ed0f30ffc5876354d5607c3eafc77f78e4fce5e0c7f6ba7d417ac5f0511e2635b41b28dfb4f2fbb73d351a69fff920b76f5687386114b3d5ab9cad056c88840a023b7e2df73f007852763570d38a966c8258365b014a12a3497f506dbe55c073244333547223785438372884ecd8b66aa0a794ab5fb" 
"94b0a519bb3cbf01b43463c0c7fc6ebc67754ca25686002e13edad54c817b0aef64698637d18a4a8bba382add892f4918b720aa99b09ed2a6e02b7140f89e3e00680f37343d3e47412d04ef78005b8b9a23b92d145a8da9c5efafce374955727367a7f1a179b990868550cf960c6df6baf2cddda5fe3e689de8dfcf1474db419ecf88cbce9de7a58e9d8a15991fdf5361846273d195a2892fbc95ad079ca8153910984c4694edb4c790f430043c4019fbd96fe49d8afa5e7d1f6674e4a125bfbdc916b0d3819566898599443ebf2a87b1fdaf41378227d396d2d320dc5b860705bc87f45eba2b6473234fe054267698dba0913ab1234b46697c54e2b19526d1ad4b7e3eab40a413f86170fe9f2a71eae2fb959a021b0b43516f1c8a3e674f37ee235ade79ca296364b0cad5ebe8449e09b63a34e8711587f7f2fe6e181a787b1d3a8f30012ce9549abb834fb80c673c575a25d3c33bb6d846ac231f411dd6422c59215e0a267424c0c57e6c9bd5486e8b6327e9dd16b7065eb74ef91ec9204360b03d08654a4e418346ec2d4d21edd5608a76903494791546d430eac38178d158d61951de3c61fbe5d56c22cbda4a3d40297f7abd83913e8b483d9a80cf000810d90a921f453bcf9e35732d2579c1aaef4a6980c666e3b273a9f91d9918f850bd6e4475d8aa5cb616cec58d6ab6d70dbe2b0f7ad85618b6e60dd4ff5d0faf19dfdf27a9ee48cd7b2d6613e76f04ab6ef5f0af12966a90875816c27c4297a2bf622ddf66fbe7c211670d0c46c7295b93bd2f1" 
"22568df3dc46e9294c7258a0b7e81b2d45979680edbb7ab323e4857d84306ccc16ca79c711144eab7b37e3437245d7b78ced1cfebfc45892791b9ac6cc1211f83e328ce3f57af3d89b5be89dd2efeac9d738330bd0d8d4a059bfac06d1ad73bf6d427541e559c3d16eb5adc4380c1b25c1b8a9097ce7eeeed1c5d6884dd1a32ee2bfaab8371593a0eef65f80e705b9b56adfc0db4c272024a71947755032a5ebc1bb346ee8a99b01b408cc0b1658a319ffa5ab2eb87e9aa8b3dd9d9d92ce3bc04e4ebcc011a280143927676360f249ccdaf7949bb23770a06ff5861661d36d761508f7e9ba149310d1347c3165e07997853d415abdacfae9579d1dc0b5990a05ae9e6dce8931ac2db9414546dc64f8161a64cf30b9ce8c50ef2a99775f03dfc2c611e780a5cbcc27cab920a87d940acd8b3fd42897ab6f51b29214275bd564c50eb7aab3ad19a2c903c84d2ed5a23c49c81d87cf3244505424332c917d7b671d4a90765b8953c26bb7ed5dfe3e93632610ab44296afee2b5c631fe643a0a78eb9af94d700250f5a82bc57d24825423f1ecfd8cc2bb0daa229670d0d9a4fb342ee8c9b7b16d86d29abc2a57633303b918ac78ea8d2672dfdd4a06ea0bbd756fbadfb0c09e2426a65e90ca829ea00ad66ca8c9e79b9aa5ddd02d435cb23014b1033da00381ddf2dcf408660d1eebd1f6c7bf5ae9fc3fe47e75ff7ca482716534a9f3365f5cdb48f3d59fb19d11bb8782ef96e394296594812e8a7da23a953f6117ce577e55f3d6cb1d3a4007dc7d252c7123a8" 
"37be12884e54ad10757af405beffb5cff189133bb7df5fc009544b2d62ec44fdc0c1c8240d4413af5b36e031510b1f1537a690ba7049cce9df4bf4dd63f6987c513992fca78a1cb7e8d670fb43a52ea2ca2f49724e35397041e5c75a365b510f40fa9bd076377274d6a95af801981d71972da0a08b536b024f439c43d13902878798153ed825ddd7dee8937181823076f036caecec170edf1b5fbdd84e530bc50a7acc257bb9679d72de3f115602d18d2d12e6ecf4d3242ccbe9a71a1483e7fe40d2447ba028a76aa92c13516ebde90dc4d204095a554cbfad79d6efe4ec540c7b51593413465b929742b729ca688f67ee9d9fe76431fa81217fb135d0dd6ebc91904efcb0cb6dee22867e5ddd7453f530d04935f41575de9ca457da55b67791d2e8b83890b5be543366b92ba6579a6f19f8e82a0bd87e379967766e5b0a58305b984778c562ea03a8b8392e3160ea4532b6ce5de74bc8fa0e8ebe88fbd62a73d7106a309f5a5f5d7617664b015e166fcd87906caa80ab4eb3e62f73e527b5d951a0ed0340fe17bb7b2692e4a31d14798879788fed12413bac50e490ab93ed66311599a6c1362fc60da5319ad907c7ef7852985ce86246276a138379d2004772d4d9a989b83b3e780bdda9825ad06a4b3dcc9a9d4d8025cbdee7cb2e02ea1f77bc90bf4ae56903859025b7283ba6410aa91933466623b996e9ad07e3095e376b11a27ca451c246d5561501e69c6747013ecda44f8d1fa50a75572453c9ddecc07b1aaeebc04cc7e976915f5e68d1236ae2ff" 
"dea4b9fc4f8e91b03982801e2ba604b46ad80f966838ae09d2734c6482dd16d7738cadc1276593a336e2ce8cf7ce48d1535c7865f7b90445ff3ab9e56f58e254115bc07710de50d7953238d7ca419013d104d90fe79794995c28f219c963d716bf8942e0cc5cb432aafce4afb42f74596b847fde5d87fba9adce5c17fe590fe58e60379393e521ee194fe063211d72c29d58f7dde89addb6b0e20515ca7aa270df2ef2d77f92219781502c49292c6c4a985242b9447521cdef5a52b53b5eefcc43e8036ebe90b51a3565cbb180ea1b3e3d20f63b8f420c2a7f01c475428d5f63c66f122654af4edcbafebe34970c152767cf623eb4f1ee33931a79622cafc70cdd2bc7ccd55ecc1e0aafde3f66f5414315048d3c5c51638c35fa920cfcf7a18ada48a589c12e4da2c801cb8bf3b182463707a17891cf296ae8aae6a8a88ee3d602cc1bb7647861f65ec1a278433ae08d8c8e63727633425fda0b86d78378ac80b1bc1a48abf270dc2b5ea71691eeeb979950cbe0ddfdc451dcf8e3dc657060f4c3f96512b21bcb228a966381efa94bbf5ff4bbf38a803b6aafc719a545e4d0582a62e81e6468aa04eaf131f8d2f545c060651e115032f5b3579fdfb95a2328f5c9a0308874630e840ae1dcec1b9543c36267a9651c94c91cea42a93a91ba3a054ded4a8343864b449e46abec49474e218c8c541b00eb0f8997e710025631ac28be3f08126446dee0cf61bc69b85e4fc021f203c796cbd2ca16ebc8fa15f55510a08ed334155233c6459d2d428df31a3f376c" 
"d81a530700b3ef08631dc5b50f787d4efe2bf219bd17f0431803d9d946255716e8543bf77fc44a48abc70a97feae8398c2059938d39fb4ac5f7214d92bb89fb9c45b6d117fd51f6207935beb1a89963fb9d1aa020669bf809c21154c20e720aa1178ed2bc13fd548e0d7d01eb1d028aa48318a02dc7aa412e2ae01ff59a86dae40771ad3f48f0fa54b6e679854be00deb9938e37ab3a4c9a96f3b7849ac75b82619cbc806c42f4bc4feb1141f6a8391bf9335f643ce5cd2791590b28b19d03cca7b5cf702f10ffa0317327e828deb4791f71500f243be77a451e5759c6c711b38f8f62757c54d7fc6dc586a90df7777d8cf1c72f9c0947af005d770f4a74b6c9413738c3b5ab32306ff5b41a6446c2de3f59a27b79d877d3f05fe22d11afd69e49e59f35b3725a0ad126642f388602b7816abe397a9c9233cf7d1e12a00362306d2d9b81fddb279544f35e23a8c198930f75986f26e6f292ae8debe5da0a7a5b8add2be71efc78179eff7fa2a2dad35863b69e85e8172073f434f48fb03f7bd1bc78fc2badbda261a68f7bfa171c898897b3b0d4852920674b8d9ffdb37ce66c1b6aaf9b375253a0d74eba4d359737f7fddb42471969d81605e41f615399c5fd6cce1808e9b511ac54f75f774e84b00970474f5136447af04b4866ab6c54aabf7a247c6caf3ee891fecb14073f3cfdc7368ac00f6b1c9b23e301e49257840f949a57c28a95c5c490bca91bf979d40403f7b9458bd255df757e6eea0bf41d5175548aa46243d98f2f0f6c754d6e7e58fbea97" 
"7d7e0af8b7d0a6bce07d0c483293868a914a50aaedfb9b239b4c3c472381535b287a4146fd52e7bf882c9c3eff7bb2fae15d5b96bb1222d81d26dba563ac550e716b6c08b062cad6702a33a9db4274fa2e81af815e8325101d5a9ce9b345e29619da9e45dcbcd7b0935d7dde07644edc6b049eee9371511bb2cac50ec1170c7aad835c54fa52c8e0a0e8446356488e09c2f07b17413a7ddb872d05016aba129cc36de609831863747310f0fa443480a47524dfc5e1f34eef3ba2fefa29e596e7fff86a924462781930fab55e71fc2f06271e62878e51e0db08ee5dea31f1d2afe9a4f548ad6a4f4763c9d0eecbcdc32323aba1c9c12554a5cfedb5310b4a03caf426a80d725fabd557493c46f2a174aac851d3d39529d5ad919fdb7fb0dc1e5b0ffdf706a9f5af36fcd2bdde28d68c5af4a1da4e67cd44f97b555b62b39cee1274b7c3dd3971ace3da6101c87f9b8f28c5e13d4066a3e63543825dd8bddc3e90b6dc75bac78931da98929a337817f68deec6065f6f7883d5bb10cab909c9945f71a672eb2cda9fadf4a8d9da906e2a5d1f589193b4e791772663f1bbe751498bda065f90244391169d80490208083de39bec984af73dc99b10d85958f372004a03962c45c531b347851dc5e26bf7bcdd68c9b129524d6734282bdd431f991170d6a5c67138a5405d8005b355ec7ce95496a8e98782f6d978c42c30a17db9c12671d82f2d3e257f66980f20bb6380303f1e89b10035ae7bdb3e55d31f2d1574784aed5c95aa09aaa9614989d957a65d893dbd" 
"abbfaaf30cae0cad575e39f5311aa00a6979fa52ec12dfb2f731a3ce5f8b6097a612c2ce98f5898eb2d1780d0cf9ad30ce5395ae871ba7ca6a0884a13c09732cefc5aed9d7a28c09041cdd62e75d7396432545f0c16496b7f5f516fb2cc603c0ec10a51ee952b7cd0593ec00dddf67e27dfe3f0cdc5bf737170243a8ed3c1f59733fb47bde4b6578d7ef11f95790d4c678d95ab2cbdb1673d2d516c189af00f996371077276e672f1223926fdcd6627ff86816906edad3aa97e3a9e7346562add05ec1a94c2dbb7f3b28ef537715a1d69761bfb8c2092e608311af2f79a4f8188665a48539944374437bcff6e59bdff4e4b9e4dce11307d892915071157698460b9e9fd68ee0d1acd21434810fc8ae702fb8dc794ad5364c79fdd74c8a70f390556930fc2a23064f36411c626179d1d745d4875f5c2b37292cb8ba37bb78d419f05e9a5d2245a38da20b6b14eba2d5ca3d58d23bb5ade1322cf337eb75a97ce98c167b6305907c3fe18038bee1e2450c3095480f99c9f12d2b543b33866e5546a39d539c6e2d639356bdbcbdb3b4e0935ac76e0fdaf54cfdf241d2c5ce135324885f8cd69e6562f48979352bbab357c6861c66b4ff7d9dd5d32a8ab8b6e759a2f5ddcee847fa439a5f9e3989039aa60751019eca6c7dfcc2464ca4a1ae12f079d200961797cb0e52cb046d1f0cb1d97c4699e07f019b48edd6f4a71b99ba26c2e5e72745cd9bb9a7e89d8eaba646461bb76818fcc447de2820196e32cdcf4a57c527c52f64d316b513f6a611c929890be5b0" 
"3b3d3352cef23bf86d0e058b1cd9c4a10a9a01060aa9c9cc4bf42c7c6cbb677724db3f0c3736461c1828e67c9916e953057024371bb4ad8995672f760c47574bde9df9e73af90773cd46c9df8cb655f8c37eed8cbda40da06304471e32bc828a7dd9457fbe4d63a15633009c1a9f003f3db7f5b2b5e3b22c60f747d5627bce3eb4398a543cf24b18cf0a56728adcc253d7f5343245c1426b5bcd9daff94394499cb6d7ac2b4e63ec424c66f5dbceaf877fc13f47e744aca7d8b5d89c8d5621f4e13488b141062ee04c2312528a0a987a5d32ebc6ffae45657f4b2d1420890970e363a124b75374594dea0560320b36133e31d6a978f90ef079b81484503c7fc3edbceadfc9fcea06f271a60ea6c5d434b694ace1b506eaf013aca2c6103acfe6c565a5a24cdf638f8ee282ac812e32cc2662a8e2d4a31239952836c4896870d973bb65b280f0370f4c3a54c7f4723b2bef522ca4c233d7646da3fdb9743e273afa1e3bfcb947eea9f323ca908bb4961b214aa906cca1d2d56eff25d60952cc5897ee6390f9af4efd5d48b2aee8734cf6b8042f2de75b107f8d135d9a63148e88e43df815fe7871a354741f8863af4e114ed0369515bca104f8d3b24a2d740b8617de3e96a23*0", "vilefault"}, {"$dmg$1*20*f615ec6c463799eccc6a2dfbedf12c6bdc422a2a*56*a595f4a81a490e7aa6378034661da57a424f922c971d3db3f856f8d54b0784bcc5d7182905c4237153c5d250b8aee1d26410b1dca7b1cb73*48*74a060efbaf2c79d5523219d8162c425befbb2094fb46e7ffaedc7cd4f192e6f0c47d8aa91e0a3201346725d3ddadfff", "vilefault"}, {"$dmg$1*20*9c82b419bdac1b3e6b71f8a6b99a7501f34b6950*40*5da479e292e0acf67a9fa3e24d0a767cae2f645ff63836665068637188f4b80295de79aabdbc2536*48*9b136165ee73418631ccf28d5e77073788ae921df596649a7a7789585db0f13f446d5927967e2ede20ce8a4f5389185d", "vilefault"}, 
{"$dmg$2*20*839730be2331c69df4f729ffe8a10c26653bea94*32*1f24e25712c2d70d000000000000000000000000000000000000000000000000*48*3231e20aa642889a7e087cb87c84ba1cd52864007cfea677796a6f52e16b2609696dde9230aeb5603aeb1f70f6701be6*14*8192*75884a049d2b7a40c14002ab6e511bf3c73ca79a2bb8285a3d2ac1d5b9b0cbf92d4a483fb762bae8485dc3fc9cd7a54141da2b74a86ea833d253d56f52eecb9dd4d40b9f846690378cb8a5db74fbc6d756ef9fcdbb5d21805ed43a7fb45d6caf6b3d2564f4a7760030aad69ed9e56789e8b2699bebfaac3cd73130fae1d8ef7f003e765e86eb84e990f3c24780022fdff3ba283ece4fa8d31716e5cb1ea22e408431eeb2cda1460217efda86461e940cb10ae602a84ddd22be53064e66c0973a04405ff17afa020b24f1bb4ce42750b28cf4e98c4f542576e712f3c2fe0a0539a411290f65ca763a94d865fc24b1beeefbb6b055db453da38e62bc383e74b188b86c54b62f589334de8ce3ab2e4643f76eb4db95bfc088bea8c4e88cfccd19b89b818fb698982f73df634c8a8148e4c8d3ec2dab02aabcf48ec0a78686fe0b4f5e589a067d6c54f0732e559cf9db5b4ae1f0468f5681226d3b03002cb6ec528b96470f1d1aee5d3b51b4c5f45a2702830ea35056e02279e76fdd30b3ac174cd91b65fd6a26a192f6e632b0fae660d0861059a62bc512f610f4974c22993bbafa364fd2e8eb53d07244d165f990c876320d99070fbfa6fe7e0ca42c0ef2f17205ca" 
"7196376d4026a8a93fa83a99cd3b6cde354ed3122dfc07ffef91c24f2036b0d83467e120b85a92fa04120cc8f7af3196adb6420f519c610983d163964b0cbd048adfb89266d9ccf9845cd17ed04accff9d106b7bfffefb365e97357fdb9ab2d0956411c0c73bdf235a9ea4b50962c8f258583899ff2c0bad6602e8a3c14f3c870fa14686d15aa17f5cfd1ddeecc7b061cb5c00db7d198d083a690ecee97a1b4b0251349beab744c4bcb53a4c1702d1094f6591ee5ae15a29271ee3d3d22f0f833219c3676236c9e9620a206ab6ab08fe5fc663f4f2ccfdae6e34adc68e59fcba5363f44cbc5d8345f184ccb38d52bc2bbe6ad996c3d4316ce644698bba6044209d108c698c3d18f4b64161651224cb015052d2e9bee0079b779d77b6623e9669c4ff99988bc612c4099f6b8bc9719444cecbc5f87bf9ca6dc30f3b346c3cf20cc342cd4d156ed67c8be0f1801c3e672bfdf2fb9e6c6f1ef3570d059405a8a0c5bcfcd70f7bfc1d2417e3ca205be70a5ffc9b4d1d123ff64cf72b20df25e9861e1da57fd1311451e542c25100c19d1d70bba2c26752e4cf1c59a6373fceceebf2b4c392a45e2cc7151f4cc1c7292720b5f0716cf7ea752a8a44cfcb7f638c5387a410efbfae90598f2d99cc79baa298e30076d5ac8a2094dc14d81953c09fca8b41f88cbca2274158b93fe5a151b93bec1fdabe1a6c67807d5f9d46b2a19ba85f9540cfb54656fe473216ee1922046c5b6cd08b325e0c25a420765a61e5f7a266c9e0ea1148f0e62ec65736d4cacef77940a0eb" 
"24e93b7b656e3b591f5827e78b577b628da26c1e5bd7544dd439d15ca21a3fbe96d3833ab1bddbb03beb8f0fe39517958b7bf43afdbc68b5061b41145e151d228bb5e5220b31a86878be40060839855db438368e40dd6b8d534c5c39009455c0a783455b41b572f2864eed60e5dad80979b97efd6dd08549c154b76f748101396847efd56a97b82cf62a25e26ecaebfa35d545cdf886ecc22460cc0e2983b9da14ac41dd1e1dead58a2c29a85f6bc900268d755d1158939470c4793359b50da19addd3d8f722c0a889ebd8dc69bd955b524bbe452cc98834613ea48d7a73a9b93820c0ba718cf664d82a1745451a204a2845d4e2a846f0f18923ad0315896b1c1ac1942fbdcba119ceed9e02b0e707b28feaba44bac94888ba1a31670cdce6348d58d2072eb13ee805d569815fb28749c392d11eb06d8b1746ba8eef3313072fdb4685f1401717933fd18edbc99e3d89d08a4c7798bc1d724d6bca02a31642ca0ac6223884580c0be8f6508a6650b783a9ef24de3713f65fadcb2da6d68c4bbbdc216ff91ea7bd24bd7365b91087c14edf70dbd4eceb2676797ead7fbedae77a0add9d22a515e2a79d075958d8fb87aa62700c62df007abaa3a5e002403205fe04edaa4aac3da6d08ad9ba909974e9091148208db90f330b2c2c702521d4b1b32acc4fe6b7ffd9f96fdca05b6c404afcc789fb9ad8c52063fc0f9b9cb4116ee11f07aa17dff57b889a4f4abaedc51a07481c1e954d78ead32c6e808d3eafe7cfa9d2d4ab4886abcd2f64ba2df2d8d507cabfa8" 
"d01f785409d71896461adaeb4e34d18f9b2fa38779f0932c27ba2f3f75ece12f6eaf7a0d728dc02e97cd44ff175b592b8234c3e3b5491726c58dcf0a1b77698cd38d861fcd549aa793f8d2b58d6afd1d9b7bb96c8936c960eaa7072c00e69f68f948ee24494b8152bd8e5d6923c8eb26023dc660d202e41663888a8e8550092b5e1610452c79069b3cab41a2e7459dc0d361ded09c9f1589999623f6deacf276eb72996a355e4f7dc19a5217e9dcb2d6a3e4679bed9f980a5dc8f24a1c5f4eef00d706566e12ac8deeee964ab9501be5e57e326a6fcb794e4f4fe14922704206a343724913ca2e1d26e3d83cf994cb7aaaf9a916ea6eaa06987a9822c5a8e556b16ad72d5f5640b3490d6b0f290f9f2db7c3ead435e534406dee40366efb98f0b53930a83ff9bad177b84343d204a1083801f1d68b3aff78ec4246f670f924969e4608b419ea9f5aafec40d902492f62844d9a83d65f38af2531b875b964abc781b3537c708fe65f70a11552990447bf6db287412367ca918a39d9e2b2e228451807b01174afc33f5f67d45f9c765015da6abd318c980fc8bcba60ccd5193e7a8caa54193aa83bff7b77725be99780da88b3209a3cec620c17f979fb16e640473b0d98a2f492702ab99f2f0f83bbdcabc2a6dc4986476f420f112ffbc7bddac8cffe59e82ff558151b9160e2f99bf37a05654253321591ef31d01b32b8d69297b3bd57f127e9f574fd472b6d29b6e9a0e1fd43252bc1f1b2c8c959f3f4d80177b4fd6a77dde8fcbaf1eabcd5e7f6d38630f35d" 
"efc161ba7432cc9af6bc73baabcb343c469ab18e4cf88eee21e49311b4f20077bd6e30705338f047a9c7bbdbe4dfa6d7be3a827c92823a3c8f36909f9e4df4dd91426b75ac6b5d953357929b0bcd91ebd24e651a855755edca82c4664d3c89fca6001ba88688e5ec8d5e5c3fb145b963b29424192530601d74e3b815be85ca44640ca89c57ec4ac7084639b82e23f065ac561779c040cbfe63310ec846db02873203feccc3f88a28fa78d8d567905abc9f8f561b4a29ec5c380849ada42100c15efd3d73fc203e63a315cc27b82f62c4ca0df9ea213dbf7eb39552fcc38edfba0ce7e25dd097bfad5224369f1d2a175ab88ee5a3371daece3342e99c60cde76a1ff5dc7e5ebaa7e0fb59d4d088cfbe7704126b2697d62d7b82289a35ea778ea4ca347410513513084f1fa971686724761f711a916ae1e92402ff3d52f948fdbd9c1d961c6ad6923c8ae9cf3a4eae7a9369daa5cbdadfc786e873b90ed1e8f5933ebd011081ae7ea236c11f0c53e00c1c0f9206f91e6954123b5caa08c7615a787c1661dc17f297c8ed2ff6c90dfdd9a262ab5e9a4489d6ed7ac032f72bcbbc2248e7f1675e2b2da0bf85caf89921fcd8e78403f11a28970f673ec7adbea798b3eff87fec642ef77c15b3f3d19dfeb74d1ef6a38ab938692207133aaeaf722aec4f6082a4cd742bd37fba0f1f83f01cd2fad6a169c4716940f7d74b8f29001f406de5897a5e5d813b995df132cc57a5d9bdecdad9024dff7dee8b89189d35085a70bba2e5e0a8c1c71cc593238f3acbd1337b2c" 
"c5a8647ce6bbd669eb939279d3b964d661112752bd7fb877c4c6ccb5ef72ff5446410286fc69347841c5595a3408e0c73fed8984d0c0fdd2544a168ccfe41386702f6ab7b3675a78b57f9782f23e0471e6dceb176dc9eb871ddd92dc0b86b2a11293523189c75019200a45213f0cbd86823f65f28cbe6569a58512dd469431322b7ca5b9b8ca57e56a139dc4788ffbac10fb57441f2435584651fa572450a4719c8c9b4a322f3aaedd3693a55820c725b63096d3f211d830d39aa89be83d59b13145dea9231266ef6b1eb1fdef31203922308cff81b166426d662989a350ec712dba14ced58df7dda0d0fad05ad8d9c6b247307d481f79e6a3cffdb2ab9b21a8208d6d7faa72b6f22a505d2b950884474862f6f67effc81c6292f3550c4e8852c39c52d952648b256e961d478c0c6979300c5188c490ce5c1e34ff6dcfca63c0f0571ea616651ef6f9781f2d355dbca208e56948ab9e26c5d2d3f8509952bba3e93241837b11a89caef6c956c9354ac10425a6d8d4e82bd5d7411d18655393d7c542a7c914a5ea6aba717a226e0f51200cc949f38c703f4f6ce452cc1d7d6ee8acf26d34f74981f6850b11610c11d1c5e6689c1b6fcd6b6e997ea145851c6655560c33dcf5ed7315578263c39fe6a838c5de867f1b3cd482c0206f56ebea0617ae25b3ca8d7e13849bb2b58ea4e21409762d549636bb7cf5ec32d3216d827d94cba1f36e7632e3a43b3203fc596cdbf879d1aaee90804fa0cbf46d08ff4c40aff8fb2b46f7ba8ce21d17c2d3d025b67702054e" 
"9d76716fe7b5c9d2f43036d86e6a17924d2f160f91110ed1f3364a1177aa6193baf59878ec84f450914faad409618bf25cae17ba5545abd33833ebf408990fa4236d322089aa42eebea965e59456250fa14bdb61a32be8d70372891a83e7bf298168c5431e0b326229c36c667217bedbf64e3a07019534a087e84cd1a9cf35a889d9e65a7be63e8d638373774148e127b328734963437e7f00253d2fcce7bc0d798c09326ccd4f379f8a29f2d308ab2fece6fcadd653b1a3ba53a078e51a1a87e8dc03c5c118444d82d9166c0c4c1bfbe8ee09be6f8cd497a20132d4b6e1edd13683b363dc6587de2f11cdd51674ebdaafc41654d639b6cdbcc040f5889efb1f64e1b873442493ebffd8f867f0e1ba2cc629bc5239ded578336a9e88ee8b2d1b71f6d9303cbfb8a35e4015d2f9ec25eb4618c2ac17166e8964b68a66e60cb7b464e36a2251243a218ee542dac96062ec7db751273435dca23bf3e8aaea895ef1d6f6bdc98fcb6a9e0658dbe734450682cd1a3fe16161a9fbd035270fc86684971e20f1f1869546e1b77a481774c9449ac6499f376bc3c0f0efa589abe3bf676fb385ea50618c681eff6e5359678f078292da285c4b5e66d5ddb43499abc3558490aca6481299c351c6b053739d0065c187f59767e7de24f1b7bcd2d80d0ab2e7c789a9f5172a8411a88d2c69d8f9d2744ca7e42ba8478648df29919c23c0f4cf14e2428c792f2d8abae1073b97d86c2d5cf2e5beebc7fdfc449ec3804a81199d6c4f24d9b040bd1feeaf141b7eea626c1fa812" 
"e499b74e86dded2641ce3e11a04a35c8b8831a4de563c3614b4048eaa656d8dea460d2c46f6d748be434718e9f54934804756fad07d2a8ace694bccbd7bf2e33c09199a22a98726d2e1a690b2a9c33e39c8746d8125d93f675c571247b0a060114eff4c32231898a05e3ced4721edaaee9ebab9b46692c65f086d9fcd34b86a499685010ae0f4423625263d0a2a62672624662a6613bd4235b7402573af1b0571c364f7c14e277b84e4a102b1055a1456b912431f9ce9e875056f8b48345ab09bf06b3de6126fae32e2bd61d2fdea29a2f3cb46d963fa40694c02657352b9b9918bc50fd7e26584e51ab5e4bbcdcbc18b9bc17d3efc5935ae5077a269fb8e912dfc91a2c287686590c3e2671f6d29365c044fac2c077fb5ff280b0a4d69eee3b9538b4c8a029a3360902ee8291ca9f1088074f307392b70a7a43ceaa07c47d175b286c052e2412237da3f6acb1eb6b1ec386dbcdf5b49d2391615788f401ec234b58b112d296b389ede47243c01a1a6d18ca5dd3f2646d483b97e41370faa1c023118a1d2006694debebe35046f6e5852952bb520c9991cf9dfdcf89e51fe29d3cdad6f1091fc7c450782f06b09cb8aed1e1f95221af7ad369e49ed672fbbf2d255549d0fc0398dc6b4d37d038a8dc9e8d9b4d6faacf3c5fd10663107cec0e171ea6e1c26eb8a1534646e0813ab0fb449d15b4865eb2e9914d404d06c1e284f66e39d09e99eaf7c2f36997ac6ecb9197f8ea7fbdf7da38e427dd5179ef265f1471a096fd24d8ea2a2ec3b820c54356cd912f06" 
"9accfd370ca945e60c72b5d479b15d52a5c3c4423c73f4ec06d9201ddbfdaac2e304b1408674d40c203ed48fbf4b126904900349228b28fe262539c9a12270632f28241198381c6e7174d275227c99178ef4942655ec95acbc19a3b96fd1e07b5e0e91488c979e7e25be5ea733bc3171b2874801157c83a6de754ecd05cd78d6d2846e7ce19f641bdb53075dca078ad0ddfa871c16e47da96d007b5e2b2854d151dccfad21875fcd12df56dee7f4aed6a54fa248ba2721ab2f58c1157c85a3df8486f99295f2c9b8e8cd7a65145b69ca93d0ac4fe328e31c07bc1d0af2db886266def575d74be200ec9a4ccb0213743eace8d7d39f810e3877876082238d72c375a5cbdc4d7de36c2ad90904a173df80195cff86f19a0904d18a1f8a92cc4779e5997dacba58770c5091dab5b832dfaab2d0fd102b99e3b8a799ac6e7357b294a31db5f9bc3d04036a4a6e18dd47dc88b0f07e1c4271e5106f329731ce4dea9f56f6d63beddad788d7eeb955589a13990cbe3454b07f63477642613bd77f3bc5d024dbc5c55a0c7426ac7cfe63dd2da9f0d5a7e816dfe5856b646b648c302c16b50296882c62334c9b8e56ba6dab63a9c787fa153d04e5e64503c6bbb9bfc8957d2fa607ecdd3714123dd52b6f9c1a3a73f649dfe67fd7195857955cb8c5470a9f363116cbb580b793033280dfb63ae47b384e6aed677251b63a7a27447f37e9817f10f27c4a0560ef34c0255617cfb90769aea2e5971077cc89022f8a44493d5157ab2962946c7fe600a24f002cfc6108d345" 
"469a65f2f29b55e4da3f4c767324f173a11567ccc401628f2934989b29875ededce223de3134b7e99384f94436bed28329daff8da5690984b491d43f14d86d5a5e783545442f913dfa39f25f6360d2143fbe4c7e234a40f65b2c48ff5835c3fab67a92d0adbac9e63993db052a832b1c7b6045a495b82ed0d7f1068ec96fe1519493f7376a9f9f331f6ae89420fd1b523278df3e78c7b957f599767057113d5a1895801f1fff1b7021fde8360c4fc1ec8165132244b680645df7a1c0673728ca6323379739905856537091dba18f762b7be6f5f7e95212c402b005d73dce6a7775e90093f927edcf0d9ca24d04809f953ece372414d5f987ec2ae030dbb547db5ec17bef47dcb097fcd2fdd873eb93a99e2209425d4fbb589530fe41bdb5daf8ad8f83e48557a01d2ff6b658368e39bc8324cc2756160cdf56b8d7fe231aa03e82bf0b3f55eeaba71133a6bbf72342727a52ff7d158992895c61c0bab4cfe42ba5e4d5f239ef5efb6433dff84a02e2a5f12bfc35c1062e4103a3f8fdd1c5be28bc83725023c8a72d2cf5103a7c97a23b2d9903a1870726ad2bbaef7b7a6dac3e36c1b92769cb3f43eea1faf95c53db0cda2a8bea38efc1dd11695bb5de4baf583b175a32d49f98c37510e9e56f3d9e10bb4aff163abc91a36f24fb38d33d87fb4299d5ceb5144c69cb741b03d35436002d7740c38753e284a808a77cc1d4ff9e63b9ece720e778497c25b46ccf757449cb3b3fa8e5bb6d5a9f6eab58c97e9469cc6192b7b31362453faac839327067f41f25ff" 
"34c2cd40e9fee3a0b8133f266407587ac40db20e7d7d397e90558e54250111f540a44a70d427497b5a06c8ef87f6bba0082e00d42adc7eb38e890dcf5cd426c1bc2b4c781b07670382aa0d13e227e05c1987d3cd0241b5ad78387e19dfe4804189dd8a10cab05c79409b9414a6a384cfaadbefcbe8e3521fcbcaf52d92dcf1611ba3a824b576051aa24f42cadd7b7e9841375646740f2a6271d81d2d5f4819ae6a5d3f1feb6f7923f4252872c3a2709a8b8556b3977af8c4423bdbcf66ade1b3c4303539e06957e8930aea8ff70d6a202407aa44c6c8dab0232a33ff3f3ee9f61ed664bfadde8d294022da21b10e0aee583379d8dcdc078639cf3a1ee18d6ee1740bf1b917ff56070bf807b90d5a19f37a5c31214c6a19532f364d463595262ca057f5865f0d55636ce080acfd4e303f03372af014a3c32d2efec8f7f6cd6c825e5edf309ed16008e50aafa2584804c1897f6433e350cd91e155ac786dd9c3deb22a39d69e85331086842f32ba7cb6b4d4f13e08d90acaff24315020f7efb2b74214b14e840d739378afadcb06d45e7bcc17f2a03ed54d0da71d865508900334386ab96e11b88d2811c84539e4e2a93aa27d66620500789bb4d595a8b2e5972b1805d88af2b722e1e9b8aef10ca3dcf5ddbf3d20a6f101bf8f8a8cad825946dbf0c64193689f461bc0c62d138f902575ed601e26184a10ed9df17ad4be7c9672147c0158f132452ea502948a749b474cd0a63ae5cf942609e4864985b4060239d0cee6c78ce4dfdf5750b51ffbd5ee920967f5" 
"dcc52df6771e286eb83dac1c576f1a073687411cef3701ce6de66ed17bfe0fa5f03c63f96fb40ad70b478aae1e16efe22cb9e8c2aa57d5498803d35fde7f920b32ec686e6091a9ba6eb91fdd17b3302b760d084bda32244f704e14af619a5c9e72bd14c4e69f51177a26174c16d2e3eac934f184d460df5640fd84c3d3dbbc6785c249a501203374c0d58852d52c4c64a6d70ead2af1bca1d61f6f4cd00c3892565e085d3e603a0586d176f478062b092b205807fe7438a065ae7dbcb14f69c92cae4000dbd6804bf4eabf112813ff0599a29b1fd8bcf9d0ba7d9b14e40e38826b48204d8c0a50fd804167c88056cfe77e7a75ac36b5bd049571639b3f02a7e973abfaff1327080630a4bbaf6a096005ca2ccd54f076f2c3311e6e7b48bafbc9de38d01c8a01ee41d25ff0f775a2db4e34566e377683bad9a133482ab87907769bd783bd170b616d48974ad332e3defe94a2e7d6eccfb4cc43cad93b53c476e7795a087fe58cc074b591315daceee3c02af54d9beac8162b70dd9863bcd7702b7c8c72022856f78b2d249cacaea6c1dbf1317ca9e35664c518bf4155501ae77ecc3f47be6e7151c4d5fe56b893c69f1f939cdfd2b68830d9ea47a89fa7b3d4f620e0909d5a97f2637e2eaf223f25fb5ce7949e3ceb87d93db628872fc469f58a749e8b4841798ef505ef2712a3ba713386dc56b83e504c3d24d2ae8200698f9b3eca8d7971f7b82dbd5df6deb34865e2e6336fcd2fc3ff00bf9c8d04992f012dc9473e347ac05aff1040f010b1683c10dcd0bb" 
"49b7b5883ceb6c0bee4bd2ea6d275f884a37fc7151245274f208a457f4bcf180d793de68f09c7b03e7e430dd34e553362f91c4e721926eafd54d6c8464082d2d4a4c5b4b44495ddb06290f01913e68c7cd95963242df31741eae89eec41d0af689518ae335aae42c60041154356ce475ba0bc7f6c5ec798cd7c493aeac5e08d7ef554dc23832161a615a6b902e1d4f7bd076f3bf045360cdb73c3b2d7c158b74d2b718b95189225a0824a38836d1d4dbc5a2861e62f8a8c2723cbf1fe8951860f0cf7b4c6bc4c307cca509435e077f3947b8fcbb8ba1252b89d61b69b0328a2b1c31255c2c9df670bc244af42599cb5982878fa363627b321302255f2a20e04b70e8f4f63638af83a98ba40c55ecc46230798224de084d2cc203841d91c4f049c9b0a98535f3f905bb80b24679de883470c8225af80361031354483d879f98b78cdc5aeb07b371fea8355d146f9bbe16c9178f3d83ed63e2812048a386ef85d6c35ad696936a008a524f358ec8a2e40081c3c50b73fcdc6199f59e14b6ee213a8161f675d5938ce72a848ba9e7ed930198d9ae6c43dd86d94d88c5312be17b9dc590072e382607390e247869674ff446e8c37d89b7276aa61b5ebeb0ab18f500389a326341ee13283965dd4cce69b666d2c114372cb0e5b5d9921cfdb5e12aea0d95ec0a73c8d07b3b3e0dd8d159d323feb4bdaf6ea184bc2fbed75e7cc13bde26aa597ea7eaf0e37aa4be069c2c629af7debd8692befbf74d6c9939165e3238d8b2b573001ce957942b199e5c57935ecf5ae0" 
"c3b161b96f1f637605bc29bf5230fc65524041d9970e9b4bd6e7469e0c0bfb62e672b30a7094b014c27a06e3982d83a951ea4207a4d7b38eb155259b847ecba4675c3f82c48343a07e2d5fe16d3189c8dc0f4bb1fe2ca4abce4638a4462f0dd79d69c240eeac8ee4bea297bc1bd5683ca97a352712bb4461fd507f9125f895fc7ca8fc76c7f78207224d0fd142669137ccbac0f023fe1700eef77abc804e9b9da27ad5c3a767202a0d0a36f8fe86e2a8ac5f30303c39fad8b65a206239b881910f9d904f96edae31e4befce7822a7399ad06355bc3c7198eb1a4b2c7c8b4c92a604dfa4905109c35edb62dd3c817cbf5261f5069bccbcf98da9ee5ea192151237b31131953509157f833bb1b482cd011c361d768347b2d0da11b1dc43b392d609f0c4806d7325e92f9d76ecd278fcfb9d91e9993addffa55d66acf9211b7cdcf28c73bd4e7cf83a869532c90f9880bb963cec69cf40e117b3fdf9c0c5c9d6570a2458aa9d14716ecb8b6642a4cb1fe0fbcf8298ad0db3c676b9836910658f03bd47ded56ed210cb1e2f1088c87f4e225faabf29e2d450468ff6614f282e15b4a6fbcc9463a16f802d3ba071fa5b009403478f1088ca8a8d9eded648be7394aa6bb3590c0725ec87fdcc53c4d2afea49ba11f9f2b3231c912bdd9431ad941a7d89f70d8e1669e90553b047b5f4a033437fe3b84c05105227efb5390e6e99b597fa1c35a1940f513ee8aaef9485d1ffdf7ce94fd34dfccfa8f178dc113c32082e0345f6d39294ef283b6f9a566a87b1122e74411" "8e643cd6a2ecf14e47d68254d26942666fcf957586497c72c9e5814ab3371fe4b0f9a7fa1e5d9629d0dfe9e93fb388865a599076e7ba983365fb3bf574d335787416c099c545feeea69e3069d841b62e4db9833e6865e24cda78e2bc46ee83ad5d79bee507c44007200e64b5d1329930bd658e6f051cdefdf758e5b023650c2abda7a6827ca394c086057c617dfa8c161ea1f953446d8e0d5f6d5c76bedde8d596d1641a973e2b53bddb8f7bfcfbd0fbe4883f4d6d4e6f930e51d47ccc40148e6ed1b409705e9a777f1bf86af2621cb1f04ba160a5faad78a0949032e9dd7e34bbe6b2fa1c478a990d3b7c474a2f81af7f7246bdcc669df005adf397cef71869237c53126d1301ceab14011a529d4897cb00f7d93f35031facdcfda8110b9fb5d55a057ac9087a9cc8f1034e03f79a806db8a8e726e8afbfcb2c7c39d3315ecad3a2e542d94753b88717b7791c66c47a45f499885f6c096cb1093d9dd6082ba8eb2132e4a80e22ee309b7f74af55530e190d73315023fe4b52fca855a06fd111fbe1125910f4ace6dcf228447c007cf82fc50993de0202d28aed32ae795d2d75ba8c975b78c657af*0", "vilefault"}, 
{"$dmg$2*20*186673f316ce762e8f2b2595b3e8ea204aef584e*32*df036556654b76eb000000000000000000000000000000000000000000000000*48*71793cfc457320157f12b1351051f60e59fc80a728f82f0156cc8b3f20f75bfb4289c65e6c8c21589f3dc6187540551a*2*5953*3c25089e22f54dfa868b7460f43185a32b6988681952eca4a493ff4699e2340f8cccd06ba2df28334dd01b83f8bafa3754b7afce8f859ffaf64d33950a817d5ffa9671894f71d6ef35aefd00d237f7f8f413b8b8424db42e6fe7bf503d1d4222d77d5c3c2a16f26a1e15d7797cedd59fbeb45f70ff7731cf8be628895f13cc2937f82c92e0d5c6b6ee0214c668ad1ee4f41501dca668af0f83ef252bd6b6444f9028f12ce15134fcd8610426b5a6a75ac25fa938f93280143b5c991a683fb008a08e133a962dd4e3aa9ddb57e72955e3a840c3599b84d874d61cff4236fb487e2a344ee3311d30a531a20ec800ec591607edb97599b297ac67e173a4f7d98ce2d73b66c37659bc75becb65b799f0a1642a4282ad623ee574091821c971363128e307288b4377e1e90e831b800936f2b5eb05fd5d0e505d71e7e34311950812131c5b742ea238bcdfacaf35e23a4b5b9ee2a7c0da6aca0ff02595fd4229baaf700eab8ce7ea772e133bffd5665ea3ccde2edf61d11e64dbd1919454f977a31292416c86e3e11b762a3c6f0c27cf1a07ba3c4197f21c8959e0f04fae6a086be6e77b47495d0cbfcfce05e34ef361d45b1f8c5068f0174cbb2ec9a9f37eb6ae1fb0887" 
"17630b97bf46c801ca598878e6a8a96b232266479925e8f170bf76afa4acbcc6c7daa51c2b9a1821e5b5df170a8b57aa371019c240626b2f2a9d60587c34383ea7c12b300fb478e2b62ca9bf54b00f04f4970a68d6689c4087713e9b6be1e7c92ef16a7cd527d1ef33140d8d3994c07d8ae237e047bf478f164aee1c6300545bf986e570a403ef626c5fd14044611621bc5d5f37e417175a22288c2fb45b0e11e946f755fccdd774e5ace72bd2ba44be8f673235e9b49c0fd4d6a912493fa797bd97462de0402f77da7eee2ea6c0d02fa880ba57390eb1f73927d4616b95067d18103ad4b10af7a40b35e620211acf4c9f47fd12080b2df1d350d17afb649ea5e8a038157561b107e7d1d00284a59541c0b759bb424d2795ff1d3bfd7749461a9f67502df649d2d69e72036ab4f8869c7bb35fc999a9179612524e2f9bbb00e7dd5ef8fbdbfc486447ad5ea93b7220608aff49eebb98a1de88c68ce2b9846a63ac6b8878fd645bfc0c0fea6bb746b15301f58d2b9d2ace73828a623885fb495761be85780668b436fcaa6367776dee9e3af641ed5755f1cca7a931c97162f6879c7a3bf6eb47f98590d07654be8fd8582c5774f89bebf6fb113d75d28afe74443a64af360f41b9d243d8fb865039d924fff4586e3c76d9d0d43f8487200e802adb9e01460eb6ad5538d8549999c4b38c41dcd878b8dbd049b853aaa4426e74226fa19d3d501e6a93aa99dcea681f0044e15a05c2d08ae49f625ffe88181d2c1fe55e91b6f602409fdf961af1da851fff67f1e9" 
"c9ac10dd3960f460bb8f937ec415870cb9e99e150f5b2a2308f2136960d199ccf5900f130a3f4610cda347991cf34fe46717071dd5ab2e8dc5bc20757fe6357fa56a18a606b25c51612975f51cad52e5a20a8eb2cefc79732fe19baee7b8c65167e2949a4ddc8d1e262b47c97286c2d0fb7078b3f553453445053d82a865320ead1ff4bf4fea84cfd7ce21e7aee696a15f92da1f3d73c394d47a254247492fec3b6582c94cad0df1b1b097048c9c91bae6aa269f5a074b796bf86770059cc767aa07fcf84010b1686437042d16d693775a03d9832857bdde9f7d98392bbcc579db3bddbc58d8cf08f04064e3eb92d87829e6617efab245cfbb6d564c5fa333ef560d6105c525e39177ff5530dc154b691b1dabf14d0da99229a04ca5c6e7956d474c0ee578b1b287b0a5971506687670ea848820c44875c74e69a79b36eaa3cc2a5a27fd5098f0fd3c190089736a271ecf3f14b3259cab95b941bbebfb5be132d875328a1b0ddeed958e8ea454ef80724f878a2a690bef56fe3ea62f47cfb6db303ae608957dbbd57735195d6b1b2ed73e69d1ac4b4b4fb01c20eddcb29e8b44bbd71fc25515885a56b8b7e55edd4c21d5e8cc43417e94e57cc49f279d0ed740b286d4e27c0b909729c4250ea2d1857f3f7d801a87afcee46f455f8a53e211fa0a311006cdde262ad4bc47941bc52db89c4b454b7075bf29d9cad6c98b7e84318a071789a78d1a83ece7a24cbf17691aec06c5fb7bb8a832c0aa33b27a5b3a68ef36364fd85cbd19e8f75e184c3d1cbccaf7eb" 
"c71211506021ce0d38bf8c0885a205d7f4a60f7fbc972c7e2365b07d5a52fe8ae02608c7bfb1650ebdb4f2620f2698f5fc90c7b42a34a31732d2cdd12a4bcae3ce399623211946f74c67c5e82c0f53701bb4460504e17c1d6fa14288a63d97a86068be8ec36670adc16670b5cb3c09972b596cd441e4bb9b50471708bab77691417517e91883df9f0b353c2bea3d0acffe5410097edd2b3886592cc70ccaccbbf64d168637a8a3fff0d143e497e5311a9b13b4adcbe8d2625dd1fcb5ffe9c83ddd4a1cb3046616296faed945fe7b29ab6f912be6959f8768ce28958f2441a1e161147145a1621693b9f2d24fb9c7a89535456dab48dbe15c689709e2af6a6805edf923d8504f3d2cb8220ff9966f854c84e9ff04fbf45e42a5c73df4f719b9ed287695a4a03d5c0a3a964a7b6e95bcfc36a292b23774812e8567a02cb8a5baaf89afb900b3fb7be40c9e8432656307fbf2487c0d1f3baeda11e803f9f298e7e0c478f9fac11a43ca32e2cda46ca6491cc7b31aa1725d24805587722248dc326cf81fea4fc1ba9a58bdce9e34740e3732b96889b36e917cf029c7027c5cc985f8b3f0fa4e504325d56c7e653ce903e8410a6b06a2126b3aae2030404441273c1e486bc8285dc078c1874635e75cdb753a0fa821567e8116179b78039f8cc52675d538fe38a71f46792af445b125dcee671bf7789f2e874b25f05a431ce574a2d85762ceade5e5cfebfa5ff62b1ef5ee155fe418b16638c1562b29be425e05ef0237f03bb42181f55d4370272a13d5fbb353358d" 
"a434519cbd0e4fca54f9cad4a7735238098d3984b0cb9360eccfc63b3b4339e0ad2b2719552085d7445681c919f21a6b482402c271e34d7f9fbe4fbad68eaf825c57d22ec0a2c5ddec8c1273131b867a3760626abe779e37ee632f41f212e9a9aaf26fd5cb28df689d9c4875c49db62213faa1e18c35b5d2df1fec21852e7c35d20d6df85ca2a6b10898b244da31dbb6de3a3a8553601c0dabf1e5f4755fc77c1561223cf0b1ee43441c3aa9d855df0831db6a7f6949ff0ae1cdd465aee616b789c268417de07e9c0f0ddae6b07ce5186b3b83ef96fa1ba9fabda1bd79986efa852a348364e33e89458550049522e64491a9b24514665af058b4be4ba690299d3c2379b25ec97575a9312b38d3106f805e829bd77033f4d5f1b35ffc7289c118749b31f17babb56f48aec597049d635c055d056db0434493a379d15010f3325690444e1021abd622d18ea7e0b5d5b97054708ea9087b4721bf857e3504aafec84516feab2a6f6309a506cd3e931ef3ef47807feba8ff0b6dd56eb83349d99be8633675eed19be804c06d4d81b0a256ec95cfbb2b6565d7906537c5adc404713baa8fc2e0f425c577660df47198e91d2eb3ee7a9a5025641aaa759e7e1f3dfd85c83a17a6a59df4af62bc669f28d12544254f4e0527a6b10958664af9378e41aa9f88ef3041ee6880f23a858254b5d0fa7899655e9d06f12fa863b63c2c950a0c3eae774149502f0fa3c3a44d24add7f9426ceaa21dcdc5408f0b96d63dcfd97dc4a3ce03ccd56c8d48ccb253e82d50123e8a51" 
"76ae5d1b9cf6b6c11d2decea9f91e9ddfea605eec75391ffc4e01f4988c0ee78ccb3adb8a5e16644eb30e7e76ff251192fb3a8c48a68224a2cfee4aefa616ccbb68abea13d335a4b212b0b9841a42b418cf413fc868a842a26950e11061608a623a5dbd520aaebddfd1a559705e8cadf6abfa272925651f84130223b0056be28b618bfdfb164d2c5db86d82ac0eb2c457198a6cf8b0c2f2560eeac4441df45a9192cdef63a00adee0aafed7e0ab0bbb0c0b9a066f9f45f5e0c6a9376a069a45512081ee3edd2e9679d6c46d71e3740c5ada7457fc5d21610edccc2bef851d18f89e8307105855da15dfa749c44370b8149de48309f99fb5040d05d0739a64cf253855c185550339af73be6d5cc2de3186ff4b004ac816c1f4afcc83ec3ad66740c57b9cf660de7ab97b0771189fae5957751eec58a3aa6d3ec6121bf767d13533ff413c84c1ef47142f51ebf515c3d60a3c5cc3b9eaf9d43d2a84b94ce02db3f254862cf3c6330574fde5f8257c215c416ac3c9833839d5b33436fc12c21046025a4b0be90f18dbf002e001b8541b888835ad138def9910c4546fa0cf496bb4415463cb10004959dc6b0e379c18090bbd1aba6e9588fc21a89778ed1a1c0533049867569691aef6bc310fe4853e9e9bdd94a58943017a197526c70d2d278c66e94aa97abe5af8d9faceb0fd4e102bb69c824a1e4709be2125de420aebb11506bd62ae6b32eb1bb2cbcbc35dda3c992193086b11203775b33dcf4206a976b31222fcfd8b0e6beab7eed02f9f6d0dc2959929e1d" 
"30c856a672379ea1a20bdea6e023fb7ada31f6f9e02f354f464b2261879372c0c92ea462ad11a83d54bacfce3febcafe14753d697e905a7c77031beb83076444aebdb99cd1aa470d5774ed91cded7eeccf7fb18860fc39577a054b17aacae86d02c2dabbd3ab068c982cb095d135c11daedd863bf9abafe991656d1f7773cbc05aa66c4c800b5763fe845d06c3b19f4f73dedbcd50ea363aa11e8274d541ab754209fe7fc159e7bbe317f8d9ba602bde8fe02171f8daf608bcd4663eb401c7a3f2cc814bd8fc195cc192d4d6fefbb15b9d9738f5e6ade7826d65b9d8477ef500afe2e40077b6ecd7d3ed78233fe980332a313fb2fe854d6becf9ab4c1008cb1b16a513d3fbed8036ddaaf372e8891c59c6e9bcdaf2d88e22d528b975d1a36af2fa792028a3e1161a74545eab1cd6284079c2353ef1c49e3e1242ea52d22d8c7d64f553e4c396e7d62c4a6619ec698b56cf25cecb6673d8a3a703f65e480f1b8b91e4427e9f1e9dfa1939134d03cb3115167567835d449f50cc9bae06adc68e3211d8e0cc1faa34f7bda6e1cfb088fe980397f4643e89052d2bfeb233ad81c3cd466bca1b1007e2e6459e3aa1e51f1a326a2f5d89407c05946b0dc7741f458464b5e4ceea5e367a2e4f0d007e9e31b24f5b7bf69aecdef4ef57de58719cf9fb5e8f5366452013a5bb69c3f1807d83e26bb63493dc141ab1ae8eeea11c495650b346919de060c4af1a80823fb10b4cbc333b9d6d05c6a4c293a7fd524c5259a841500617ee442222ef2cfc71a0e4bffa87903ff5" 
"31898a44452ca2b132c4a633c91c7a24bbc885a01001988ab845e53a350c3b283dda71360c7a9b47ae40f72737ab6be068ed8ecbde1d0bcaecb729c5bea691ba0de6867e6e6879fdd99efec2b6de4c2691ec9031189491a01329fafb2f0d0cc28e26a22bf55be6ca866dd4a473153901f244c63967e829d9ae2ed83451a365558b697055a3b9a6bcb1bb40ae56f13d4b60defeb1a06cc6831e175ccbdb92a34462e786ea28e2ff25b813b63b30ea3b8d9a0921a5a5bf45576b39fbab6071fb1412670c936b5fc31d668026d297c5b84739021c4e763686e4011a2bb7e109db8e1d6bc853235a44ddd93f1012f7168ba3091a2a92a3e05bbc761fd97ebfa22265e6c1c2bccaa9d327d4ad61de87d3b5f0c5b29e604f79827064e05eede8b574c8982bcc0439db27b15bd7ea9a38923a1982fa7063f9f1572963c75168d53756803f6f60604ab33388ccc1294fb0ea143fa5e128a060da40f4dfa0382906b878a602c568f3c99809cf1d5912f224b2adfdcdda84df149217bf8edae18fb4bd825900ddc57ecca2eb7d209ac44e06e674c2b7c126756bdbad066dcf187344824050b16ff9414fe957c37a048c3a260a8dea72f7a12bf5b35e1c2205866bdf85367d94af939bf52a3027e2c560ca096a449b7297687bee98e4cc56e1449448461d028e435fef26f060097cd96bd605d5a1cf6b1cc95c49037401878b85d437ee43bcfbd7b2b8c145c05a33fe01226a637dd677bfd28c8acebc4a30494917c253957462cdd5a3d200e350f5d92c5c57bbbc7b2392e4" 
"569610f35e3707aae8a481b8500dc8dcfac689a018671a0f3634d18fc7bf4f7c58933da452308e348a446ade0bdd6f02d29cd8d273544ba46f1767873717fea45f0e0980339fc187acb7045612e95db5dd9c89169daccfef2e3a01c4d19984f8b1cc960d054285119f23e746d743a0db459bdd5803fcdbfe92137e80d47c84c547848ae563695cbf113253b8a96e368bdacf59ff73c023d043348c1dfaf143ed13424662c2da644c25b9d22598813e1973f30ab103c0ada9ed247ca038a056d18f2e7c8443fd2c95366b387e9ab972170cd2b4438455dc73619ab3444da0d64b0b2d3a9d640ea917b1c09d17c37fd587eedab367235e1748dad753e4cbc74dd53017ba65571a5a65269666df0a24bc694a2d24e862830e7808ea8ffc1fd6cf4b29564c8d77d9692d7fd55e496c69f5f17fe145abc0dd1818f2cf6eb979c33eaf41050901dbbe5a49c8bf9983b1284fce92703b45c4131b3204fb9edd58b6cda3918cc490051bf9d6751b7702e577b700230f1820238b959e46f7dc3a3abad842814c69a76be5376c1e7b35e3ad7318b3439008e4c3801bd6754fe67cc7aed658d89550a30cbb1193eb5d2144eb7f84c5c6ee9e13947daa3534ad4902ceb9cedcae471547bf95e2337760322b55af97457d23d174b1c6f3e1d3585feb000953e298e35aeb467e90342bc61bd05af59c72921b2fd4795c19bba268bc6bf4f18349ca91b89cbd6814a62dffd4684ab78e998f7e3833b51ffc495ca3e789e685417a0d972bf4192b0c50016a64ba839da14c3c5bdd" 
"58a74e96e56c66d73e2869323093892c5272aba5e6edff5a8976c5e04976c8bc1b8cefa630cd924b5bc7d28dbc67b8aac4d7571623c4d412acbfdf61603d2cdf1bed6fdcf8d88519a3ce3c4803317587c4a7dd33147f66aad06554d69138959fc3172298be9f5f83748b83c6618758bb45058fab1bbc1434b993890288a42910b91bd52ac1abe775acb09cf7173ff9fdf0e644ee94b000c8ac5cbce24d424800a9df431e03c650b3f4196115f100b49b7a41f68ce27e5dab5865b40a0977cc1be995d3504dd3bfcdc8db2a57765b1a80f6cdac0db795336bc9ffa4cc163df1d9d6e034d5b246cf59ffb2f81ec02ad4c48eb652be03c97a11427ab519d8fc8d704fea98d597e44cfeb168f3fc1385f1a1dc5926dfda78be4c3a3e1d024e4492e952cc8471ae1f26150cc065bef433c0431128c7df6c57bd79dbd409fb0684137465ec0687ec2ec45c6fb76eb88bb7bfb4df3fe69421dc7e0809e2474f987a59980fdd92b2a66ee31fb9560b4657a112ae523caec636642e44b507ed5a900fd65e29d35c89d252708b7f2c2daa29062b94577b0406ab9cda76c921694998192078e2ba7a90386e1544444c228db678f9c7da51a06b9c0a22ea26ebd3dbd8880a6e981decba2f659ddfcd15af8d06031e2d8ddc587417ab536fd4cef49372e0510c58060f2900e030fc894f1edb6aea502b0e2642a8cb1e0d22cc11a43cfe8eda906711e059d6e4a55959cc337dd54428eec2c123f5cfe185a78f442266f54213537af2f4b42176951bd9b0d1b70c61ef5e728acd" 
"1a5b0c8f0360fc3d4106d1f1a6a100326500e25cf6ce2c7f230e5e54526c3affad6bba78eb0a275ef942e441919384b0420571655eff68e32cd97a322e22765fe736eaf329f41b2ea005ad56acb4c092b7bcdbf2bf3e54b058827259bac8bd94ea73e1d61cba79deb078857c63e255da3b8ed4bf5d4f603d8e3e19813fbe997afbd272102aef06950ab6daab60139fae51f0fa8b48f3e056a360f074692f982aac57ac3472539e7484862997ed283dda8be4b22b83235299d1b20df4ccbf0fa24faf392a8433535d3f3cc3ad7453b9b150dae24b8c78f149b53f5394af065082540b46f6ec3e70e2428b873fa564b548cc1e39fb406ff897662ac7e901384b3094c328bd484980c120518a8504511644b0616215df50ce1ab6106762d52ef24d40b9851168c69b3068682525f1050fa3ae139c9500f89d1b5a96c35f71e25f8ac229518a79fbdbfafcd67d7356bfc3e9699f0e5a8c9fceb068f810cf2c8e3042b5fef34778a3edcda569dde4fbc240996038e50e233652eb5f303fca7f8f29c633684566f6548bbc311bd24d7e0ba95da8f02917048d9777e5f142f83cce4187ec1af72b6b6c3825e38646f9f29697f6fe3b3cd76*0", "password#"}, /* test vectors from CMIYC 2012 */ {"$dmg$2*20*dc39029a22b86bb4f930499578d0dc9eee69398e*32*bb47bff69b10ae67000000000000000000000000000000000000000000000000*48*c4559cada09552ab075e73dbefa4aea1aa21209011946e423ca707753a91c87f6c4cbed3beae20a244d33568f852068a*6*4315*504c0c37c600618fd54da114fc0eb24d6f24585568543126ac56c034cd8d7b3dd991f1418d0c95791e091921c02bf695b7835f7b0da2c1b96524e72b4bd3f671c592aa176b6a58de77a35a26bd1d0c313b2ca23581027fc52c7c63f37439404218d720171d3b178125e6ce0646bd6fa1033f2ab7b6849b3a35a430cbd1401f73b5deb478d6d0f58364579c208c613cb2349fb19adaf98be2d4a74a6030215793fe4f1129189626bb87c23d26dc2af51a98e1fabf2f58e106271c7759d104b9e5171d8f952ceeb14317614b7a14a5313029aa4068b898f7e0f5b68683feff0d375f2ada37f20135df443bae913c7e96a29c6c3388b4b51432add89ee22826ad0b1b0a4ca9233e691f71a5ae2c76b5e5a135dc793e081dc53781faa4f844928db94084b53b39f1820c8342b563e3f46b002bc52ced63e4588388e69c9e85e2002438a1a703de411717d24ea88adef3051b27def61e4b9a31548d3714c3bee39fed866254033a123429043d0c08a052d2999a171b010ffd119f90bf9222462508ac914e0a68daf93f63caaa0c4302c9b1f6447ac3856b09eb45096b3a294731f110b90826b0d611
e6e045397b07e5aa64afd271f1c92664e648af648642f786c0c8aae" "6218f4282d8efa713dce232fb24df4073a0e04edc86d940e8ad22db8ca751143743f9f12585bd788551cc7b70821b5c42b133cb7781f60d1b9c345e9adb122ae444be456b8e49f9bab0e2033019b52f2ede4e7f56cc1d1dc3a48bf0666cc7a4dc6b4ffd5077673f2f6761688e4452a4c11b82598cc0ef57213f6c7c12ecc67164ae501b3e87e25a361d0615e48cde249f0193f2aa69a1eccf029340531becdee8eefbddca18905451b48c1085d4cb965786d3892d7144841300b8d2722e92af50fb828cdd8e825dbfb16328f7cf792f311f84078d45306fa570661e1ef2b34d5d36de2fc4b295f5e84fae8d55ca22bc15764932d0c5dd3cfd914b2b8f67477b2b5139c822ee2c511a03f7e9c717a5e8eca6c4b54f9c3b7d85765a78f03b29fb979811ff0c655522b341bb54ae3bc412eb760eb689c6b4c3bfb85a8ce794946214c574105e577acc01d3f8885e72db52075d05a75260a6e4a54872d087040ff38f8942cf150c3615088588cc53fed11040bed573c0e9ab14b987f9223ad089bb73284443f61ffdd61616b8a783e85618217e8bb491a31b7050421f4b0a0bfa5003775933db00e47e4452adc1433da2603f6dc5b9dfe58efe458da25699e512660ac6f1129dd9d7b176a24109c6e6e0c201d784addc9c7f8d4f309ef6fcfb02493abb7c836ba3a371e64fea941031a59adbcd4ef59f0dbf31f361f4282a0e60ced4d9d17675b0422faa1c2f932cb525ee07df7eb2643a67963aa99daf5b119884557ef1585d81eac5c8acf32438636a10d043bf" 
"47093fb53a5b3ad544a38fbc3588bea3ed616167a79b2133efd8c509f53626b9cd7b71828fbd5d61b1df6ef3713b5347f65e7c0770715ac1fae561cc548864f9cfe281c6e5770f053f68ace64702c81c97976f471ad11c7551789ca21a4d5480c5d3528503f2f7fcb268c34498888d5fd3edf1c71d12581c393db2ff863e22c1f6c037106e5928aac9118702b45bd36782b2295782f93458dc120e79cb3d1632c2c5e527e56060b79a751cb7653b8c0ed2acc32168b56fe5b50ff9e49a71dc9b82f812b53e095660cd7d59c04f31ee47773a04eabccd7a4a6455ebc7d719c9eaedc4e6c935fc99642acd3e60e0f564efae90d7d1308d6ddfe7eb89520c234cafca6bc7e8ac96ed401bf96e3c9de704ad124b0f9381f22d9ce846fad0b14eeb5f93eb0e0fd0657c480fd2a1109d735f3825db598e2aa7e624f282673947c38aee8832ec8d4dc5d6a7306e3477ab4e37588788109a3ed76741f8f2a796d0f5bef8247eb298fb973c4e5d13666d87b0bf5a7a553f208050dd7140f64fcc27793ea82cf58fd86ddf805a700065888bbf6b5037815afe8c03eaea355c90bbbb448de13773e977fa4c6f06e7695e80882cdac40301b537fe254eb1ee437a6ccf3efa68899a7188e6829b58977917a9d6124cd2af7cfa567fb85aac9c6b971423681a0b6658575ea0dd32054800e08be5683faf46165c56647e1c346961608bdd8e6f999eb033caf73f000a71961cf2fa8c319f4084c0ab499caab87d13aca3f057d17748522f08b36c56c1746e49d731f9355100879" 
"d7d114000293520c9ce71098d26b2114030615aeedabd5a6f7fb9a91f98b7ff00ec72c82136a00e5a19384084e0aebc78bb3cf05c3c1e3872f56e254c68694d930eeb46ca8e99329eb923ee0f1b5af0b7276e8600e25f18642247111eca41da427e5b9034a6a22627734ee024c2e2c4277edcb3a0309c3007c19416fa131086eccc6f73784e1a008dba5166e7c8aa4cf8efc3a4e14f59d665800982e46341b9b098508510c7dadde295a784f7a7085f5ddab5b6881b305f99d87ce3883e557280bf2a1f3adc69b7cc9d4f339623d21d569230e57a2bce611de7495d403adf451725d7ef11df4bde5a31a95bdda0d0c2a7869ddeedf2ca7e1986ef430ed44bff6ae6e44f740b2c65364477ade4dff6f4eacbffc67a2e0494c81e0424bc9220bf20aa795e2b20db6076667088b6863243ccd2bf897d4b6e1e58e2662cac593fb9a86220d65964e7f6e0f1987d07a4a8242c41c001ec38ed2442011d8a56919800b4d590338eb8db02833031ed0422bc08b11dd59b59f1d301e82154803076053464120217ca64bacc02465cdf629732cf709777452e177f4a4d1015fec4c36337ebdb8daf57f19bfeb247a27131ec5280038f3d1a766e071470ffb685cf4d9763b7e1b5776589874f3cbd4761d5fd35638918ad144a4a1bcedab9d652477951a716e4073cb36640fc257031f06e4d6f586a9a0b6172727933179e4cd433ba940571f3eb908535a12e9cc3ec1e8f8aa9975bc17241779d972a8fd8581dd3850905cec48061dd5fff1b295757e38ed8568c3a2967" 
"ba271e00fb507b10bdd5ac5b90426e48e596ed430b5a3c554ca1cd0d18a90809d8db18853e2580cf2b2ca52ff686b7cf360799bf69c008f87191ee372b44f96696a12632af003eba51adf1e6101628168b92c718c6f7aecb765125880f180047ec3b89fa23bf57e4fabbce38ef0fcba829123f0a3ff527dad6d6b5b0c4b0c4c4cd13787e98c829bec08728acc5e90ddc6bcfe2254eb29ae8450ae87841a39958ab80a38c8a742de64a44e25df0360a9e8672148347d7812bdfcd9037723edbc5fb4a8bba689dfe3baf113778a498e2689e8cf1ad194df422838a618b0cb222aaf020705fcfe1475a8c205690379cbe2d0b5f9a0de41a4d2e6ff85f1f19a97712bdbf49bb90051ab934407bdda9bdbc1a57b0e874f3b2a09df45b7d01bda15330ccc57a752deb2751e495e394471f09f33d98d8face401d418affeeab86be36cd8cfb0f435d9939822041f256ad860733ccf137e582e1cfb5a8b96ffe646d1928657c05c67b8589a90fb32e078697fdf8a3ec58dc6d350a7f50c83d09e5884317829d8e850b7fe17bd2ba4d7fd94b86d060a3a97880fb350b95cde4542cb7d1a2f44f8ea065ae30fd4d4b5fb24f787b8462115b3a918155bae098f0fd7ae2d4646d3731d228909f690cf0116e1ac15899513957834e0a74d8c07f0c696cd3268d631ce1292f66b2633a3287a7e058781aef9d3d566e4e41395fa7e1793aa9f669aff116b99660a5a29fe127a0459eacc3fefa4be95a13499dc844d9faf72dca38d8032932084faca23e4022869f2034ace2de0" 
"b286e71f2b569951214fd2eaa3d32da48a234265acec4967c74976b5b5d635eb12cff038a4a23d6c8e86a11a408aee5eedfa7209a8ce8d6bc10271e4b5627e16c5f8ce8000882c461de0113efd8ae9cec6ac4819ab2d6f8a9f189fa2929807fb20a895204edad9821d180c54e865548f9b3eafd8073a734e61d574923f0d1f69d266d970102434b0bab705465833ec9926b03798fa8a95ab98d35863b7490db07fa1abd600abcc3718d105f26f96d20e593ce0c82efc68ae65d03e4e2ed3faed27bc5799e359588fa884ac79c1ad4f5f8bcbc9a2a5605f97551710e2e416aacf149941265406490d32cc6bdde994943fac2102e57785dca3c20358cd431cee285768d9eed6ed32a9919e13f1a38304db6a57f637b6a5c8adf4e829baa82ce674ec7444fd9f7f1807b8f65d4b68ef7b6c3fe5bf653e81525f7900916f5d5809a52c070256e6b4cb332fced5e460c9a2f62bd73392bdf4522be7c211577559f59f62869e0a71f832ff493fab76bbe70f3c0b902fdf45cf49793afdb87558f1a6ec289018035d861990eca1dbfc412492cf86503af00c7db7a0a2c6374eed42b440293938a36f61e1c4c187cd50d974f2a0989b05b8ee207398560b516aea520044e37229fe0efa8b7038441fd584d79c010c0f31030d60eaa4dc1fbdb5a254c089198bb5eba6fe20655808c1d22b9604af1247e2b820823b3c622be2b01ca5f16f86af880908ace8765520c813afefef18e2c112a72fcd4760da91f7d1066cb5c8c902745b83be8defa193bc8b6b93a82efdf17" 
"13a223660c6ff4dbbbaccb1a4e5482cc238388448e8b9c24c9aa3acac9467e1f6d96d6deb1cbc9fbbf77b7e756068e22bc3b9e6c275987c5eb99da6a5e2d90a1e0558c4f9fc392371c07a7844cb947b19dd1a6d9c1ebb6496f36bdce2967bea2971cc1c6330b1c31054c07f8d853858a46ae9370ff1d6ab755beb120a61b4774fba521baec6fe8a079862a0471cdc5080c0f073f7e3d33f0f25978d098f61bcb4905c776ce6c0562dfe08d8b9f17de4bc2048d962ad7f4baf132cd0152a904fea9530e7c1f52a85c0188d6ca38ff9b692b2a68204a6dfbfbec06f2d800b4444503bf2dde736be4108845c5a28909cdb42391b5a0207c157003b8dbd4e43996ab5017c5f21cf0d4d9b3145c0cb70fefa767b4689cb750fa7657c4a788b7759f86496998fd4b99b2ad1b2918bf330c1a81e8986eab031e9f86cd93b7d623c72e1a394f0862a193f21eeb858524477c3192fdf5b61ce9dd5b0bf3b3d7adbfa828f1a9ecd4dabf5e318fc40262f0dd204f28b934d1af7b0d7cbcc20be21f1c7e04fdf76104767892404b14965bf8d53003ca9ff0a8f15f5d9b2e152a662ddd8eaf7902854d8561ff088fe2e880a18a036d06c29997dddbfaba32ae4ed70b47413c2a037122d830d55bfde89ba645562cfa1d29f428da108d93562bd291748a728d1b3090b8a7f56293a3135f05d6876021e92aeede437dc7ab610e1e5af0a00c880887754d76b42b059f32f9159d25ffc56a993661d06a7973d190fd10c4ac998c8627b494444389c529e41982726f47135212b67" 
"8b69ff36ad29e225856ad2081bd393249f469648e6ea4445e0011adfe320b4eb5cff1d9332c1779edae5d5d66931015e793f730be8482b5f488ca6372edfc71abc4b8aeaecf8051bbcc848d736eb0aa0d7ee4cdb9eaddfdcd4200c3e2f58a97a162565409abc44b8e982fb883b619fa80c7c4f2318954767ea1c63c70124f4342118f2c798adaa7ab5f6ebed1b0a15e12f40978ca8e5f0972a47cf397746f9f482902abdda10ee7f4c610935070f888b5ef8eeb07933e1d6ecaba243fb475b4c788cf8b453638ac43b9f6eb74654835678b47d9437a14300a12553fdb10daff3690e0802dab80fbffc401422a465e10e6414975358249d68e4ad5a1f1c93e295bc10b8c5c11ed98c7ca5773014a2739c0592dfa30d8756be1f66e4fcc01beb2dd58d87800e71d136c12b8f73298cd37b1bb5758376b2111921fa9f7040e69d3620415ace96ebf29fc1a87e392a9e701f4075208a1a8fda7a59b28997c017da70c18d2bbb5c91db86d701cae85a5742842fafec723be9d93b4225619c7188f5bd23c900ef3863068785363ab861b58aab8e91b562b26f72a812e7892ca0bb6ed91086a2935ba82938b367b34f70cbe40c02a8cea92a78588f90cddcabd2738c9a18450f6d3a87c7f827a1773c2c7629452f64e1528258a8ba75bc53245c705246963369f1179a765bed41d*0", "654321"}, {"$dmg$2*20*0e2a3f19e5f9a89ef8371580fc08738b0dd02ee9*32*57b5e138dcba821a000000000000000000000000000000000000000000000000*48*4a33cb05d5fc441fe39477724556bf2a3445d2826dab91031374075f9b5cda25084769a7af11b2e678d79514be8e5f63*2726*8192*585b8129cddff9f9f5875d62364faf4dccb0625867ebf2cf7ebe08913e340c8bc5b62e4c4152b2274a19c3fb7d0f6ee32e7b6c502073785bbc213c28890b9910c878702b2e16ea0c0b0ed1462b831b1eb02a0a5ef586de3e1bb7b5f70b64e713f2bfe7f401ccf0a4430981b89d23afd47d05d1d28d64917ad2895af8264350f306b7a0b67029f6da75fc60137b99131d3678cb8c596295bef4eee92110d09c52cb30486709fff75b80753378918af4db98e69905245ec52c2c6ce7e71ea62b6e530269af23836fb40cbe12a1498d3d4e66ac26b04c31d4a1cc169909f51c0468edd44d051d79c361f547d7f4891195b96950ebff98f70b36106772abb775308cd6d42fae3a60d748330dadf7ca90bd474d05cdc678a0cf41a5f4461285ce0ef0a6df3a400d0116d1d1f17cd10be2c8f164ffbc3797dc022ffe52b69f0303526d3a17c113a56e67e54b4de121787dc62977af8bcde3f4fb596762ce31460a6f97d3d07874ad42f97ace146ada9b63f579a411fca985d85d64bd3262d1d2ab5721119
b0cf8348abacf7aae2f57d3b667a5997d0fa448d3da4c51a6f59c6686a92a35ff4d6d951dc74acab9d956e9a942d9356291f56046c612ff09d1e10d8a0c60" "bb2a4d273b03962f5399ff455ef480018dff09125f6c343f28b13acdbe7f0309e64406d2c453d57d6e78f10caf01d8dd274e0ca6e4a82a208750de92640ef97f67dddf90b0c6de767f185b6bf17a119a735cc97075b93fceeda807d0ec20bb4ed923ed8855202d7d285b767727bb5db55241cd21cd5a7353cc872f0d4a00fa0a50608eeb4cfbda71109a4a2ae97f2c01a40c4968c32ff2c01f05ee768b2ab22f12697805396916d8fbc1b06eeb320d619b0e472b763e7a72acd949e17620f69839543c3852c83e5c3b1cbdcfcfe0e3507a4fecfaf3f27118b6738ae8e33801cb1a2b4168f8f614dea5e673878964d6e27a1d8d8aede3bcf366400cd0155cf502cbc04234a2a418638531ef13c48917328d2bc1736e85be9cd80cf0d99b98d0baf9dd9bb3f840fd15d74788043be9f791540248b5dea621487810371995e5fff578de770699ed8de1f5190cfcd5d47320594299af29efaf204e0a411670c6f4f60652422a7e25ded5fcf26c1d83f805938c1ae578bcab6ea5c679939e5fc6593248d6b8fd55c454d2c69e8c756982c01ff76b4911ab494d90df56d7743f4d8017423a045eb4215963317164bdbb473620e8a17507a9cf26749c6141ab7b94af974db92c875ecfc4ba4421a37da4454867ea3f7d8580185eed9ae3271050d039c25f7b72e18024f91edbf3e1bba71f697c8451302b1ba97c8463b3699754fabf472ac399bd3a783b51cc945051ba1b411ea8093278606efe2b34b3992033fb773fc42cef45fb0482992d5f867416faac3912b82" 
"eaa852935b54c1c05d2b5be854fa75ee754235ff1e84a53564070de838fbea7704fc249a98c7fd8a4d4ffdc06d5fc0ca39071fc5be83b0e37591e14ee76379f4c5ac64b21f016517ac44a12161543c43d40a8f92237c99de44ec220fdb502d82e96f01f020eef2752279a5aa3d3928a4cb594c5e145d016375e3d7a89d2bf12d4daf3886393c31615fef9e4201cc0208821e932e8b26df396e7c29f2c0b74c9f59ab79fa44b4f9c1156741e3da93df51bb23b756657187f1902f3d5c79aed88190b4a5f814ee1010b2fe82a3edd867457dbbf0598566d80261f83db810d058e785261635cfd1260c6b3b43081deedbf0b2a30d801618090d07340a6ad528b73c7d652efdc48fed161b0a0529d5d1e80fb0a63411d53e75e9ea9873d25a3bcb243faa406293f53a21b37e80023a302682943a30c8f1a5804a3700fb92092677602c39235246f359503cb79d2e084cccd2b40840acc7ac7b18b4e1a665e3833f5b4aefb40f0b36b70dd6b125ac9999d113fed15e5cdcb6ea6043036df3dec7f5638379971758e50f1453af5e48ecddf1d46e575cd2cde1b2091c1797df41f152fa77621f69169d42398312155caa88850800f9a8792c364021463467248e385bf45cd40c7869efcd6e9a24152bcfc8370ae901c7757a19627573a8832e5ea62c344fcd60230a3915561b6fd957750af61ced54ca1ff1a8edfe5ebbad51a79777ebd4e66c63a248687220e66d923c746f56f009f9d3f1f186d987c057af87f7a70a213c9c6eb93867983c3191ee956c8991275c5" 
"5b07b2ef0eccb8b0287414a154afaca67f218ca43924fffe6e6161690756e3d6a19a29ca972987f603727397e5f4fa19d0c3f1e74f026d35c028bb81450c7b5493a7d837e83504ae7369a49b2354c6c6219c79ad8cf9f5bda3765541d9691b84d19cf1fb9534f859b58257e80a7548c12ca2c0fa34b8b6248b30213be0eb60de5bd04621c163e4ab00d80adec931ee00288fb98e5eaa8f6ec83af863b8a3634f955b54aff779725479d80f2fa51d25e721b159a3dd814db70836a32b3a4e55c4def271a1918805f31fd3af464c01006560b36e1ce0a745d3bb121710083101d1ee469b971400d49483b6c4d858cee24614786f227f320fe6105d61fa8cf21136e9160770167e1b7451a3d9171f56bc436f097d73dd4c21c245efd72b63fe21d1600213ab4f2250e6c5a16cfd3823de93c9c56ced668faddb77d60f4d4d9a9a3b3cb9de0eb5694410fb760b7421cbf6e40ca4e8bfd4577fc3528e0162ea4c9aef069b3e4f199120a10209a6acb1eb6e39fbb23896860eb1366c6eef023c2bd63edcf73aac6094d25cf3c1cb0caf82b1010503fc8e09bc537e8e690f8bbc0ef492f848f77442cbf28bdb42aa8932109ccefbd2ad6563fd3d315cb79a0a5f04772105e8564e01c1e22f1c2ab98813979da0a08ee8812acc1c18097b8f1fd95424ec0d1b63a85e84257d382400c5f44f570382ae8128fc0935a5f7f518ae3808b79ae7aed4990edd9257ccc74dd19adcde363d4c7e5a4594e3d3ce88d308cbb48fe26edad968cd54cb715e460c7b421f6debe9c70" 
"3bd684a52b6b9571a7cde4568d7656e9bbfc5559d2c60e11054cba9eb54120bdf13c4c5103fc777033014404d6b4a65ea0a716f76a1433ecb904e9ac28b0bb8ab5c5b0216f62c18aa29b685cbe1c9172d51bdef81e7ead1ebb5d6c7cb078fd32cd63c72b163d2848de4c6dd59b35e853d6ec578b681af969941c16692c9010576f6f3777a24e87084c4b78a8502d083c137237a60705080aa90b2441e2f01ef9eef5b0f2b25b2b745136cb143405fe5c7ca013f88392428868bd9f06bbe41872c4cb1f98b16d74d064e66b0c435b52913b8153d47f52fd95ee73ab1f25f1533febb72e9dbf65d11a7568a17d2e8ea2616019297846551c6a3248b0a23e91ac1f38b21878a28f828e8aeb19893478aa2ff2f16833d1b69fbffe68b569afdd1980cdf6d8d4ff52d9e2708568db1a1b50847c8310e4d85dc73b59ee31a63bc894712f2d2214973c2741f4db4f3ca9a337e1f6c4ed3858370626b62e975a85e94b498f8c3c2073e6d6fbedb40e8a356e6d6c77c2b5e13ee52fafab4c8d369ce17a5c40deb98c98b60f433889e092d7da5e7e991b73c15127364d70a879b16ae774d65834fd0029c3a1239143b6398bb19ecda0328f39f39ade7a090b2c5c4e75e4922c50f858195c7fad64e4305d04dea5b85d4dd5a52ac4e60681c2337d3a2eb0b47745563f69352e1c17b08a3625f7ba530dc5a393238b6a2b92bebe6b94966537763ef66179b5c622ac068acfaf796ed4f4214d7fbb36eba5c9216cd5ee1d42132c459042063c71a1323eaacca0a94dc119145" 
"cef90f744d16226d7168dc9abf46551dbe25ce179e85bd44cf15374ee498f3f3f8fb5800c6cbfc427a834e3f7b3b6b6c7333c5ed46eb2a0c93e4eaaa6f95072221d7cc27d36ad53fd5fee1e65d91e37957a9d34901602d5f49799db3cb4e47e2c5bcfe36008ff0fbf166d9e541504aeed187251b80cc72804687f58b646ca3893e8c9e4340c9580a2008d268e07f7a0705bf062c6b1ebb3a62a4c961ad2f65ec9d44c67ad3a39117d2427d9c3d067df7c089bbc905b319b30d61d099265de1ff42a97540bd08a1ec79a4cef4f692bbe54ca6f95d6ecb82d3ad2316d6cfaf9a66a8b5e5f00847b55509cdd344ccc3fc640da87be6cd4ad8ab3e510b31831d3151b2aea6675c97767076360bcfe1b317c3786dca2e4b3e90818064abb319cca7bae051390063bc6a0a0a133187a60a6eb82162a5061fba5fe17f157e9e589ad83d2f1760f4055879445b0934c954622476c29c9c577c053c723786c8d25829db7a896c66eec594a6b798ed278a824550795b0904e154fc06ce8783a773a8919b624dab70f92000b832475b77db27d0b5bbc5578765adaeac6f61166094fe11603f37a41fa047156f2e57d80a47d110901d96e33b5247a587552e37b7a0712cec420a5680ee8e5550ce5d0996b235b8898d67126415184bc9a0ec172d9f78f595182400c010d905fa73b5a6fef2f722b7f9dc51b9d21d85ec554c9f32612fcdd89577c47b3cb5203132e76ed5a39af7e9cfa2c92369464e14f8333fc29fe7a662b9373011f0d4627c9ba7b0ab0c050d0e67c625c" 
"dc83a0e244dcfc7f5b58ceb0d1ca2f16349ad8b16a48dbbd63da41eb5d0732a13ce5a7ee7c9088739eec6d63e0a410fb53f83cc75915c0b6353a75fd2d219986ee35bd3991161fd054f0d39c2c9da696ec2968e801cfe726cd512ddcb6cc28af65b1f8e542d1ad6a6d76dd1582dda6af4f6c9363ad7117e0ea0102cffc1ba0d94dd8abdb5ac37ef9b444387bfac2b811479086e550ce3452f77461febec72ce35d06ec70b94779b794dab1a3fba727f364bd0a65e7255da20d77ac6b85ffee926a1c3c635366a4d5c8233b798e565752103c66d5e7f18f315f7fe2641dec5944e51e373f19fbe1b34dd00f4604a4f741a5d4a8c720bf4e51511fb3316951ea63c3129c4f6242a9014a78a050e633ea5bf85960fe340c54043d9bffb969f8abe458a8c9dd02e9416e0f3504a5bdbf6cd0b4013b4b548bbe59a23149a24296e0c326d69affa61a878baff7525bea12a4bacaee6c216de31e22e218a3bffc996eb7a3b8570caa06193b56452ab7f3430c758c3b447db98c7a1faeafffa497d938d9b952e3ab3f6774333a02742375e7e1dc39cee15313d69e8cad1a251274ecf48f273cb79c58aac657adc8d77f7cd1755ad9a2fd43b69cad9d2f8bd77695dac3c43d2469e4ab34e26c7debaf33eb2ca6cb7fd0a963a37b7dfd5304b9d5f0bc1ae0940bb40375001e9920d4956f4011f4f1263c3b7cb38afa1d8f7c8c188bd226ac3e23867f3989d76a402a9476756e03c6c3bc4e3ce78095125ee11e7b47347bab7a638b0088a3b18f23abae9ab2f94650a30e2" 
"9abdbba8ae9d9d03cf5b12ab23f5a6464547bb7078b91f533ea06541941483359a8562e709608e0c5d1da2c7206c5af49be0df87a3244903293bbcc121fd2e20ff909a90ed836f1822ee2b40530084f02bd9c42b350a4703851d197d9c465485112f1bbb21aff46daef510159a1f354e5fb7b11508a3ffe12577b40d3bc16631f8a79191745fe828303cbe5b6d9578cd80f736971e1f108f02039e0bbcc12b42e8860cea15cc18505c3e4242ef481930f3e2c4b64ccedb5b4d9837461efc7c48f8b1a6dae1041e696b99fd8c9108ac1fa9d975b4d5a740c4e5bab92004b7c91cb64e80a67aff2596c919b73d88943538e0996a775b88857187e9f97828f8661f89252cd0c5577b27151b5b0021f17937a9abbfd8ac3946fec79a4063af00802d54eb08461f951cdbcec92f593eeba457f381a7a98f313ba28d21d2574fc751449e1c3b497e09b90f8e1840e7a56159915d98b36647dcc15e1b335102074741f1dba46f0df9e7114ca29d02a7e4581fc45c48e6b31cb291760a05774fdfdc0448abe313ca496bd2d1f011f4706072d69eb0207b0289f5dbe4d1f73355b206ab3d5c777d1d9dd65281a0dcdf598569109e8fc3b56af94e4340929457d2c45d9a9bbc37741dc031136a11955a465e0baea8c11c06ae9321dedadc498570efc3191e67354f0cae6a763e84aaf74597dc1d329c81231546df2fd965d2ce0fa2026e0ca896d48bf8cff97e9e1fc5e035a13a1dce07810a9e87c21988d7e9bf19dd68379f346d232f83d776c36791ed1ede88f8bdc1b" 
"62e3e7857fddb802ef7771be6a2428b7bb7e419cd95042d7de60359365efec7397b4d7fd32a4d7e8b924930606e7adc49333809812635939f79a20eae6066fc494ad27aa5be989663ed12f9f1c82d092b7a4af546f6dd33ab862fe21cc45c2c7c58842360070e206ac341c26ef2f92cc7629d873a219ea1177ac6354e7192f4c3f3aedb580c322e1644c92b9882a96addd01a35371c07b6cd3d7e4e38d089559ee41bdaeaf81650dc263a69fffa6d2713d3a8ffcadde7601cd2a87c23187463d3f3305a36ea01743d2cd846cc5ac96c89241c86b3c38ab97f1ab7b9685e68260fc116b7d02db8cff929b871dc02379d203aea4160c6302a7bad3379ce2b77effb3f9eb37d7826181ac8f606e67026fac0f43e39c72a04a6278f89d16a6c14c6d6e3dab80e9089a83c7a370726fffd0a2e6a9a6a950fad60982eb28b638ebf2315932911b91e465f076e97aacad4c6e19ec46a8ba9e7a19fca03b7796cd6d8efe6d2fbbb96b3fd3f85d4622fef029819efb34abc28143faf10ba4879fa69d493908649f03853ea84bf7d5bb21c6c541edf0c0aa96347b4102cde3c27a58ba0788ac02cdba243a3f52e0ce4d682d41d432e632635cdce5be1542b6b6a8708e144a6acf80ab3ff5842ca2db90e9d75401cfc99746a0919ed81983d2171b4093b1b07e5e5c45992f657c892e91c16cc6017a66af6466ade21f4b378a6fea6a8e4bf000ee986bbc0a170467548e7f6e797381ee89fc431f7aa562110555dfa5c275523c202744541d51701d70a8f3006ddbdfa5f72" 
"9563bc0234d0b2759efb747633221706cfe73d47743ce6e6077943ef6d0801729e1301ff9bbf37f50667909f1cdc70f95040c841106ce566de5dded0fa485ea539978a88ca8618e566e9da4f2e215d544ee62accbe75dc17ea26962d78bcad516e6bff3152642e346444db494a909478bf6d80aec53f3ffb3311c6283711eb96fdbdd8e6d94c71cbfb9d7ddc7f092df5092199dfd822b98e21239bb8dd17f0c101909bd38d309bb5456232f5a1b731990a4cce847394fc40b859a8d89c7c02c388e7d6ad42bcf4818de33d696ed6d6ace4c23d51fc9d7d82d0602dbea094aa2db51d9aa8ef5c1f4803e40f6f5fae44da3c3c6ce9b1003d95300871353762062d1ad49a31cae73d569bf07d147a0c8d212e60b1be486df08bc353a2e3ca7337b83e3db43be03147114c229fd32fc2eea5f64d5d5d9848709ad7335dab3909c1232d93e76eac218e7e0497ad5b7b1ca8d9ad5447879b20dd370398eb8ce4bc6805064ccdaa6d8ed1e98e259b7654a75848705dbf2c3804b455a9e3dd2890f8d74f0e968dd050ee81af2f98fdfbe831c16dae6589b9b2a16965713b8fa52e5d2d4df504411ad9c14929e560a5f7e74e98d72f71223a5eee41a40d85c177183c510881950bebd3f0ac907fbc5a4efe70a60da6bdfb6870d7fcefe04fdfffd1492c5033ec79b8de002c41895ea6e84393db391b9692983c84148928ba0fae6b2ee3aed2289a9e053d47340b5faa4870fa632c1b81c516a58a049728f941f57bc34ad53c236d33dc2ab6a196e896968d0a2bf651889" 
"825b8f358ef4874b0e75e39331e513c506b29a61495e78722bb25475ec2ddcda0816ff634062a54721c9fb425ff286336e7036928cfac29216dd0eacd3e5328b6979f831dccf403e87ccfc4346f5743d972d5047f6055bd86c98b8fb720a3cc3f459750ddb870a845c1ff4bc3499b1c92b6e591eca7e94f1f8d2fa3c57fc97b573a738f7f55e3b6cc975a813ffb7f897930b8de8382c5883ebffba463ce72b0c50c721db403cef01d5be035730ac3c6f6a3f78681218656f397966753c04507e08a09f7176c3e37de40b9c7faaef1b675fd083c9cced4261dbd4a289f6aa0ba04964e1a6d328ef05786933d67d6da009aaac7d4a8ca31df5a15e3874eb9b288edf7d794e1abdf9e411c5bb87f7fb27f76bd62968bba4d53844e76487818ddd38620854debdced8930ead6b46f3bce6009683d3ffedfff0be83cd8727bbcbf428c761b79a3c06a7c2de7b99394030b51eeb954cfa3fa307a37881a8dcbcedf9549e2600b72f3665946d14071d9d22894020346466bfd2062e092f21e38e920609df77e3b8ec024334c9708a415d3408e22645f06cd6d805e8da2f4005000aed542aa995816bbbf32597d9025daea32fd07733e080188d6c5c7af4ce8b7bb25d7c""50e9f3cec80e86a8f9f6d4e78a40ee20fc3c83bbbd07020f0092cdac8ffc2d52c24166d78da8ec32ebc49f815264c5ab29ab84f3b44ba75c06b80aba2966a617830efb08fd3fdda831fedeb67b7d593c661538d422e1a9fe378acf51b0f2a07f34d84624e0b90af172e5976a237a7dea10f" 
"a7cbfd3203d1b4985a1af6c2d2300136226b2edf519fdd2b7b5e3fb5b0c70f2e3160305fe9dd0c09b98d522666e5100532f516bfe24d12d46b5decb4d4cbdd5fe9cd647006c1c7eba14a56262fa7a3b7b6d7b22032c1d444fe023d66b7f51004c6176f4c198a2998beab66ca70e1343187ae697e9fbfa6ca6443d617552e6b7bb73c59613ce0a7cab58545bb40636f54ccdf89c507098680f4486f821b2fb2c7baa182686b0b6f893fc9575df701196b14255b547b925387cacd5f4a762b1d4b7f713e7aebe4f75ed648b8666e60a4f8d92f752451d704e19aa102bb3dda418c80f3b4f395965ec36fd9474088ac213b38220df73c8159401ff87751bbe392e0aab031de59691a0a77ba2ab7cfbf4daf09fa4d7d61dc5b456dfdbf7a60eab671ed1f1a67fd58bceb34e981a2dc3c3bb8a7a14fc8443b47a123662d96b4df2c584856ba257f39749d51caa70b147d50c68d4aafe51ee195f1ccb99b7015de726b5f0e85bf37617138d2b24d1cbe985d8d1cbb40a52e4c57e20c799e2f5ffc0557be9d3e2bc5b99dde628c4dffd5c8704c78689e967bc870c0fec80c3c69a2453b052a46e142309fb21bcbdad7c6c5a67df409bfb9899ec58ff0973e1813f47ec6428e35a932c117b5dc70a8f5b1a9fa402d59fa45714b4bd79bc214d488939f997add26d13c147aa4d4239d8aa0e3c70994eb4a8debb7cf292b3ff59bc36f97a9acad107fcc556c24a309c4a15dab16a47a71f31324dcc8183fdaabe1fbd1cb3808c1c35c311ea51188759d4e1533d39a9547f" 
"04054e2ef994c97e213669f08db02702dd8b54154e7376f256dedc67fcd3dc48f5e0be91f1f88766415d203bb4bb11c4a0f6d0888e0c98d3b8519aab741b20ced0e02a5638e40ad2ffc301318a77e57787995acea46eb8ff7edb535036c3b3781d63a02bce56499cd03ae75ba6610ef27124da36dce85ad406c82e72a0319dcd6e05dbc66523be5015036de859af45be32c664c18ad712bf09d361769be3e568d5f51c943ec2c9f74077cb9f5757de92c643a2963d69c2cc3f010908e661f3a6ce202d50d72a436319bb2337ab1babd4f2cf1bffc3de25a09dfc5cffb31c7080c5473b4ff673fdae11e64cd492a784a106beb65bfc01f9b7b97384d877d9f4440b7434240e98656703edd66279f1bd5b7cfacc8a6b511f1db9060e813f2e37a8be5de25087b0520e7729a873e125d7cba84b93cdd333e8756630d9dc9e1815832c8dba1a3c51776948b184a916ae44694664192af75a616387f47319bcd5da1d94fce857c8e76c3438ae5c7c810310058558e01b01cfb5676f1a5a5d027bcd1ec62428a82b78fdc9dfe69ae9c0301f6f2dbf1475e1cd1804d05cb04583ae62efe63a6f1d20d5c5675f4822ddb8f6f6af3d639f56839b1993dc40223341c04d829849dea53aba7d0d2a2db0a89881a2ecee4f66698aef5ebdbb3c6d65ff03cc1a00b714112f0b111e7a97ded2abde97767e0ea6e19a04f96d708d419f457022ac21715ca86305b8d5e4f45d6382c7ce8d87a8f0f2f1a18134deb9a33b334bc04697479c4f438f5e58a62a1b22b49580fd46eb4" 
"946d07c505e9c778dc56524880e8fb565487da236bb1340d92dbe21516f40a05dc3cec3fa4a56bc93ce57e7be50ef2fb38c94790acb9702dbf2ed30d6b5cc1e0173ed4c19e2822e79e711a523ecdeb6742d90353c904876e66b30fba8975d35418f0ef3fc8e5621d8d243973addf756d1e4621618fcae42af188a22f47f0f8bd0e821c16c8ca2a15e35d855ccc5c9660ebd2fe8966e6b86326905267b80358328483d0045fc63af4edda4020ecba5853f005b9058dbb81092cc12ebb3205ade902cef207f783a3921225f3a8a108eccf02cc303b11a2a7db60c897f31480db900fb1a6e1ccd1ba0aa61214037e50d8eb1ac777fc4a467ff9b9ffcaf34fe721300067d33a25f9acd43888ba09cbd26e8b269fe84065b5c44fdf734545fe21689b838eec4a00860f654df33f87d0f115a6fc1ba4f0de641f06eb8a19d2e75aad7dddc6f00c8d598015541fc8bd22540b9bd3babbbf3e41212d35cfef1236edfa5746b733de738c60901b87bfc3a4c7d49eb16e7fbb7ab93083cab5c225f79ef03db6d490169b5ecd2791fef9045e017f9dac41dbaf841f050729c6adf789b8008a82e61c80cc4d06207dbfd6b2a9cdfb67ac26280fa9ecc298dac1878fac6188066b9d8637f772136edaa7f64fa491b0bb4775656f5f1a3135686205b8217a590c088cf448892e134a29ef4cc61bd76886663afb18ad504b204ea52ef61782ce9ba44fbf2e18e1d59302a1b69717375be70a295517b069d26e161c91ec3a1a782e38efa6ac867dbe488cfddcf8c200135b059a0" 
"da4b4dbadda9b742b906266a879da79da144eba455fa7cc5062d326996acdddec0eba8666b0e1e6c7116a1e5f04f1e94e5d85b77b2d35deb45402a589d46734810ba3a74414eb53181f75c2f0bad61d9f4aaeb94f30a1051f5ba2b2b30f1445bfe889da81e550449d863cd5af77d49d344b63666df8206bc04686ebdaee954da5f14692bc2bf1b4b01cd6b2bfad93dcc7e5c08a5059d047f6ffe96a17c828244b234a2abf28674b15d14b735956c0a9bd438183666d6926912358edea95ac5b1b6a53784f47819a3cfd4ddb9af8e74f30e06c30e218edda9eb8207dc7cd931d6e926af59f8238225dd037b47c7a4c8af558d981a7c9a7dbae3fb66345874b27cb229f1c82b841cac0cad018e8f75d0731d5a8ea0c4d530f575de7d39d77fffde64c9d1fd87b9af3759d8a275d5a1d95f1d2d0bee007544f5c39ecf4013c80cd89821f79af3979f23dfff87d093b85b892b93bec546c5eccabf41d04c65bb571543f2312ed5e3596ec5d6bf8e57e9854164d34b48ca0ca4044a526e038332348eb801a6ff342bf25750abbcfc27e7cb5e7b026db3743b210b91d1fb688c8f16d4e40203d39272f22b5bd0f796f0fa09c90*1*b48bda800b2b3665adca330cfc990283a604b08074521335437c0ed7f2a997069c88d620b638ee988edb3f6f32be1ccd01ffb14b66b2c213d31aad92b25f66f226f2793b5e554475ce8c1a7f9541ce66c594379303ce730fd77a6591c97f5bdc400ba7e8cbd496c188c2112208778ff9699674b117631d8f385ebe45ed91dd60a" 
"4a657ca39c11c135e426c03ce2219392f55c635c1736f31b1a7a892273b6d9e2867864606aa0244b82c8be1748123f0b8478baa9402521583f24ac86c11801fe340e64628e8840aee6a093b1bf25aa05c74d1c1dd8ec48321b34a53bf78347a59fa9ee394a60b845cfd4c2f5bc53541065f1c5a0d3953d9808b26ee51d17dc026ea97a2ffae213bb9818f3c4009480ac0d1774e6237546204339db20ab366a805ba8c34304070959a16639006ced72bc3ba6430ef7e5a10e9a969ee233efc23b2d99bd8d49c3615f0da372cb98e077829f07e112a5bf4357a3cdee0268bbee69d31fea1ac66564d4b1c7c303f9b41e2b23b3c7825d1ef93ae1ca1aed1607177bf92cdce38fc68325a652efd3791e922a196eba24e9816c52afeb1d84577b8a22125c1d90beb57cacff4b2a637061d69bf7f1f006d102ca2acb8471909689d36196ec300691ddb9369868f3fd577e463d8b74c7a8e95fe2fd2954136f9650f7301d4a91d9c41f647675d37c1663d4b5c50cfb175facf30598a9be1ecc2f33fd4ec7e1ecc7dffbb1180a5b224b4eb6d0e0af4ecad6cbcb2a26cb3365a723caa2eacf9404083a427d5e7e62e967875e53a8eaf4f5873627717ce802b6b66d627f3390b50c0c950dac739ab46fad66920de3fb8edb0ad0a3c93e7b3beeb90a26a1553aecf4d1f3b17b7f852cf5441bd626012ca14d8e4aa2c43ef6a272f9f6990672b2ead99d839617069117aa10f840c379fc62de5ebf5c82ed59a5a1f76b0fec724ea809411709d88fd2f986c35edf9a562e3fd" 
"bb13577e2ac78bb854768ab38850daf931c1b8cc3e6f3c244fb339d288348f88f792954e90b68d664b7f941b634aec4b2d54995ba08b999d32d007e85e7e0df4dc6022b0d6d7a23ac5bcbfb2dd6cdc300fd0e4c9b4403a53a67a1c8979774833ba4b8f338b1932424b8654e02ff039967bb43c3f0661bf22f638a4caef57d50acce63e472f1316fdb93e75218d630d958c1aef855a9a7bc54122a26ff94d78e74d48aff82a485f584b8acbea147666712d35a7167dc5f92ef4059e42c28ba66fbdccaafe71efc630b8ce7fd840bd2802c2d69a4b09a11cf17c9321d9ccfb1623bfaa89786df732b405e2cf118611e9ff153dd2db2df1953fdd888f023e74e23f3a5595b81456b6ffb33e91d65f08fc8eab545412b18be47d14ab77827073286a735187bed1b12fbed879969f7d06c53041a6bd79bf6c5260342480cdb50cb617c2b4111da501ea98f368320094c5353a36df520824ec52dd15e818bec43d80b537c0d809845645429ea4f7635528cb7b8149924053a76d3c05b0c31e5970eaa014708c64c902be5272513111a73e682ed9f473c87b964a4957934424bf957d1e86c6c90a967a8643eec2b65f08d4c91252cb9663a4e5aa4ad9180166ac633c0e5f5170656373489126e6be09e9e8bd6f226f0833bd392884dfce749d68ad51b1f0e0ef5fc5a8876e54558e191abcfc4632409547a8a5c46c2b546db07ba324b4d327ebe86f87dac27b64d6e0c8250019c1114a4f8fa39523dc3f5d597aa33af245ecca15ea8cbef7604eca5ed804ac4f57c12" 
"6e335763925b88128b7289566270a5d7d1602481647f74d71bc1eafd0913851bcf07047dfef51b41fc02215d136885e647001f9f47546e9ea6ba0beab1d8a276cf9b85d780c05d4031f55d35d54c56f7fceeae9d62c58e7e928e591c2d6b1d14391f829f3e30bda6132bc513227cfad357be2c6f045bad7be72d01ceccd059327a72ce044edd534a5ddf71831bf07ebe84806feb621a5b8d71f4a608878e5e5daf3f8b4b3eda75f74f03d1ae5aebd029f037f66253f542aa06cd6c29ac5ed27ecdc7641fb6d54c98e71491772944303d3b6be683ac44b7bda5d49209133ff564cee31912b8e024cf628e0719522b11eff2e32874818f9a0ebde427657558a72943d6eb25c4b9d523336f37453af157035a3bc5ffd13847a928450d4e01f2ce7ca51d456939363c3e5a69b0d25311682c7b266cf86d12b63dcd322be77594c7f929a77467566a8d86a7d2b583b95f76626244738251fa762e0b2825c7668d6dde8ac5579c1a06318e5c5a6b2b1bc93bce6cd4853c50b6662482549290b15500722e3d6772c7541e3c864291dcbed84496dcc9ff4dddc974aa8b17b7ccea56c856f24ee2277a391c3c0c2c5584111ed24fe64e478e3c4d22380b8183222570fa3c70d29230aa21fd21808baacfd41e2430fed7c3316235e6b4c2c3331ee36d9e5c94ddbd73b351897cab7ede8a7c417c753d8023cf46694acbc9aa6ca556da7de108005330704cf54b1ec7bf7df02e36cd736237316b3523bca0a53a2472e68d30d95b1eb49282b27530bc69cd154b7a4dce75d" 
"a3efc65c12ce45de7a63632d340fc61a1789129df1554813a15c9a6ad101c07363ba8d967b70ae1767f8927440678bab989dbe994922779c3c277055a35bf12d6909caba8a4b6bec7f49dd32426d858e53164c8db77bd1b9321b31e6c1ad1e92596bec4ad39d5b6944c7585a5ad0c6f83f64727a7f6397f784d865ba3b9c85343f3a2828a0e71d75f19036ea0f17e265750d6a01513be2bee0bd0a837996971b87305dafda12679bc118a1df188888396e10074254e4aeecb6801e00e8f3ade2889b65aba9e29d2d146001740116c893df1899175dbbf88ec175216df3d93a88fb6957adf64a3849e26194edb91188c0373fdf9be85a520c173817ccac3e4e9c88ce0bd9448be3f6cf3eb92b9337ecf2e63db5887e1113ee31529c373e83ec02012ddaa8812fa5c6b8be8febe29d0c286fe03832aee79018fdbaedd8bec03345c05faa1231ad148bf4531679738a537ec490bdcf78a0d9dd13e6988e360273c388b91006a66176c93caf3594cb098d5f4287a37d79b636eb566eaeb73ef76a4a480fad73caad3378d17a9395bf71c6c43f643b04b4f1773939329470e51053467b67ed8ac0807b8806d26d16f6f4fc15b3f3cc197d24ea26418cf970a5e7009bd871aff96be823fd80efe1adcaa882c168692b53bdb47effc666a1768d04d0d8bf199d36604e82b72fcce53e86d063c347aeecc79a846f8e12cdec679b857f85a75fe59a1338a411950459443b3fec6511dcc78d5bb6dc60accd6013400c0ef71f19d7713b37777a75e96d0d341d416c9cd94" 
"7e3c442f6ddb31daec66bd96ca31b01d2dfb99d312a651ba5ec1765354de39d7aa4bb096ce7edbd93829d8ee2b7e3ff364f5d87f653a541f033db6c3266a03046f8612ad8d56a1c78912c9774c86a8d7e2eaa7f3bb1033470789ac2c32bd3c2ba1269bb01b176b167688f8fbe1f6094c3e2736bdc1cb1733364011681be98047cdad7d998241e121e6508cfd665c42b30f22bc442f940b5c7d93659f59abcb17aab1f28a02d0b59239f148211c525dd209cb932c54f24fa8a9541f0eab28b4c8df80845058e71e5447959bfc7f7d28e15542523410bc162f566875ed6d9d4fba519000b8c5d90f894f2bc74dc8307e26d4e0a9b418487d7470fbd64e97e660a3038a10a26a80e7cca09a3280ce3c87d07befd6f65127096d6075a18f30906828cee1f8b968dd3247210041078cf6d28f05977e5c172a9ecd83167873881e0ffcc56615ad0d64b0189ed8d559e43cccb1e2f8805df7156cb11f5df9dfbc067fce9fb3ee3230e28edfcf98741b9883f9f0f42913cc2be1036a0590107c69a9fadd4c9fc39df872f0db664ea7172fd72e0ad756be95417487d0c2bb38061c52124dcb2545f15a5bfd39d950b5878a067945733d8b1dc37cb85dd9393c98b0751c83d8e848fd1bd3ad243f6a8af7a8cb8cda7e1dc05324fa3932423fea0428131646534e74398f1604146da26a615045ee49ae2df3c8fcd16da64672845a946de4c26c1417c534a2b62a408a8c30c2e4f73ee44571259b628249c9e3f65e7b8d22002a170e7e53dc7c4cdc0073491db2cd6de20cd" 
"df07501ff08378ac1cfe3ef479491f3fc475f8aa1fb188706c264e276da3e0399e2bc17cffd6ad0ff94d2d3b9a3b46e8c1472c41fc1c002daa76634f94b3bdf8560cb3241352c6f1be21fee70cd54a1d96e31d71ef99589b93e7ca8d026abcb4a4fbfc8c0f57d59a6d9e760f02fd0a569702da7f59da495c2dd7f92d60fb3220cd7932a032d40ed29deaa5fe971128c6503eb9d1029a23ed6dc4fd5e8c5cf0347841424d60a5a07a9781d08c85222cf7241d199609762488332a6eafbc08cec42c876da9bd3fa287bca12f71b6e33c4453afb970b425a45b9baa9aa69ebb3907e06e6610f100b00c86752b2c106c2e0b71963f1933d315ceef89132c7744149db0c28f62b3d7b43d570d1f5c40bf4b7470b3b8de30b0d756b8326542743f2fa5cf3eff226b6a658ecbe44dc9a0e59f073f999d8c3340ba30ecff6f2fa4f3815f0d4c665b5109ce8984971e5cbec806888c2acdf73d2a330de9e5133787aa4950d08759f4cfcb55ec8efb43d421cf3a9f601a096677eb95f61e352a9adae7c0b971fb455f170c7ed95329b699d6e93f024786507e2e0acbeffb452c26d8c041cb88316d09a08af54ec48451f9bb685a23910e97ac82bb41f19f6b42fa10cfb75f9fa8edd61653c14a27b51544e3fb28009aab76d060135df2d097fd4c2f2e63dba1192c648215fdd1dace4824d71e038e23184ede7f61baefd747aed93b9807d0b3b7b4f7cb9eb171d1ba241b19cf1c74781eaaaca99a458253777522dedcf3d1db6bd4eec4459e59ad635904201b5d91c77bb" 
"b6e91f00f5a6f29794b35afde3dcd850f08ac5da097549ded05159567e9f7a023e08e49253766c0e151852714987201e90df675368ee638a947b7e6dc20bedf60656971170afe2d453662685dc1ceef8436ca8071680d0346239b41a6825839e9d5af12f9574d51b4672c5fa7f84bac497c8ba5fad2c10fbffe5ee713090b903d7723cd28c1b189a47c6a9fe9a88d0881dd60d1970c6e8a6d812bbd089c10841e5ced1417bef41f400118fa990d157bca93267d407989de017bd48f0231d43b9487526072e2755461274b3f5bf27847dda36c652a2b1fdd3815fd4ab93863426b31ecd1e6a9094dd2ed0190f8138e650dd2174fcc6b6ab1b8b91cc8020f2dcbb14855e7dd0bc1b5a01f55f81c0476daf1684cc4e72a68327120730ae92c45ab4e447c4ee900d61f79681667eec61343e4eebdd65c5b38a1ba5e3478f4d2f59d184ec39aca445a0f6edaa6840f04bfc19acf23db4507609cbdb44514b36aa5ef4ffe46577b711d1028970916eae919f1b4913d5894a24117cd7cc1aa8965840865554ce663af470455c0f756c795fb29eec04b727b12f7f3796f572ca2ec1e8771a88f68999e16b2acb235a7d9146f85f2be5a034babc3bdde750eb7895396d4777c144aee517a07310dcc8c9ce0ead93abb7f1eb4e34ed5036361d682c97eac1ad7c8158035e40a713f0f2e6f6e677d4b11ecc97e101a5b48420435dd218846ae622b416faeba7e0003bbbece71c2aa046715173b408c8ab2888b0b5dc4c34683f83ba9a83795f86122e6d80597d3a952a44f" "5a1edb6f294a0ceebefc3cb54db814cf91fe450ed4c71d0b4091a1fc7474", "goodjob"}, {NULL} }; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl-autotune.h" /* must be included after the format definitions above */
#include "memdbg.h"

/* Phase labels printed by the autotuner: host->device copy, kernel run, device->host copy. */
static const char * warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};

/* ------- Helper functions ------- */

/* Query the largest work-group size the compiled crypt kernel supports on this device. */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}

/*
 * Allocate host buffers and OpenCL device buffers sized for 'gws' candidate
 * passwords, then bind the device buffers to the kernel arguments.
 * Called by the autotune machinery with candidate global work sizes.
 * NOTE(review): 'self' is unused here — presumably kept for the
 * create_clobj callback signature required by opencl_init_auto_setup().
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(dmg_password) * gws;
	outsize = sizeof(dmg_hash) * gws;
	settingsize = sizeof(dmg_salt);
	cracked_size = sizeof(*cracked) * gws;

	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize,
	                        NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY,
	                             settingsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize,
	                         NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in),
	               "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out),
	               "Error while setting mem_out kernel argument");
	/* NOTE(review): message says "mem_salt" but the argument is mem_setting */
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting),
	               "Error while setting mem_salt kernel argument");
}

/*
 * Release device buffers and host buffers allocated by create_clobj().
 * The 'cracked' pointer doubles as the "allocated" flag, so a double
 * release is a no-op (MEM_FREE also NULLs the pointers).
 */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}

/* Format teardown: undo reset()'s kernel/program creation, once per autotune. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/* Format init: remember our fmt_main and prepare the chosen OpenCL device. */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}

/*
 * Build the PBKDF2-HMAC-SHA1 kernel (sized to this format's key/salt/output
 * lengths) and run the shared auto-tune to pick LWS/GWS.  Runs only once
 * (guarded by 'autotuned'); 1000 is the cost used for tuning iterations.
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         PLAINTEXT_LENGTH,
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(dmg_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}

/*
 * Validate a "$dmg$" ciphertext line without committing it to a salt.
 * Two on-disk layouts exist: headerver 2 (modern, AES-wrapped chunk) and
 * headerver 1 (VileFault-era 3DES-wrapped keys).  Each '*'-separated field
 * is length/decimal checked against the fixed-size custom_salt buffers.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	char *p;
	int headerver;
	int res;

	if (strncmp(ciphertext, "$dmg$", 5) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 5;	/* skip over "$dmg$" marker */
	if ((p = strtokm(ctcopy, "*")) == NULL)
		goto err;
	headerver = atoi(p);
	if (headerver == 2) {
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt len */
			goto err;
		if(!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 20)	/* must fit cs.salt */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
			goto err;
		if (hexlenl(p) != res*2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* ivlen */
			goto err;
		if(!isdec(p))
			goto err;
		res = atoi(p);
		/* NOTE(review): re-parses p instead of using res — harmless but redundant */
		if (atoi(p) > 32)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
			goto err;
		if (hexlenl(p) != res*2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* encrypted_keyblob_size */
			goto err;
		if(!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 128)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* encrypted keyblob */
			goto err;
		if (hexlenl(p) != res*2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* chunk number */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* data_size */
			goto err;
		if(!isdec(p))
			goto err;
		res = atoi(p);
		if ((p = strtokm(NULL, "*")) == NULL)	/* chunk */
			goto err;
		if (hexlenl(p) != res*2)
			goto err;
		if (res > 8192)	/* must fit cs.chunk */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* scp */
			goto err;
		if(!isdec(p))
			goto err;
		res = atoi(p);	/* FIXME: which values are allowed here? */
		if (res == 1) {
			if ((p = strtokm(NULL, "*")) == NULL)	/* zchunk */
				goto err;
			if (strlen(p) != 4096 * 2)	/* fixed 4096-byte hex block */
				goto err;
		}
	}
	else if (headerver == 1) {
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt len */
			goto err;
		if(!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 20)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
			goto err;
		if (hexlenl(p) != res*2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* len_wrapped_aes_key */
			goto err;
		if(!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 296)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* wrapped_aes_key  */
			goto err;
		if (hexlenl(p) != res*2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* len_hmac_sha1_key */
			goto err;
		if(!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 300)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* hmac_sha1_key */
			goto err;
		if (strlen(p) / 2 != res)
			goto err;
	}
	else
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/*
 * Parse a validated ciphertext into a custom_salt.  Field order mirrors
 * valid() above; hex fields are decoded via the atoi16 nibble table.
 * Returns a pointer to a static struct (the caller copies it), so this
 * is not reentrant — standard JtR format convention.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += 5;	/* skip "$dmg$" */
	p = strtokm(ctcopy, "*");
	cs.headerver = atoi(p);
	if (cs.headerver == 2) {
		p = strtokm(NULL, "*");
		cs.saltlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.saltlen; i++)
			cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.ivlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.ivlen; i++)
			cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.encrypted_keyblob_size = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.encrypted_keyblob_size; i++)
			cs.encrypted_keyblob[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.cno = atoi(p);	/* chunk number, fed into the HMAC for the IV */
		p = strtokm(NULL, "*");
		cs.data_size = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.data_size; i++)
			cs.chunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.scp = atoi(p);
		if (cs.scp == 1) {
			/* optional second ("zero") chunk, fixed 4096 bytes */
			p = strtokm(NULL, "*");
			for (i = 0; i < 4096; i++)
				cs.zchunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
					+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		}
		/* optional trailing iteration count; default 1000 */
		if ((p = strtokm(NULL, "*")))
			cs.iterations = atoi(p);
		else
			cs.iterations = 1000;
	} else {
		p = strtokm(NULL, "*");
		cs.saltlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.saltlen; i++)
			cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.len_wrapped_aes_key = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.len_wrapped_aes_key; i++)
			cs.wrapped_aes_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.len_hmac_sha1_key = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.len_hmac_sha1_key; i++)
			cs.wrapped_hmac_sha1_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		if ((p = strtokm(NULL, "*")))
			cs.iterations = atoi(p);
		else
			cs.iterations = 1000;
	}
	if (cs.iterations == 0)
		cs.iterations = 1000;
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/*
 * Install a salt and push the PBKDF2 parameters to the GPU.
 * NOTE(review): length is hard-wired to 20 (and outlen to 32) regardless
 * of cur_salt->saltlen — presumably all real DMG salts are 20 bytes;
 * confirm against the valid() limit of res > 20.
 */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, 20);
	currentsalt.length = 20;
	currentsalt.outlen = 32;
	currentsalt.iterations = cur_salt->iterations;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy setting to gpu");
}

#undef set_key
/* Store a candidate password (truncated to PLAINTEXT_LENGTH) in the host input buffer. */
static void set_key(char *key, int index)
{
	uint8_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}

/* Return candidate at 'index' as a C string (static buffer, not reentrant). */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}

/*
 * Apple's v1 (VileFault) key unwrap: 3DES-EDE-CBC decrypt with a fixed IV,
 * strip PKCS padding, byte-reverse, then decrypt again with the first 8
 * bytes as IV.  Returns 1 if both PKCS pads check out (i.e. the derived
 * key was correct), 0 otherwise.  The recovered key itself is discarded —
 * only the pad validity matters for cracking.
 */
static int apple_des3_ede_unwrap_key1(const unsigned char *wrapped_key,
	const int wrapped_key_len, const unsigned char *decryptKey)
{
	DES_key_schedule ks1, ks2, ks3;
	unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	unsigned char TEMP2[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	/* fixed IV used by Apple's unwrap, from the FileVault/vfdecrypt research */
	unsigned char IV[8] = { 0x4a, 0xdd, 0xa2, 0x2c, 0x79, 0xe8, 0x21, 0x05 };
	int outlen, i;

	DES_set_key((DES_cblock*)(decryptKey +  0), &ks1);
	DES_set_key((DES_cblock*)(decryptKey +  8), &ks2);
	DES_set_key((DES_cblock*)(decryptKey + 16), &ks3);
	DES_ede3_cbc_encrypt(wrapped_key, TEMP1, wrapped_key_len, &ks1, &ks2, &ks3,
	                     (DES_cblock*)IV, DES_DECRYPT);
	outlen = check_pkcs_pad(TEMP1, wrapped_key_len, 8);
	if (outlen < 0)
		return 0;

	/* reverse the buffer, then the first 8 bytes become the second-pass IV */
	for (i = 0; i < outlen; i++)
		TEMP2[i] = TEMP1[outlen - i - 1];
	outlen -= 8;
	DES_ede3_cbc_encrypt(TEMP2 + 8, TEMP1, outlen, &ks1, &ks2, &ks3,
	                     (DES_cblock*)TEMP2, DES_DECRYPT);
	outlen = check_pkcs_pad(TEMP1, outlen, 8);
	if (outlen < 0)
		return 0;

	return 1;
}

/*
 * Decide whether 'derived_key' (the PBKDF2 output from the GPU) decrypts
 * this DMG.  v1 headers: success == both 3DES unwraps yield valid PKCS
 * padding.  v2 headers: unwrap the AES key from the keyblob, decrypt the
 * sample chunk, and look for plaintext signatures (8 consecutive NULs in
 * the release build; more heuristics under DMG_DEBUG).
 */
static int hash_plugin_check_hash(unsigned char *derived_key)
{
	unsigned char hmacsha1_key_[20];
	unsigned char aes_key_[32];
	int ret = 0;

	if (cur_salt->headerver == 1) {
		if (apple_des3_ede_unwrap_key1(cur_salt->wrapped_aes_key,
		        cur_salt->len_wrapped_aes_key, derived_key) &&
		    apple_des3_ede_unwrap_key1(cur_salt->wrapped_hmac_sha1_key,
		        cur_salt->len_hmac_sha1_key, derived_key)) {
			return 1;
		}
	}
	else {
		DES_key_schedule ks1, ks2, ks3;
		unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
		AES_KEY aes_decrypt_key;
		unsigned char outbuf[8192 + 1];
		unsigned char outbuf2[4096 + 1];
		unsigned char iv[20];	/* 20 bytes: receives a full HMAC-SHA1, AES uses first 16 */
#ifdef DMG_DEBUG
		unsigned char *r;
#endif
		const char nulls[8] = { 0 };

		/* 3DES-decrypt the keyblob with the derived key; first 32 bytes
		   are the AES key, first 20 double as the HMAC-SHA1 key */
		DES_set_key((DES_cblock*)(derived_key +  0), &ks1);
		DES_set_key((DES_cblock*)(derived_key +  8), &ks2);
		DES_set_key((DES_cblock*)(derived_key + 16), &ks3);
		memcpy(iv, cur_salt->iv, 8);
		DES_ede3_cbc_encrypt(cur_salt->encrypted_keyblob, TEMP1,
		                     cur_salt->encrypted_keyblob_size,
		                     &ks1, &ks2, &ks3, (DES_cblock*)iv, DES_DECRYPT);

		memcpy(aes_key_, TEMP1, 32);
		memcpy(hmacsha1_key_, TEMP1, 20);
		/* per-chunk IV = HMAC-SHA1(key, chunk number) */
		hmac_sha1(hmacsha1_key_, 20, (unsigned char*)&cur_salt->cno, 4, iv, 20);
		/* 48-byte keyblob => AES-128, otherwise AES-256 */
		if (cur_salt->encrypted_keyblob_size == 48)
			AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
		else
			AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
		AES_cbc_encrypt(cur_salt->chunk, outbuf, cur_salt->data_size,
		                &aes_decrypt_key, iv, AES_DECRYPT);

		/* 8 consecutive nulls */
		if (memmem(outbuf, cur_salt->data_size, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
			if (!bench_running)
				fprintf(stderr, "NULLS found!\n\n");
#endif
			ret = 1;
		}

/* These tests seem to be obsoleted by the 8xNULL test */
#ifdef DMG_DEBUG
		/* </plist> is a pretty generic signature for Apple */
		if (memmem(outbuf, cur_salt->data_size, (void*)"</plist>", 8)) {
			if (!bench_running)
				fprintf(stderr, "</plist> found!\n\n");
			ret = 1;
		}
		/* Journalled HFS+ */
		if (memmem(outbuf, cur_salt->data_size, (void*)"jrnlhfs+", 8)) {
			if (!bench_running)
				fprintf(stderr, "jrnlhfs+ found!\n\n");
			ret = 1;
		}
		/* Handle compressed DMG files, CMIYC 2012 and self-made
		   samples. Is this test obsoleted by the </plist> one? */
		if ((r = memmem(outbuf, cur_salt->data_size, (void*)"koly", 4))) {
			unsigned int *u32Version = (unsigned int *)(r + 4);

			if (HTONL(*u32Version) == 4) {
				if (!bench_running)
					fprintf(stderr, "koly found!\n\n");
				ret = 1;
			}
		}
		/* Handle VileFault sample images */
		if (memmem(outbuf, cur_salt->data_size, (void*)"EFI PART", 8)) {
			if (!bench_running)
				fprintf(stderr, "EFI PART found!\n\n");
			ret = 1;
		}
		/* Apple is a good indication but it's short enough to
		   produce false positives */
		if (memmem(outbuf, cur_salt->data_size, (void*)"Apple", 5)) {
			if (!bench_running)
				fprintf(stderr, "Apple found!\n\n");
			ret = 1;
		}
#endif /* DMG_DEBUG */

		/* Second buffer test. If present, *this* is the very first block of the DMG */
		if (cur_salt->scp == 1) {
			int cno = 0;	/* the zchunk is always chunk number 0 */

			hmac_sha1(hmacsha1_key_, 20, (unsigned char*)&cno, 4, iv, 20);
			if (cur_salt->encrypted_keyblob_size == 48)
				AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
			else
				AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
			AES_cbc_encrypt(cur_salt->zchunk, outbuf2, 4096,
			                &aes_decrypt_key, iv, AES_DECRYPT);

			/* 8 consecutive nulls */
			if (memmem(outbuf2, 4096, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
				if (!bench_running)
					fprintf(stderr, "NULLS found in alternate block!\n\n");
#endif
				ret = 1;
			}
#ifdef DMG_DEBUG
			/* This test seem to be obsoleted by the 8xNULL test */
			if (memmem(outbuf2, 4096, (void*)"Press any key to reboot", 23)) {
				if (!bench_running)
					fprintf(stderr, "MS-DOS UDRW signature found in alternate block!\n\n");
				ret = 1;
			}
#endif /* DMG_DEBUG */
		}

#ifdef DMG_DEBUG
		/* Write block as hex, strings or raw to a file. */
		if (ret && !bench_running) {
#if DMG_DEBUG == 4
			int fd;

			if ((fd = open("dmg.debug.main", O_RDWR | O_CREAT | O_TRUNC, 0660)) == -1)
				perror("open()");
			else {
#if FCNTL_LOCKS
				struct flock lock = { 0 };

				lock.l_type = F_WRLCK;
				while (fcntl(fd, F_SETLKW, &lock)) {
					if (errno != EINTR)
						pexit("fcntl(F_WRLCK)");
				}
#elif OS_FLOCK
				while (flock(fd, LOCK_EX)) {
					if (errno != EINTR)
						pexit("flock(LOCK_EX)");
				}
#endif
				if ((write(fd, outbuf, cur_salt->data_size) == -1))
					perror("write()");
				if (cur_salt->scp == 1)
					if ((write(fd, outbuf2, 4096) == -1))
						perror("write()");
				if (close(fd))
					perror("close");
			}
#endif
#if DMG_DEBUG == 3
			dump_stuff(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_stuff(outbuf2, 4096);
			}
#endif
#if DMG_DEBUG == 2
			dump_text(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_text(outbuf2, 4096);
			}
#endif
		}
#endif /* DMG_DEBUG */
	}
	return ret;
}

/* Run PBKDF2 on the GPU for 'count' candidates, then test each derived key on the CPU. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ?
&local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } /// Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) if (hash_plugin_check_hash((unsigned char*)outbuffer[index].v) == 1) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } struct fmt_main fmt_opencl_dmg = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef DMG_DEBUG FMT_NOT_EXACT | #endif #ifdef _OPENMP FMT_OMP | #endif FMT_CASE | FMT_8_BIT, { "iteration count", }, dmg_tests }, { init, done, reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin 
stanza */ #endif /* HAVE_OPENCL */
conv_kernel_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haoluo@openailab.com
 */

#include "conv_kernel_arm.h"

#include "api/c_api.h"
#include "utility/sys_port.h"

#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "wino_conv_kernel_arm.h"
#ifdef __aarch64__
#include "wino_conv_kernel_1_arm.h"
#endif

/* Hand-written assembly GEMM micro-kernels; a72 variants process 16 output
 * channels per pass (aarch64), a17 variants 12 (armv7). */
#ifdef __aarch64__
#define PER_OUT_CHAN 16
void sgemm_4x16_a72(float* biases, float* input, float* kernel, long kernel_size, float* output, long output_xy, int activation, int layout);
void sgemm_4x4_a72(float* biases, float* input, float* kernel, long kernel_size, float* output, long output_xy, int activation, int layout);
#else
#define PER_OUT_CHAN 12
void sgemm_4x12_a17(float* biases, float* input, float* kernel, int kernel_size, float* output, int output_xy, int activation, int layout);
void sgemm_4x4_a17(float* biases, float* input, float* kernel, int kernel_size, float* output, int output_xy, int activation, int layout);
#endif

/* Assembly im2col fast paths for 1x1 and 3x3 kernels. */
void im2col_fp32_1x1(float* input, int input_xy, float* col, int col_cnt, int input_chan);
void im2col_fp32_3x3(float* input, int w, int h, int channel, float* cur_col, int stride);

/* Interleave `kernel_chan` filters of `kernel_size` floats each into the
 * layout the sgemm micro-kernels expect: groups of PER_OUT_CHAN channels,
 * then groups of 4, with the final partial group zero-padded to 4. */
static void interleave_kernel(float* kernel, float* kernel_interleaved, int kernel_chan, int kernel_size)
{
    int i, j, k;
    float* cur_kernel[PER_OUT_CHAN];
    float* cur_kernel_interleaved = kernel_interleaved;

    // interleave PER_OUT_CHAN kernels
    for (i = 0; i + PER_OUT_CHAN - 1 < kernel_chan; i += PER_OUT_CHAN)
    {
        for (k = 0; k < PER_OUT_CHAN; k++)
            cur_kernel[k] = kernel + kernel_size * (i + k);
        for (j = 0; j < kernel_size; j++)
        {
            for (k = 0; k < PER_OUT_CHAN; k++)
                *(cur_kernel_interleaved++) = cur_kernel[k][j];
        }
    }

    /* remaining full groups of 4 channels */
    for (; i < (kernel_chan & -4); i += 4)
    {
        for (k = 0; k < 4; k++)
            cur_kernel[k] = kernel + kernel_size * (i + k);
        for (j = 0; j < kernel_size; j++)
        {
            for (k = 0; k < 4; k++)
                *(cur_kernel_interleaved++) = cur_kernel[k][j];
        }
    }

    // last 4 kernel
    /* partial tail group (1..3 channels), zero-padded up to 4 lanes */
    for (k = 0; k < 3; k++)
        cur_kernel[k] = kernel + kernel_size * (i + k);

    if ((kernel_chan & 0x3) == 3)
    {
        for (j = 0; j < kernel_size; j++)
        {
            for (k = 0; k < 3; k++)
                *(cur_kernel_interleaved++) = cur_kernel[k][j];
            *(cur_kernel_interleaved++) = 0.f;
        }
    }
    else if ((kernel_chan & 0x3) == 2)
    {
        for (j = 0; j < kernel_size; j++)
        {
            for (k = 0; k < 2; k++)
                *(cur_kernel_interleaved++) = cur_kernel[k][j];
            *(cur_kernel_interleaved++) = 0.f;
            *(cur_kernel_interleaved++) = 0.f;
        }
    }
    else if ((kernel_chan & 0x3) == 1)
    {
        for (j = 0; j < kernel_size; j++)
        {
            *(cur_kernel_interleaved++) = cur_kernel[0][j];
            *(cur_kernel_interleaved++) = 0.f;
            *(cur_kernel_interleaved++) = 0.f;
            *(cur_kernel_interleaved++) = 0.f;
        }
    }
}

/* kernel interleave */
/* Interleave the whole filter tensor, one group at a time, into
 * priv_info->interleave_buffer (per-group stride padded to 4 channels). */
static void interleave(struct tensor* filter, struct conv_priv_info* priv_info, struct conv_param* param)
{
    int group = param->group;
    int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3];
    int out_chan = filter->dims[0] / group;
    int out_chan_align4 = (out_chan + 3) / 4 * 4;

    int kernel_size_algin = kernel_size * out_chan_align4;
    int kernel_size_group = kernel_size * out_chan;

    float* kernel = filter->data;
    float* interleave_buf = priv_info->interleave_buffer;
    for (int g = 0; g < group; g++)
    {
        float* cur_kernel = kernel + g * kernel_size_group;
        float* cur_interleave = interleave_buf + g * kernel_size_algin;
        interleave_kernel(cur_kernel, cur_interleave, out_chan, kernel_size);
    }
}

/* Unfold one input feature map into column-major patches (4 output pixels
 * per column block, tail zero-padded). Three paths: 1x1/s1 fast path,
 * aarch64 3x3 fast path, and a generic dilated/padded fallback. */
static void im2col(float* input, float* col, int in_c, int in_w, int in_h, int k_w, int k_h, int s_w, int s_h, int d_w, int d_h, int pad_w0, int pad_w1, int pad_h0, int pad_h1, int out_w, int out_h, int num_thread)
{
    if (k_w == 1 && k_h == 1 && s_w == 1 && s_h == 1)
    {
        int kernel_size = k_w * k_h * in_c;
        int in_xy = in_w * in_h;
        int out_xy = out_w * out_h;
        int col_end3 = out_xy & 3;
#pragma omp parallel for num_threads(num_thread)
        for (int col_i = 0; col_i < out_xy - 3; col_i += 4)
        {
            float* cur_col = col + col_i * kernel_size;
            float* cur_input = input + col_i;
            im2col_fp32_1x1(cur_input, in_xy, cur_col, 4, in_c);
        }
        int col_i = out_xy & -4;
        float* cur_col;
        // final 4 input
        if (col_end3)
        {
            cur_col = col + col_i * kernel_size;
            for (int col_j = 0; col_j < kernel_size; col_j++)
            {
                for (int i = 0; i < 4; i++)
                {
                    if (i < col_end3)
                        *cur_col++ = *(input + col_j * in_xy + col_i + i);
                    else
                        *cur_col++ = 0;
                }
            }
        }
    }
#ifdef __aarch64__
    else if (d_w == 1 && d_h == 1 && k_w == 3 && k_h == 3 && s_w == s_h)
    {
        int kernel_size = k_w * k_h * in_c;
        int in_xy = in_w * in_h;
        int out_xy = out_w * out_h;
        int col_end3 = out_xy & 3;
        int is_pad0 = (pad_w0 == 0) && (pad_h0 == 0) && (pad_w1 == 0) && (pad_h1 == 0);
#pragma omp parallel for num_threads(num_thread)
        for (int col_i = 0; col_i < (out_xy & -4); col_i += 4)
        {
            float* cur_col = col + col_i * kernel_size;
            int imy0 = col_i / out_w;
            int imy3 = (col_i + 3) / out_w;
            int imx0 = col_i - imy0 * out_w;
            int imx3 = (col_i + 3) - imy3 * out_w;

            /* assembly fast path: all 4 pixels on the same row and no
             * padding is touched */
            if ((imy0 == imy3) && (is_pad0 || (imy0 != 0 && imx0 != 0 && imy0 != (out_h - 1) && imx3 != (out_w - 1))))
            {
                float* l0 = input + (imy0 * s_h - pad_h0) * in_w + (imx0 * s_w - pad_w0);
                {
                    im2col_fp32_3x3(l0, in_w, in_h, in_c, cur_col, s_w);
                    cur_col += 4 * kernel_size;
                }
            }
            else
            {
                /* slow path with explicit bounds checks (zero for pad) */
                int cnt_y[4] = {imy0, (col_i + 1) / out_w, (col_i + 2) / out_w, imy3};
                int cnt_x[4] = {imx0, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2, imx3};
                int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0, cnt_x[3] * s_w - pad_w0};
                int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0, cnt_y[3] * s_h - pad_h0};
                for (int kch = 0; kch < in_c; kch++)
                    for (int ky = 0; ky < 3; ky++)
                        for (int kx = 0; kx < 3; kx++)
                        {
                            int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                            int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                            for (int i = 0; i < 4; i++)
                            {
                                if (imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
                                    *cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
                                else
                                    *cur_col++ = 0.f;
                            }
                        }
            }
        }
        // final 4 input
        int col_i = out_xy & -4;
        if (col_end3)
        {
            float* cur_col = col + col_i * kernel_size;
            int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
            int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2, col_i - cnt_y[3] * out_w + 3};
            int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0, cnt_x[3] * s_w - pad_w0};
            int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0, cnt_y[3] * s_h - pad_h0};
            for (int kch = 0; kch < in_c; kch++)
            {
                for (int ky = 0; ky < 3; ky++)
                {
                    for (int kx = 0; kx < 3; kx++)
                    {
                        int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                        int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                        for (int i = 0; i < 4; i++)
                        {
                            if (i < col_end3 && imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
                                *cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
                            else
                                *cur_col++ = 0.f;
                        }
                    }
                }
            }
        }
    }
#endif
    else
    {
        /* generic path: arbitrary kernel, stride, dilation and padding */
        int out_xy = out_w * out_h;
#pragma omp parallel for num_threads(num_thread)
        for (int col_i = 0; col_i < out_xy - 3; col_i += 4)
        {
            int kernel_size = k_w * k_h * in_c;
            int in_xy = in_w * in_h;
            int col_end3 = out_xy & 3;
            float* cur_col = col + col_i * kernel_size;
            int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
            int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2, col_i - cnt_y[3] * out_w + 3};
            int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0, cnt_x[3] * s_w - pad_w0};
            int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0, cnt_y[3] * s_h - pad_h0};
            for (int kch = 0; kch < in_c; kch++)
                for (int ky = 0; ky < (k_h * d_h); ky += d_h)
                    for (int kx = 0; kx < (k_w * d_w); kx += d_w)
                    {
                        int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                        int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                        for (int i = 0; i < 4; i++)
                        {
                            if (imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
                                *cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
                            else
                                *cur_col++ = 0.f;
                        }
                    }
        }
        int col_i = out_xy & -4;
        float* cur_col;
        int kernel_size = k_w * k_h * in_c;
        int in_xy = in_w * in_h;
        int col_end3 = out_xy & 3;
        if (col_end3)
        {
            cur_col = col + col_i * kernel_size;
            int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
            int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2, col_i - cnt_y[3] * out_w + 3};
            int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0, cnt_x[3] * s_w - pad_w0};
            int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0, cnt_y[3] * s_h - pad_h0};
            for (int kch = 0; kch < in_c; kch++)
                for (int ky = 0; ky < (k_h * d_h); ky += d_h)
                    for (int kx = 0; kx < (k_w * d_w); kx += d_w)
                    {
                        int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                        int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                        for (int i = 0; i < 4; i++)
                        {
                            if (i < col_end3 && imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
                                *cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
                            else
                                *cur_col++ = 0.f;
                        }
                    }
        }
    }
}

/* GEMM over the channel groups of size PER_OUT_CHAN: for each group of
 * output channels, multiply the interleaved kernel by the im2col buffer for
 * columns [col_start, col_end), writing into `output`. The ragged column
 * tail (<4) goes through a stack `result` buffer and is scattered out. */
static void sgemm_set(float* col, float* kernel, float* biases, float* output, int kernel_size, int col_start, int col_end, int kernel_start, int kernel_end, int output_xy, int activation, int num_thread, int cpu_affinity)
{
    int col_end3 = col_end & 0x3;
    int nn_outch = kernel_end / PER_OUT_CHAN;

#pragma omp parallel for num_threads(num_thread)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * PER_OUT_CHAN;

        float* biasptr = biases ? (float*)(biases + p) : NULL;
        float* kernel_tmp = (float*)(kernel + p * kernel_size);
        float* output_tmp = (float*)(output + p * output_xy);

        for (int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4)
#ifdef __aarch64__
        {
            float* col_tmp = (float*)(col + col_line * kernel_size);
            sgemm_4x16_a72(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, activation, 0);
        }
        if (col_end3)
        {
            int col_line = col_end & -4;
            float result[4 * PER_OUT_CHAN];
            float* col_tmp = (float*)(col + col_line * kernel_size);
            sgemm_4x16_a72(biasptr, col_tmp, kernel_tmp, kernel_size, result, 4, activation, 0);
            for (int i = 0; i < 16; i++)
            {
                for (int j = 0; j < (col_end3); j++)
                {
                    *(output + (p + i) * output_xy + col_line + j) = result[(i << 2) + j];
                }
            }
        }
#else
        {
            float* col_tmp = (float*)(col + col_line * kernel_size);
            sgemm_4x12_a17(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, activation, 0);
        }
        if (col_end3)
        {
            int col_line = col_end & -4;
            float result[4 * PER_OUT_CHAN];
            float* col_tmp = (float*)(col + col_line * kernel_size);
            sgemm_4x12_a17(biasptr, col_tmp, kernel_tmp, kernel_size, result, 4, activation, 0);
            for (int i = 0; i < PER_OUT_CHAN; i++)
            {
                for (int j = 0; j < (col_end3); j++)
                {
                    *(output + (p + i) * output_xy + col_line + j) = result[(i << 2) + j];
                }
            }
        }
#endif
    }
}

/* 4x4 GEMM tail: handles the output channels left over after sgemm_set
 * (kernel_start..kernel_end in steps of 4, plus ragged channel/column
 * tails via a stack `result` scatter). */
static void sgemm4x4(float* col, float* kernel, float* biases, float* output, int kernel_size, int col_start, int col_end, int kernel_start, int kernel_end, int output_xy, int activation, int num_thread, int cpu_affinity)
{
    int col_end3 = col_end & 0x3;
    int kernel_end3 = kernel_end & 0x3;

#pragma omp parallel for num_threads(num_thread)
    for (int kernel_num = (kernel_start & -4); kernel_num < (kernel_end & -4); kernel_num += 4)
    {
        float *cur_col, *cur_kernel, *cur_output;
        float* cur_biases = biases ? (float*)(biases + kernel_num) : NULL;
        cur_kernel = (float*)(kernel + kernel_num * kernel_size);
        cur_output = (float*)(output + kernel_num * output_xy);
        for (int col_line = 0; col_line < (col_end & -4); col_line += 4)
        {
            cur_col = (float*)(col + col_line * kernel_size);
#ifdef __aarch64__
            sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, cur_output + col_line, output_xy, activation, 0);
#else
            sgemm_4x4_a17(cur_biases, cur_col, cur_kernel, kernel_size, cur_output + col_line, output_xy, activation, 0);
#endif
        }
        if (col_end3)
        {
            float result[16];
            int col_line = col_end & -4;
            cur_col = (float*)(col + col_line * kernel_size);
#ifdef __aarch64__
            sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#else
            sgemm_4x4_a17(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#endif
            for (int i = 0; i < 4; i++)
            {
                for (int j = 0; j < (col_end3); j++)
                    *(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
            }
        }
    }
    if (kernel_end3)
    {
        int kernel_num = (kernel_end & -4);
        float* cur_biases = biases ? (float*)(biases + kernel_num) : NULL;
        float* cur_kernel = (float*)(kernel + kernel_num * kernel_size);
#pragma omp parallel for num_threads(num_thread)
        for (int col_line = 0; col_line < (col_end & -4); col_line += 4)
        {
            float result[16];
            float* cur_col = (float*)(col + col_line * kernel_size);
#ifdef __aarch64__
            sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#else
            sgemm_4x4_a17(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#endif
            for (int i = 0; i < kernel_end3; i++)
                for (int j = 0; j < 4; j++)
                    *(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
        }
        if (col_end3)
        {
            float result[16];
            int col_line = col_end & -4;
            float* cur_col = (float*)(col + col_line * kernel_size);
#ifdef __aarch64__
            sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#else
            sgemm_4x4_a17(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#endif
            for (int i = 0; i < (kernel_end3); i++)
            {
                for (int j = 0; j < (col_end3); j++)
                    *(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
            }
        }
    }
}

/* check the conv wheather need to be using winograd */
/* Returns 1 when this convolution qualifies for the Winograd F(2,3)/F(4,3)
 * path: ungrouped 3x3 s1 d1 on a sufficiently large input. */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int output_chan = param->output_channel;
    int group = param->group;

    if (in_h < 7 && in_w < 7)
        return 0;
    if (in_h < 10 && in_w < 10 && output_chan < 16)
        return 0;
    if (group != 1 || kernel_h != 3 || kernel_w != 3)
        return 0;
    if (dilation_h != 1 || dilation_w != 1 || stride_h != 1 || stride_w != 1)
        return 0;

    return 1;
}

/*
 * get the memory size for im2col of input tensor
 */
int conv_hcl_get_shared_mem_size(struct tensor* input, struct tensor* output, struct conv_param* param)
{
    int in_h = input->dims[2];
    int in_w = input->dims[3];
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int group = param->group;
    int input_chan = param->input_channel / group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int out_cstep = out_h * out_w; // channel cstep, output_h * output_w
    int elem_size = input->elem_size; // uint8/int8 is 1 byte, fp32 is 4 bytes

    out_cstep = (out_cstep + 3) / 4 * 4; /* pad to a multiple of 4 columns */
    int mem_size = elem_size * kernel_size * out_cstep + 128; /* +128 slack */

    return mem_size;
}

/*
 * get the memory size for im2col + sgemm of kernel tensor interleave
 */
static int get_private_mem_size(struct tensor* filter, struct conv_param* param)
{
    int group = param->group;
    int out_chan = filter->dims[0] / group;
    int out_chan_align4 = (out_chan + 3) / 4 * 4;
    int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3];
    int mem_size = kernel_size * filter->elem_size * out_chan_align4 * group + 128; // caution

    return mem_size;
}

/* Adopt an externally provided im2col buffer (caller keeps ownership). */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_mem = 1;
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    return 0;
}

/* pack4 shared memory is unused by this fp32 kernel: the request is
 * deliberately ignored and the fields reset. */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_pack4_mem = 0;
    priv_info->im2col_buffer_pack4 = NULL;
    priv_info->im2col_buffer_pack4_size = 0;
    return 0;
}

/* No pack4 buffer needed for this implementation. */
int conv_hcl_get_shared_pack4_mem_size(struct tensor* filter, struct tensor* output, struct conv_param* param)
{
    return 0;
}

/* Pre-run: pick winograd vs im2col+gemm, allocate work buffers (unless
 * supplied externally) and interleave the kernel tensor. */
int conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param)
{
    int in_c = input_tensor->dims[1];
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    /* check winograd implement, only for conv3x3s1 */
    priv_info->winograd = winograd_support(param, in_h, in_w);
    if (priv_info->winograd)
    {
#ifdef __aarch64__
        if (in_c >= 256)
            return wino_conv_hcl_prerun_1(input_tensor, filter_tensor, output_tensor, priv_info, param);
        else
#endif
            return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
    }

    /* alloc mem of im2col */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }

    /* alloc mem of kernel interleave */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* kernel interleave */
    interleave(filter_tensor, priv_info, param);

    return 0;
}

/* Post-run: release winograd state and any internally allocated buffers. */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (priv_info->winograd)
    {
        wino_conv_hcl_postrun(priv_info);
    }

    if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    return 0;
}

/* Execute the convolution: dispatch to winograd when selected, otherwise
 * run im2col + blocked GEMM per batch/group, tiling columns so each block
 * of the im2col buffer fits the (assumed) L2 cache. */
int conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity)
{
    /* param */
    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_h1 = param->pad_h1;
    int pad_w0 = param->pad_w0;
    int pad_w1 = param->pad_w1;
    int act_type = param->activation;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;
    int input_image_size = input_tensor->dims[1] * input_tensor->dims[2] * input_tensor->dims[3];

    if (priv_info->winograd)
    {
#ifdef __aarch64__
        if (in_c >= 256)
            return wino_conv_hcl_run_1(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity);
        else
#endif
            return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity);
    }

    int out_c = output_tensor->dims[1] / group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_hw = out_h * out_w;
    int output_size = out_c * out_h * out_w;
    int out_c_align = ((out_c + 3) & -4);
    int output_image_size = output_tensor->dims[1] * output_tensor->dims[2] * output_tensor->dims[3];

    /* buffer addr */
    float* input_buf = (float*)input_tensor->data;
    float* output_buf = (float*)output_tensor->data;
    float* biases_buf = NULL;
    if (bias_tensor != NULL)
        biases_buf = (float*)bias_tensor->data;
    float* col_buf = (float*)priv_info->im2col_buffer;
    float* interleave_buf = (float*)priv_info->interleave_buffer;

    /* block size split parameter */
    /* assumes 512 KB L2 on LITTLE cores, 1 MB otherwise — TODO confirm per SoC */
    int L2_CACHE_SIZE = ((cpu_affinity == TENGINE_CLUSTER_LITTLE) ? 512 : 1024) * 1024;
    int kernel_size_l1 = kernel_size;
    int col_cnt_l2 = L2_CACHE_SIZE / 4 / kernel_size_l1 * 7 / 8;
    col_cnt_l2 = col_cnt_l2 > 4 ? (col_cnt_l2 & -4) : 4;
    int sgemm_set_chan = out_c / PER_OUT_CHAN * PER_OUT_CHAN;
    int sgemm_set_remain = out_c % PER_OUT_CHAN;

    for (int n = 0; n < batch; n++) // batch size
    {
        for (int g = 0; g < group; g++)
        {
            float* cur_input = input_buf + n * input_image_size + g * input_size;
            float* cur_kernel = interleave_buf + g * kernel_size * out_c_align;
            float* cur_output = output_buf + n * output_image_size + g * output_size;
            float* cur_bias = biases_buf ? (biases_buf + g * out_c) : NULL;

            /* im2col */
            im2col(cur_input, col_buf, in_c, in_w, in_h, kernel_w, kernel_h, stride_w, stride_h, dilation_w, dilation_h, pad_w0, pad_w1, pad_h0, pad_h1, out_w, out_h, num_thread);

            for (int col_i = 0; col_i < out_hw; col_i += col_cnt_l2)
            {
                int col_start = col_i;
                int col_end = col_i + col_cnt_l2;
                col_end = col_end > out_hw ? out_hw : col_end;

                /* gemm */
                sgemm_set(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, col_start, col_end, 0, sgemm_set_chan, out_hw, act_type, num_thread, cpu_affinity);
                if (sgemm_set_remain)
                    sgemm4x4(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, col_start, col_end, sgemm_set_chan, out_c, out_hw, act_type, num_thread, cpu_affinity);
            }
        }
    }

    return 0;
}
peel.h
#include "count.h"

//buffers/arrays for histogramming edges/edge workload for wing decomposition
std::vector<std::array<intB, locBuffSizeLarge>> thdBloomBuff;
std::vector<std::array<intE, locBuffSizeLarge>> thdEdgeBuff;
std::vector<std::vector<intE>> histCountPerThread;   //per-thread support histograms (merged into histCountGlobal)
std::vector<intE> histCountGlobal;
std::vector<intE> histAccGlobal;                     //prefix/suffix-summed histogram
std::vector<std::vector<intB>> histWorkPerThread;
std::vector<intB> histWorkGlobal;
std::vector<intB> histWorkAccGlobal;
//for static load balancing
std::vector<intB> workBloomSchedule;
std::vector<intB> accWorkBloomSchedule;
std::vector<intB> partBloomStart;

/*****************************************************************************
Re-count and generate updates for 2-hop neighbors of deleted vertices
Inputs:
    1. G -> graph object
    2. labels -> vertices not deleted yet
    3. activeList -> vertices peeled in this round
    4. isActive -> boolean vector mapping a vertex ID to its active status
    5. currSupport -> current support of vertices
    6. nonNativeSupport -> support from other vertices not included in G
    7. wedgeCnt -> 2D array for threads to store wedges while counting
Outputs:
    1. updateVertexList -> list of vertices whose support values are updated
    2. updateValueList -> corresponding values by which support should be reduced
******************************************************************************/
void return_updates_by_counting(Graph &G, std::vector<intV> &labels, std::vector<intV> &activeList, std::vector<uint8_t> &isActive, std::vector<intB> &currSupport, std::vector<intB> &nonNativeSupport, std::vector<intV> &updateVertexList, std::vector<intB> &updateValueList, std::vector<std::vector<intV>> &wedgeCnt)
{
    //recount butterflies from scratch on the residual graph
    std::vector<intB> bcnt;
    count_per_vertex (G, labels, bcnt, wedgeCnt);
    //a vertex gets an update iff its fresh count (plus external support)
    //differs from its cached support, and it is still alive and not active
    std::vector<uint8_t> differs(labels.size());
    #pragma omp parallel for
    for (intV i=0; i<labels.size(); i++)
    {
        auto v = labels[i];
        differs[i] = (((bcnt[v] + nonNativeSupport[v]) != currSupport[v]) && (!G.is_deleted(v)) && (!isActive[v])) ? 1 : 0;
    }
    //compact the flagged vertices into the output list
    parallel_compact<intV, intV>(labels, differs, updateVertexList);
    updateValueList.resize(updateVertexList.size());
    #pragma omp parallel for
    for (intV i=0; i<updateVertexList.size(); i++)
    {
        auto v = updateVertexList[i];
        //amount by which the cached support must drop to match the recount
        updateValueList[i] = currSupport[v]-bcnt[v]-nonNativeSupport[v];
    }
}

/*****************************************************************************
Peel active vertices and generate updates for 2-hop neighbors of deleted
vertices
Inputs:
    1. G -> graph object
    2. labels -> vertices to be peeled but not deleted yet
    3. activeList -> vertices peeled in this round
    4. isActive -> boolean vector mapping a vertex ID to its active status
    5. currSupport -> current support of vertices
    6. wedgeCnt -> 2D array for threads to store wedges while counting
Outputs:
    1. updateVertexList -> list of vertices whose support values are updated
    2. updateValueList -> corresponding values by which support should be reduced
Other args(they must be initialized to all "falses/zeros"):
//can be replaced by sparseAdditiveSet from ligra
    1. isUpdated -> boolean vector that maps vertices to their "support updated"
       status in the current peeling round
    2. peelCnts -> store the running count of #butterflies deleted for vertices
       during peeling
******************************************************************************/
void return_updates_by_peeling(Graph &G, std::vector<intV> &labels, std::vector<intV> &activeList, std::vector<uint8_t> &isActive, std::vector<intB> &currSupport, std::vector<intV> &updateVertexList, std::vector<intB> &updateValueList, std::vector<std::vector<intV>> &wedgeCnt, std::vector<uint8_t> &isUpdated, std::vector<intB> &peelCnts)
{
    std::vector<intV> updatesPerThread (NUM_THREADS, 0);
    std::vector<intV> offset (NUM_THREADS+1, 0);
    intV numActiveVertices = activeList.size();
    //use fewer threads when the active set is tiny; chunk size capped at 5
    int numActiveThreads = std::min((unsigned int)(numActiveVertices>>1) + 1, NUM_THREADS);
    intV BS = ((numActiveVertices-1)/numActiveThreads + 1);
    BS = (BS > 5) ? 5 : BS;
    #pragma omp parallel num_threads(numActiveThreads)
    {
        size_t tid = omp_get_thread_num();
        std::vector<intV> tmpVertexList;            //vertices this thread first touched
        std::vector<intV> &numW = wedgeCnt[tid];    //per-thread wedge counter, reset to 0 after each active vertex
        std::vector<intV> hop2Neighs;
        hop2Neighs.reserve(8096);                   //NOTE(review): likely meant 8192; harmless either way
        #pragma omp for schedule(dynamic, BS)
        for (intV i=0; i<numActiveVertices; i++)
        {
            intV delV = activeList[i];
            intV deg;
            std::vector<intV> &neighList = G.get_neigh(delV, deg);
            //count wedges from delV to each surviving 2-hop neighbor
            for (intV j=0; j<deg; j++)
            {
                intV neigh = neighList[j];
                intV neighDeg;
                std::vector<intV> &neighOfNeighList = G.get_neigh(neigh, neighDeg);
                for (intV k=0; k<neighDeg; k++)
                {
                    intV neighOfNeigh = neighOfNeighList[k];
                    if(isActive[neighOfNeigh] || G.is_deleted(neighOfNeigh)) continue;
                    if (numW[neighOfNeigh]==0) hop2Neighs.push_back(neighOfNeigh);
                    numW[neighOfNeigh] = numW[neighOfNeigh] + 1;
                }
            }
            //w wedges through delV destroy C(w,2) butterflies at the endpoint
            for (auto x:hop2Neighs)
            {
                if (numW[x] >= 2)
                {
                    intB butterflies = choose2<intB, intV>(numW[x]);
                    //CAS ensures each updated vertex is queued exactly once across threads
                    if (__sync_bool_compare_and_swap(&isUpdated[x], 0, 1)) tmpVertexList.push_back(x);
                    __sync_fetch_and_add(&peelCnts[x], butterflies);
                }
                numW[x] = 0;    //reset counter for the next active vertex
            }
            hop2Neighs.clear();
        }
        updatesPerThread[tid] = tmpVertexList.size();
        #pragma omp barrier
        //compute per-thread write offsets, then scatter local lists into the output
        #pragma omp single
        {
            serial_prefix_sum(offset, updatesPerThread);
            updateVertexList.clear();
            updateValueList.clear();
            updateVertexList.resize(offset[NUM_THREADS]);
            updateValueList.resize(offset[NUM_THREADS]);
        }
        #pragma omp barrier
        for (intV i=0; i<tmpVertexList.size(); i++)
        {
            intV vId = tmpVertexList[i];
            updateVertexList[offset[tid]+i] = vId;
            updateValueList[offset[tid]+i] = peelCnts[vId];
        }
        #pragma omp barrier
        //restore the scratch arrays to all-zero for the next round
        #pragma omp for
        for (intV i=0; i<offset[NUM_THREADS]; i++)
        {
            intV vId = updateVertexList[i];
            isUpdated[vId] = 0;
            peelCnts[vId] = 0;
        }
    }
}

/*****************************************************************************
Update the deleted status of vertices peeled in current round
Inputs:
1. G-> graph object
2. activeList -> List of vertices peeled
Outputs:
1.
isActive -> array that maps vertex IDs to their "active" status
******************************************************************************/
void delete_active_vertices(Graph &G, std::vector<intV> &activeList, std::vector<uint8_t> &isActive)
{
    intV numActiveVertices = activeList.size();
    #pragma omp parallel for
    for (intV i=0; i<numActiveVertices; i++)
    {
        //clear the active flag and mark the vertex deleted in the graph
        isActive[activeList[i]] = false;
        G.delete_vertex(activeList[i]);
    }
}

/*****************************************************************************
Peel the active vertices and generated count updates to their 2-hop neighbors
Choose either re-counting or peeling for update generation
Inputs:
    1. G-> graph object
    2. labels -> vertices to be peeled but not deleted yet
    3. activeList -> vertices peeled in this round
    4. isActive -> boolean vector mapping a vertex ID to its active status
    5. supp -> current support of vertices
    6. nonNativeSupport -> support from other vertices not included in G
    7. countComplexity -> work required to do a re-count
    8. peelWork -> per-vertex work for peeling
    9. wedgeCnt -> 2D array for threads to store wedges while counting
Outputs:
    1. updateVertexList -> list of vertices whose support values are updated
    2. updateValueList -> corresponding values by which support should be reduced
Other args(they must be initialized to all "falses/zeros"):
//can be replaced by sparseAdditiveSet from ligra
    1. isUpdated -> boolean vector that maps vertices to their "support updated"
       status in the current peeling round
    2. peelCnts -> store the running count of #butterflies deleted for vertices
       during peeling
Returns: 0 if re-counting was used, 1 if peeling was used
******************************************************************************/
int update_count (Graph &G, std::vector<intV> &labels, std::vector<intV> &activeList, std::vector<uint8_t> &isActive, std::vector<intB> &supp, std::vector<intB> &nonNativeSupport, std::vector<intV> &updateVertexList, std::vector<intB> &updateSupportVal, intB countComplexity, std::vector<intE> &peelWork, std::vector<std::vector<intV>> &wedgeCnt, std::vector<uint8_t> &isUpdated, std::vector<intB> &peelCnts)
{
    //estimate the cost of peeling this active set
    intB peelComplexity = 0;
    #pragma omp parallel for reduction(+:peelComplexity)
    for (intV i=0; i<activeList.size(); i++)
        peelComplexity += peelWork[activeList[i]];
    //pick whichever of (full re-count) vs (incremental peel) is cheaper
    bool dontPeel = (countComplexity < peelComplexity);
    //dontPeel = false;
    if (dontPeel)
    {
        //delete first, then recount the residual graph
        delete_active_vertices(G, activeList, isActive);
        return_updates_by_counting(G, labels, activeList, isActive, supp, nonNativeSupport, updateVertexList, updateSupportVal, wedgeCnt);
        return 0;
    }
    else
    {
        //peel first (needs the active vertices' adjacency), then delete
        return_updates_by_peeling(G, labels, activeList, isActive, supp, updateVertexList, updateSupportVal, wedgeCnt, isUpdated, peelCnts);
        delete_active_vertices(G, activeList, isActive);
        return 1;
    }
}

/*****************************************************************************
Construct (in parallel) a list of vertices whose support lies between 'lo' and
'hi'
Inputs:
1. G-> graph object
2. candidates -> vector of potential vertices that can be activated
3. lo, hi -> range of support to be activated
4. supp -> current support of vertices
Outputs:
1. activeList -> list of active vertices
2.
isActive -> boolean vector mapping a vertex ID to its active status
******************************************************************************/
void construct_active_list (Graph &G, std::vector<intV> &candidates, intB lo, intB hi, std::vector<intV> &activeList, std::vector<uint8_t> &isActive, std::vector<intB> &supp)
{
    #pragma omp parallel for
    for (int i=0; i<candidates.size(); i++)
    {
        auto v = candidates[i];
        //activate live vertices with support in [lo, hi); clamp support to lo
        if((supp[v]<hi) && (supp[v]>=lo) && (!G.is_deleted(v)))
        {
            supp[v] = lo;
            isActive[v] = 1;
        }
    }
    //compact the flagged candidates into activeList
    parallel_compact_kv<intV, intV>(candidates, isActive, activeList);
}

/*****************************************************************************
Peel vertices whose support is in the given range and update support of other
vertices
Arguments:
    1. G-> graph object
    2. vertices -> candidate (remaining) vertices on the side being peeled
    3. lo, hi -> range of support values to be peeled
    4. isActive -> boolean vector mapping a vertex ID to its "active" status
    5. supp -> support vector
    6. nonNativeSupport -> support from other vertices not included in G
    7. countComplexity -> work required to do a re-count
    8. peelWork -> per-vertex work for peeling
    9. wedgeCnt -> 2D array for threads to store wedges while counting
    10. isUpdated -> boolean vector that maps vertices to their "support
        updated" status in the current peeling round
    11. peelCnts -> store the running count of #butterflies deleted for
        vertices during peeling
Returns: number of vertices deleted in this range
******************************************************************************/
intV peel_range (Graph &G, std::vector<intV> &vertices, intB lo, intB hi, std::vector<uint8_t> &isActive, std::vector<intB> &supp, std::vector<intB> &nonNativeSupport, intB countComplexity, std::vector<intE> &peelWork, std::vector<std::vector<intV>> &wedgeCnt, std::vector<uint8_t> &isUpdated, std::vector<intB> &peelCnts)
{
    intV numDeleted = 0;
    std::vector<intV> activeList;
    //sanity: everything surviving must already have support >= lo
    for (auto x : vertices)
    {
        assert((supp[x] >= lo) || G.is_deleted(x));
    }
    construct_active_list(G, vertices, lo, hi, activeList, isActive, supp);
    numDeleted += activeList.size();
    //iteratively delete all vertices with tip values in this range//
    ////////////////////////////////////////////////////////////////
    std::vector<intV> updateVertexList;
    std::vector<intB> updateSupportVal;
    //NOTE(review): edgeDelThresh/peelWorkDone are computed but not used here
    intB edgeDelThresh = ((intB)G.numE*((intB)std::log2(double(G.numV))));
    intB peelWorkDone = 0;
    intV numRounds = 0;
    intV numPeeled = 0;
    while(activeList.size() > 0)
    {
        //for (auto x:activeList)
        //    printf("deleting %u with support %llu\n", x, supp[x]);
        numPeeled += update_count(G, vertices, activeList, isActive, supp, nonNativeSupport, updateVertexList, updateSupportVal, countComplexity, peelWork, wedgeCnt, isUpdated, peelCnts);
        intV numUpdates = updateVertexList.size();
        //apply updates, but never let support drop below lo
        #pragma omp parallel for
        for (intV i=0; i<numUpdates; i++)
        {
            intV v = updateVertexList[i];
            intB updateVal = std::min(updateSupportVal[i], supp[v]-lo);
            supp[v] -= updateVal;
        }
        //updated vertices may now fall into [lo, hi) -> next wave of actives
        activeList.clear();
        construct_active_list(G, updateVertexList, lo, hi, activeList, isActive, supp);
        numDeleted += activeList.size();
        numRounds++;
        updateVertexList.clear();
        updateSupportVal.clear();
    }
    //printf("number of rounds required = %d, peeled = %d, counted = %d\n", numRounds, numPeeled, numRounds-numPeeled);
    return numDeleted;
}

/*****************************************************************************
Remove
deleted vertices from the candidate list and return peeling complexity of remaining vertices Arguments: 1. G -> graph object 2. vertices -> current vertex list (will be updated) 3. peelComplexity -> peeling work required for each vertex (vector) 4. keep -> helper boolean vector to be used in parallel compaction ******************************************************************************/ intB remove_deleted_vertices(Graph &G, std::vector<intV> &vertices, std::vector<intE> &peelComplexity, std::vector<uint8_t>& keep) { keep.resize(vertices.size()); intB remPeelComplexity = 0; #pragma omp parallel for reduction (+:remPeelComplexity) for (intV i=0; i<vertices.size(); i++) { keep[i] = (G.is_deleted(vertices[i])) ? 0 : 1; remPeelComplexity += (keep[i]) ? peelComplexity[vertices[i]] : 0; } parallel_compact_in_place<intV, intV>(vertices, keep); return remPeelComplexity; } /***************************************************************************** //overloaded function defintion //cleans "vertices" vector and also creates a list of deleted vertices //Additional argument - "delVertices" vector ******************************************************************************/ intB remove_deleted_vertices(Graph &G, std::vector<intV> &vertices, std::vector<intE> &peelComplexity, std::vector<uint8_t>& keep, std::vector<intV> &delVertices) { keep.resize(vertices.size()); intB remPeelComplexity = 0; #pragma omp parallel for reduction (+:remPeelComplexity) for (intV i=0; i<vertices.size(); i++) { keep[i] = (G.is_deleted(vertices[i])) ? 0 : 1; remPeelComplexity += (keep[i]) ? peelComplexity[vertices[i]] : 0; } parallel_compact_in_place<intV, intV>(vertices, keep, delVertices); return remPeelComplexity; } /***************************************************************************** 2-approximate tip-decomposition. Peel a range of support values that doubles every round Arguments: 1. G -> graph object 2. 
tipVal -> half approximation of tip values of vertices must be initialized with the per-vertex butterfly counts 3. peelSide -> 0 implies peeling vertices in U, 1 means V 4. wedgeCnt -> 2D helper array for threads to store wedges ******************************************************************************/ /* void approx_tip_decomposition(Graph &G, std::vector<intB> &tipVal, int peelSide, std::vector<std::vector<intV>> &wedgeCnt) { std::vector<intV> vertices; G.get_labels(vertices, peelSide); std::vector<uint8_t> keep; //std::vector<uint8_t> keep(vertices.size()); printf("number of vertices to peel = %u\n", vertices.size()); //printf("vertices are"); //print_list_horizontal(vertices); std::vector<intE> countWork; std::vector<intE> peelWork; printf("estimating workloads\n"); intB totalCountComplexity = estimate_total_workload(G, countWork, peelWork); std::vector<uint8_t> isActive (G.numT); std::vector<uint8_t> isUpdated(G.numT); std::vector<intB> peelCnts(G.numT); std::vector<intB> nonNativeSupport(G.numT); #pragma omp parallel for for (intV i=0; i<G.numT; i++) { isActive[i] = 0; isUpdated[i] = 0; peelCnts[i] = 0; nonNativeSupport[i] = 0; } intB lo = 0; intB range = 1; intV numDeleted = 0; intV targetDeletion = (peelSide) ? G.numV : G.numU; printf("starting decopmosition\n"); while(numDeleted < targetDeletion) { numDeleted = numDeleted + peel_range(G, vertices, lo, lo+range, isActive, tipVal, nonNativeSupport, totalCountComplexity, peelWork, wedgeCnt, isUpdated, peelCnts); //update range lo = lo + range; range = range*2; intB remPeelComplexity = remove_deleted_vertices(G, vertices, peelWork, keep); } } */ /***************************************************************************** Find the target range to create partition with desired peeling complexity Arguments: 1. vertices -> candidate vertices 2. tipVal -> support value of vertices 3. targetPeelComplexity -> desired amount of work required to peel the partition 4. lowerBound -> lowest tip value 5. 
peelWork -> work required to peel the vertices
Returns: tuple of (range upper bound 'hi', target bucket index, number of
unique support values)
******************************************************************************/
std::tuple<intB, intV, intV> find_range (std::vector<intV> &vertices, std::vector<intB>&tipVal, intB targetPeelComplexity, intB lowerBound, std::vector<intE> &peelWork)
{
    parallel_sort_kv_increasing<intV, intB>(vertices, tipVal); //sort vertices on their current support
    //compute workload for each bucket - map, prefix sum, scatter
    //find bucket id for each vertex using map and prefix sum
    //scatter with atomic add to compute workload for the buckets
    std::vector<uint8_t> suppIsUniq(vertices.size());
    suppIsUniq[suppIsUniq.size()-1] = 1;
    //mark the last occurrence of each distinct support value
    #pragma omp parallel for
    for (intV i=0; i<vertices.size()-1; i++)
        suppIsUniq[i] = (tipVal[vertices[i]]==tipVal[vertices[i+1]]) ? 0 : 1;
    std::vector<intV> wrOffset;
    parallel_prefix_sum(wrOffset, suppIsUniq);
    intV numUniqSuppVals = wrOffset.back(); //last element in the offset vector
    std::vector<intB> workPerSuppVal(numUniqSuppVals); //work to peel all vertices in a given bucket
    std::vector<intB> suppVal(numUniqSuppVals); //support value corresponding to the individual buckets
    #pragma omp parallel
    {
        #pragma omp for
        for (intV i=0; i<numUniqSuppVals; i++) workPerSuppVal[i] = 0;
        #pragma omp barrier
        //scatter each vertex's peel work into its support bucket
        #pragma omp for
        for (intV i=0; i<vertices.size(); i++)
        {
            intV v = vertices[i];
            intB work = peelWork[v];
            suppVal[wrOffset[i]] = tipVal[v];
            __sync_fetch_and_add(&workPerSuppVal[wrOffset[i]], work);
        }
    }
    //none of the vertices with support < lo should've survived
    assert(suppVal[0] >= lowerBound);
    //prefix sum to compute work required to peel all vertices till a particular bucket
    parallel_prefix_sum_inclusive(workPerSuppVal, workPerSuppVal);
    //find the first bucket with work just lower than the target value
    //NOTE(review): if targetPeelComplexity exceeds the total, lower_bound
    //returns numUniqSuppVals and suppVal[tgtBktId] reads one past the end —
    //confirm callers keep the target below the total work
    intV tgtBktId = std::lower_bound(workPerSuppVal.begin(), workPerSuppVal.end(), targetPeelComplexity) - workPerSuppVal.begin();
    intB hi = std::max(suppVal[tgtBktId], suppVal[0]+1); //hi should be greater than the support of the first bucket to ensure non-zero vertex peeling
    return std::make_tuple(hi, tgtBktId, numUniqSuppVals);
}

/*****************************************************************************
Coarse-grained decomposition with (targetted) equal workload partitions.
Arguments:
    1. G -> graph object
    2. tipVal -> (output) support of vertices when their partition begins
       peeling; must be initialized with the per-vertex butterfly counts
    3. peelSide -> 0 implies peeling vertices in U, 1 means V
    4. wedgeCnt -> 2D helper array for threads to store wedges
    5. numParts -> number of partitions to create; final partitions may be smaller
    6. partTipVals -> output vector containing support ranges of the partitions
    7. partVertices -> 2D array to store vertices for each partition
    8. partPeelWork -> work done to peel the entire partition (considering no
       re-counting)
Returns: number of partitions actually created
******************************************************************************/
int create_balanced_partitions(Graph &G, std::vector<intB> &tipVal, int peelSide, std::vector<std::vector<intV>> &wedgeCnt, int numParts, std::vector<std::pair<intB, intB>> &partTipVals, std::vector<std::vector<intV>> &partVertices, std::vector<intB> &partPeelWork)
{
    std::vector<intV> vertices;
    G.get_labels(vertices, peelSide);
    intV targetDeletion = vertices.size();
    std::vector<uint8_t> keep;
    //scratch state; all of these must start zeroed
    std::vector<uint8_t> isActive(G.numT);
    std::vector<uint8_t> isUpdated(G.numT);
    std::vector<intB> peelCnts(G.numT);
    std::vector<intB> nonNativeSupport(G.numT);
    #pragma omp parallel for
    for (intV i=0; i<G.numT; i++)
    {
        isActive[i] = 0;
        isUpdated[i] = 0;
        peelCnts[i] = 0;
        nonNativeSupport[i] = 0;
    }
    std::vector<intE> countWork; //work required per vertex to count
    std::vector<intE> peelWork; //work required per vertex to peel, 2-hop neighborhood size
    intB totalCountComplexity = estimate_total_workload(G, countWork, peelWork);
    partTipVals.resize(numParts);
    partPeelWork.resize(numParts);
    std::vector<intV> verticesPerPart (numParts);
    std::vector<intB> partTipValInit; //initial values of vertices when their corresponding partitions starts peeling
    parallel_vec_copy(partTipValInit, tipVal);
    intB totalPeelComplexity = 0;
    #pragma omp parallel for reduction(+:totalPeelComplexity)
    for (intV i=0; i<vertices.size(); i++)
        totalPeelComplexity += peelWork[vertices[i]];
    intB avgPeelComplexityRequired = totalPeelComplexity/numParts;
    printf("total peel complexity = %lld, count complexity = %lld\n", totalPeelComplexity, totalCountComplexity);
    intB remPeelComplexity = totalPeelComplexity;
    intB lo = 0;
    int numPartsCreated = 0;
    int numPartsPerThread = numParts/NUM_THREADS;
    intV numDeleted = 0;
    //if lot of work done, remove deleted edges to speedup further processing
    intB edgeDelThresh = ((intB)G.numE)*((intB)std::log2((double)G.numV));
    intB peelWorkDone = 0;
    //helps in adapting the targetWorkComplexity if the partitions become too heavy
    double scaleFactor = 1.0;
    //till there is something to peel or only last partition remains
    while((remPeelComplexity > 0) && (numPartsCreated < numParts-1) && (numDeleted < targetDeletion))
    {
        if (peelWorkDone > edgeDelThresh)
        {
            G.delete_edges();
            peelWorkDone = 0;
        }
        double bktPeelStart = omp_get_wtime();
        intB targetPeelComplexity = (intB)((scaleFactor*(double)remPeelComplexity)/(numParts-numPartsCreated));
        //figure out target complexity to cover
        intB desiredPeelComplexity = remPeelComplexity/(numParts-numPartsCreated);
        intB hi;
        intV tgtBktId, numUniqSuppVals;
        std::tie(hi, tgtBktId, numUniqSuppVals) = find_range(vertices, tipVal, targetPeelComplexity, lo, peelWork);
        //peel the range
        verticesPerPart[numPartsCreated] = peel_range(G, vertices, lo, hi, isActive, tipVal, nonNativeSupport, totalCountComplexity, peelWork, wedgeCnt, isUpdated, peelCnts);
        //logistics, track # deleted vertices, record range of the partition
        numDeleted += verticesPerPart[numPartsCreated];
        partTipVals[numPartsCreated] = std::make_pair(lo, hi);
        intB prevRemPeelComplexity = remPeelComplexity;
        std::vector<intV> delVertices;
        remPeelComplexity = remove_deleted_vertices(G, vertices, peelWork, keep, delVertices);
        partPeelWork[numPartsCreated] = prevRemPeelComplexity - remPeelComplexity;
        peelWorkDone += partPeelWork[numPartsCreated];
        double bktPeelEnd = omp_get_wtime();
#ifdef DEBUG
        printf("partition id = %d, time taken = %lf, vertices deleted = %u, range from %lld to %lld, desired complexity = %lld, target complexity = %lld, actual work done = %lld\n", numPartsCreated, (bktPeelEnd-bktPeelStart)*1000, verticesPerPart[numPartsCreated], lo, hi, desiredPeelComplexity, targetPeelComplexity, partPeelWork[numPartsCreated]);
#endif
        //adapt, if too much work in this bucket, make targets smaller for the next partition
        scaleFactor = std::min(((double)targetPeelComplexity)/((double)partPeelWork[numPartsCreated]), 1.0);
        partVertices.push_back(delVertices);
        numPartsCreated++;
        parallel_vec_elems_copy(partTipValInit, tipVal, vertices);
        //prep for next partition creation
        lo = hi;
    }
    intV remVertices = vertices.size();
    intB maxRemSupp = 0;
    //put anything remaining in the last partition
    if (remVertices > 0)
    {
        partPeelWork[numPartsCreated] = remPeelComplexity;
        partVertices.push_back(vertices);
        #pragma omp parallel for reduction(max:maxRemSupp)
        for (intV i=0; i<remVertices; i++)
        {
            maxRemSupp = std::max(tipVal[vertices[i]], maxRemSupp);
            tipVal[vertices[i]] = lo;
        }
        partTipVals[numPartsCreated++] = std::make_pair(lo, maxRemSupp+1);
    }
    //hand back the per-partition initial supports and restore G for the caller
    tipVal.swap(partTipValInit);
    #pragma omp parallel for
    for (intV i=0; i<G.numU; i++)
        G.restore_vertex(G.uLabels[i]);
    G.restore_edges();
    G.sort_adj();
    return numPartsCreated;
}

/*****************************************************************************
Print Coarse-grained decomposition details into a binary file
Arguments:
1. G -> graph object
2. tipVal -> (output) support of vertices when their partition begins peeling
must be initialized with the per-vertex butterfly counts
5. numParts -> number of partitions to create; final partitions may be smaller
6.
partTipVals -> output vector containing support ranges of the partitions
7. partVertices -> 2D array to store vertices for each partition
8. partPeelWork -> work done to peel the entire partition (considering no
   re-counting)
******************************************************************************/
void print_partitioning_details (std::string &filename, Graph &G, std::vector<intB> &tipVal, int numParts, std::vector<std::pair<intB, intB>> &partTipVals, std::vector<std::vector<intV>> &partVertices, std::vector<intB> &partPeelWork)
{
    //flatten the per-partition data into parallel arrays:
    //vOut[i] = vertex, pOut[i] = its partition id, tOut[i] = its tip value
    std::vector<intV> vOut;
    std::vector<int> pOut;
    std::vector<intB> tOut;
    std::vector<intB> pRangeLo;
    std::vector<intB> pRangeHi;
    intV numVOut = 0;
    for (int i=0; i<numParts; i++)
    {
        pRangeLo.push_back(partTipVals[i].first);
        pRangeHi.push_back(partTipVals[i].second);
        for (intV j=0; j<partVertices[i].size(); j++)
        {
            numVOut++;
            vOut.push_back(partVertices[i][j]);
            pOut.push_back(i);
            tOut.push_back(tipVal[partVertices[i][j]]);
        }
    }
    //every U-side vertex must appear in exactly one partition
    assert(G.numU==numVOut);
    //NOTE(review): 'filename' is accepted but the output path is hard-coded to
    //"part_details.bin"; read_partitioning_details() opens the caller-supplied
    //path — confirm whether the writer should use filename.c_str() instead.
    //"wb" (not "w"): the payload is binary and the reader opens with "rb".
    FILE* fpart = fopen("part_details.bin", "wb");
    if (fpart == NULL)
    {
        fputs("file error\n", stderr);
        exit(EXIT_FAILURE);
    }
    fwrite(&numParts, sizeof(int), 1, fpart);
    if (numParts > 0)
    {
        fwrite(&partPeelWork[0], sizeof(intB), numParts, fpart);
        fwrite(&pRangeLo[0], sizeof(intB), numParts, fpart);
        fwrite(&pRangeHi[0], sizeof(intB), numParts, fpart);
    }
    //guard against empty vectors: &vOut[0] on an empty vector is undefined
    if (numVOut > 0)
    {
        fwrite(&vOut[0], sizeof(intV), numVOut, fpart);
        fwrite(&pOut[0], sizeof(int), numVOut, fpart);
        fwrite(&tOut[0], sizeof(intB), numVOut, fpart);
    }
    fclose (fpart);
}

/*****************************************************************************
Read Coarse-grained decomposition details from a binary file
Arguments:
1. G -> graph object
2. tipVal -> (output) support of vertices when their partition begins peeling
must be initialized with the per-vertex butterfly counts
5. numParts -> number of partitions to create; final partitions may be smaller
6. partTipVals -> output vector containing support ranges of the partitions
7. partVertices -> 2D array to store vertices for each partition
8.
partPeelWork -> work done to peel the entire partition (considering no
re-counting)
******************************************************************************/
void read_partitioning_details (std::string &filename, Graph &G, std::vector<intB> &tipVal, int &numParts, std::vector<std::pair<intB, intB>> &partTipVals, std::vector<std::vector<intV>> &partVertices, std::vector<intB> &partPeelWork)
{
    FILE* fcd = fopen(filename.c_str(), "rb");
    if (fcd==NULL)
    {
        fputs("file error\n", stderr);
        exit(EXIT_FAILURE);
    }
    printf("file opened\n");
    //header: partition count, then per-partition peel work and [lo, hi) ranges
    int np;
    if (fread(&np, sizeof(int), 1, fcd) != 1)
    {
        fputs("file error\n", stderr);
        exit(EXIT_FAILURE);
    }
    numParts = np;
    printf("number of partitions = %d\n", numParts);
    partPeelWork.resize(numParts);
    partTipVals.resize(numParts);
    partVertices.resize(numParts);
    fread(&partPeelWork[0], sizeof(intB), numParts, fcd);
    printf("read peel work\n");
    //ranges are stored as all the 'lo's followed by all the 'hi's
    for (int i=0; i<numParts; i++)
        fread(&partTipVals[i].first, sizeof(intB), 1, fcd);
    for (int i=0; i<numParts; i++)
        fread(&partTipVals[i].second, sizeof(intB), 1, fcd);
    printf("read partition ranges\n");
    std::vector<intV> vIn (G.numU);
    //FIX: the writer stores partition ids as 'int' (sizeof(int) elements), so
    //the receiving buffer must be int as well; a std::vector<intV> here reads
    //garbage whenever sizeof(intV) != sizeof(int)
    std::vector<int> pIn (G.numU);
    std::vector<intB> tIn (G.numU);
    //fread returns the number of ELEMENTS read, not bytes
    size_t itemsRead = fread(&vIn[0], sizeof(intV), G.numU, fcd);
    printf("number of vertices read = %u\n", (unsigned)itemsRead);
    assert(itemsRead==G.numU);
    itemsRead = fread(&pIn[0], sizeof(int), G.numU, fcd);
    assert(itemsRead==G.numU);
    printf("read partition map\n");
    itemsRead = fread(&tIn[0], sizeof(intB), G.numU, fcd);
    assert(itemsRead==G.numU);
    printf("read tipvals\n");
    //scatter the flattened arrays back into per-partition vertex lists
    for (intV i=0; i<G.numU; i++)
    {
        intV v = vIn[i];
        tipVal[v] = tIn[i];
        partVertices[pIn[i]].push_back(v);
    }
    fclose(fcd);
}

/*****************************************************************************
Compute upper bound on the maximum wing number
Inputs:
1. eIds -> edge indices sorted on current support
2. tipVal -> vector of current support of edges
3.
nEdgesRem -> number of not yet peeled edges in eIds vector
Outputs:
returns an upper bound on max wing number
******************************************************************************/
intE find_upper_bound_wing(std::vector<intE> &eIds, std::vector<intE> &tipVal, intE nEdgesRem)
{
    //sort edge ids by current support (ascending)
    parallel_unstable_sort_kv_increasing(eIds, tipVal);
    intE ub = 0;
    if (nEdgesRem > 10*NUM_THREADS)
    {
        //parallel scan: each thread walks its chunk from high to low support;
        //per-thread maxima are combined by the max reduction
        intE BS = (nEdgesRem-1)/NUM_THREADS + 1;
        #pragma omp parallel num_threads(NUM_THREADS) reduction (max:ub)
        {
            unsigned tid = omp_get_thread_num();
            intE start = tid*BS;
            intE end = std::min(nEdgesRem, start+BS);
            //NOTE(review): the i>=start loop counting down assumes intE is
            //signed (or start > 0); with an unsigned intE and start==0 the
            //decrement would wrap — confirm the intE typedef
            for (intE i = end-1; i>=start; i--)
            {
                intE currSupp = tipVal[eIds[i]];
                intE numEdgesWithHigherSupp = nEdgesRem - i;
                //the wing number cannot exceed min(support, #edges with >= support)
                if (numEdgesWithHigherSupp >= currSupp)
                {
                    ub = std::max(ub, currSupp);
                    break;
                }
                else
                    ub = std::max(std::min(currSupp, numEdgesWithHigherSupp), ub);
            }
        }
    }
    else
    {
        //serial version of the same scan for small inputs
        for (intE i=nEdgesRem-1; i>=0; i--)
        {
            intE currSupp = tipVal[eIds[i]];
            intE numEdgesWithHigherSupp = nEdgesRem - i;
            if (numEdgesWithHigherSupp >= currSupp)
            {
                ub = std::max(ub, currSupp);
                break;
            }
            else
                ub = std::max(std::min(currSupp, numEdgesWithHigherSupp), ub);
        }
    }
    return ub;
}

/*****************************************************************************
Re-compute upper bound on the maximum wing number and populate histograms for
range determination
Inputs:
1. eIds -> edge indices sorted on current support
2. tipVal -> vector of current support of edges
3. nEdgesRem -> number of not yet peeled edges in eIds vector
4. minTipVal -> lower bound based on edges peeled so far
5.
currUb -> previous upper bound
Outputs:
returns an upper bound on max wing number
******************************************************************************/
intE update_upper_bound_wing(std::vector<intE> &eIds, intE nEdgesRem, std::vector<intE> &tipVal, intE minTipVal, intE currUb)
{
    //histogram buckets cover supports in [minTipVal, currUb], stored reversed
    //so a plain prefix sum yields a suffix (>= support) count
    intE range = currUb - minTipVal + 1;
    if (histCountGlobal.size() < range) histCountGlobal.resize(range);
    if (histAccGlobal.size() < range) histAccGlobal.resize(range);
    if (histCountPerThread.size() < NUM_THREADS) histCountPerThread.resize(NUM_THREADS);
    intE edgesPerThread = (nEdgesRem-1)/NUM_THREADS + 1;
    intE BS = (range-1)/NUM_THREADS + 1;
    intE newUb = minTipVal;
    #pragma omp parallel num_threads(NUM_THREADS)
    {
        intE tid = omp_get_thread_num();
        #pragma omp for
        for (intE i=0; i<range; i++) histCountGlobal[i] = 0;
        std::vector<intE> &locHistCount = histCountPerThread[tid];
        if (locHistCount.size() < range) locHistCount.resize(range);
        for (intE i=0; i<range; i++) locHistCount[i] = 0;
        //local histogram of (clamped) supports per thread
        #pragma omp for
        for (intE i=0; i<nEdgesRem; i++)
        {
            intE val = std::min(tipVal[eIds[i]], currUb) - minTipVal;
            assert(val < range);
            //reverse for suffix sum
            locHistCount[range-val-1]++;
        }
        //merge local histograms; random start index spreads atomic contention
        intE ptr = rand()%range;
        for (intE i=0; i<range; i++)
        {
            intE idx = (ptr+i)%range;
            __sync_fetch_and_add(&histCountGlobal[idx], locHistCount[idx]);
        }
        #pragma omp barrier
        //PREFIX SUM
        intE start = BS*tid;
        intE end = std::min((intE)(start+BS), range);
        if (range > NUM_THREADS*10)
        {
            //three-phase parallel prefix sum: local scans, carry propagation
            //of the per-block totals, then add the block carry to interiors
            //NOTE(review): histAccGlobal[start] is written before checking
            //start < range; for small range/large NUM_THREADS this could index
            //out of bounds — confirm range > 10*NUM_THREADS always prevents it
            histAccGlobal[start] = histCountGlobal[start];
            for (intE i=start+1; i<end; i++)
                histAccGlobal[i] = histAccGlobal[i-1] + histCountGlobal[i];
            #pragma omp barrier
            #pragma omp single
            {
                for (size_t i=1; i<NUM_THREADS; i++)
                {
                    intE prevEnd = BS*i;
                    if (prevEnd >= range) continue;
                    intE tend = std::min(prevEnd + BS, range);
                    histAccGlobal[tend-1] += histAccGlobal[prevEnd-1];
                }
            }
            #pragma omp barrier
            if (tid > 0)
            {
                intB blockScan = histAccGlobal[start-1];
                for (intE i=start; i<end-1; i++)
                    histAccGlobal[i] += blockScan;
            }
            //each thread finds the highest support s in its chunk such that at
            //least s edges have support >= s; take the max across threads
            intE locMax = 0;
            if (end > start)
            {
                for (intE i=start; i<end; i++)
                {
                    intE supp = (range - i - 1) + minTipVal;
                    if (histAccGlobal[i] >= supp)
                    {
                        locMax = supp;
                        break;
                    }
                }
                #pragma omp critical
                {
                    if (locMax > newUb) newUb = locMax;
                }
            }
        }
        else
        {
            //serial prefix sum + bound search for small ranges
            #pragma omp single
            {
                histAccGlobal[0] = histCountGlobal[0];
                for (intE i=1; i<range; i++)
                    histAccGlobal[i] = histAccGlobal[i-1] + histCountGlobal[i];
                for (intE i=0; i<range; i++)
                {
                    intE supp = (range - i - 1) + minTipVal;
                    if (histAccGlobal[i] >= supp)
                    {
                        newUb = supp;
                        break;
                    }
                }
                assert(histAccGlobal[range-1]==nEdgesRem);
            }
        }
    }
    return newUb;
}

/*****************************************************************************
Compute upper bound for the range of a partition
Inputs:
1. eIds -> edge indices sorted on current support
2. tipVal -> vector of current support of edges
3. nPartsRem -> number of partitions remaining to be created
4. nEdgesRem -> number of not yet peeled edges in eIds vector
5. scaling -> scaling factor to apply
6. tipMin -> lower bound based on partition's wing number range
7. tipMax -> recently updated upper bound
8. oldMax -> previous upper bound
Outputs:
1. range upper bound for the partition
2.
      estimated work value for the partition based on current edge support
******************************************************************************/
std::tuple<intE, intB> find_upper_bound_part(std::vector<intE> &eIds,
                                             std::vector<intE> &tipVal,
                                             intE nPartsRem, intE nEdgesRem,
                                             double scaling, intE tipMin,
                                             intE tipMax, intE oldMax)
{
    intE range = tipMax - tipMin + 1;
    // NOTE(review): oldRange is computed but never used below.
    intE oldRange = oldMax - tipMin + 1;
    // Shared grow-only scratch: per-support work estimate and its prefix sum.
    std::vector<intB> &workPerSupp = histWorkGlobal;
    if (workPerSupp.size() < range) workPerSupp.resize(range);
    std::vector<intB> &accWork = histWorkAccGlobal;
    if (accWork.size() < range) accWork.resize(range);
    // Block size for the parallel prefix-sum below.
    intE BS = (range-1)/NUM_THREADS + 1;
    intE newMaxCount = histCountGlobal[oldMax-tipMax];
    #pragma omp parallel num_threads (NUM_THREADS)
    {
        unsigned tid = omp_get_thread_num();
        //count edges with higher support than new max into the bin of new max value
        //histCountGlobal[i] is the no. of edges with support oldMax - (tipMin + i)
        // (histCountGlobal is laid out in decreasing support order: bin
        //  [oldMax - val] corresponds to support value val).
        #pragma omp for reduction (+:newMaxCount)
        for (intE i=0; i<oldMax-tipMax; i++)
            newMaxCount += histCountGlobal[i];
        #pragma omp single
        {
            histCountGlobal[oldMax-tipMax] = newMaxCount;
        }
        // Work estimate per support value: (#edges at that support) * support.
        #pragma omp for
        for (intE i=0; i<range; i++)
        {
            intB val = i + tipMin;
            intE countIdx = oldMax - val;
            intB edgeCnt = histCountGlobal[countIdx];
            workPerSupp[i] = edgeCnt*val;
        }
        //PREFIX SUM counts to compute write offsets for each support value
        if (range < 10*NUM_THREADS)
        {
            // Small range: serial scan.
            #pragma omp single
            {
                accWork[0] = workPerSupp[0];
                for (intE i=1; i<range; i++)
                    accWork[i] = accWork[i-1]+workPerSupp[i];
            }
        }
        else
        {
            // Three-phase blocked scan (same scheme as update_upper_bound_wing):
            // per-block scans, block-total propagation, then carry-in add.
            intE start = BS*tid;
            intE end = std::min((intE)(start+BS), range);
            if (start < range) accWork[start] = workPerSupp[start];
            for (intE i=start+1; i<end; i++)
                accWork[i] = accWork[i-1] + workPerSupp[i];
            #pragma omp barrier
            #pragma omp single
            {
                for (size_t i=1; i<NUM_THREADS; i++)
                {
                    intE prevEnd = BS*i;
                    if (prevEnd >= range) continue;
                    intE tend = std::min(prevEnd + BS, range);
                    accWork[tend-1] += accWork[prevEnd-1];
                }
            }
            #pragma omp barrier
            if (tid > 0)
            {
                intB blockScan = accWork[start-1];
                for (intE i=start; i<end-1; i++)
                    accWork[i] += blockScan;
            }
        }
    }
    //dynamic average with scaling
    // NOTE(review): accWork[range-1]/nPartsRem is an integer division that
    // truncates before the double multiply — presumably intentional; confirm.
    intB tgtWorkVal = (long long int)(double(accWork[range-1]/nPartsRem)*scaling);
    //find smallest support value at which work is greater than average
    intE partUB = (std::lower_bound(accWork.begin(), accWork.begin()+range, tgtWorkVal)
                   - accWork.begin()) + tipMin + 1;
    // Actual accumulated work at the chosen cut point (clamped to range).
    tgtWorkVal = accWork[std::min(partUB-tipMin-1, range-1)];
    return std::make_tuple(partUB, tgtWorkVal);
}

//compute scaling factor
// Ratio of the target work for a partition to the actual work implied by the
// current (clamped) supports of its edges; returns 1.0 for an empty workload.
double compute_scale(std::vector<intE> &partEdges, std::vector<intE> &initSupp,
                     intE maxSupp, intB tgtWork)
{
    intB actualWork = 0;
    #pragma omp parallel for num_threads(NUM_THREADS) reduction (+:actualWork)
    for (intE i=0; i<partEdges.size(); i++)
        actualWork += std::min(initSupp[partEdges[i]], maxSupp);
    // Avoid division by zero when the partition carries no work.
    if (actualWork == 0) return 1.0;
    assert(actualWork >= tgtWork);
    double scaling = ((double)tgtWork)/((double)actualWork);
    return scaling;
}

//find active edges for the first peeling iteration of a partition
// An edge is active if its support lies below kHi; active edges are appended
// to activeEdges via per-thread buffers flushed through updateGlobalQueue
// (which atomically reserves space using activeEdgePtr).
void find_active_edges(std::vector<intE> &eIds, std::vector<intE> &tipVal,
                       std::vector<uint8_t> &isActive, intE nEdgesRem,
                       intE kLo, intE kHi,
                       std::vector<intE> &activeEdges, intE &activeEdgePtr)
{
    if (thdEdgeBuff.size() < NUM_THREADS) thdEdgeBuff.resize(NUM_THREADS);
    #pragma omp parallel num_threads(NUM_THREADS)
    {
        size_t tid = omp_get_thread_num();
        // Thread-local staging buffer to batch appends to the global queue.
        std::array<intE, locBuffSizeLarge> &locBuff = thdEdgeBuff[tid];
        unsigned locBuffPtr = 0;
        #pragma omp for
        for (intE i=0; i<nEdgesRem; i++)
        {
            intE e = eIds[i];
            // Every remaining edge must be within the partition's lower bound.
            assert(tipVal[e] >= kLo);
            if (tipVal[e] < kHi)
            {
                locBuff[locBuffPtr++] = e;
                // Flushes (and resets the pointer) only when the buffer fills.
                locBuffPtr = updateGlobalQueue(locBuffPtr, locBuffSizeLarge,
                                               activeEdgePtr, locBuff, activeEdges);
                isActive[e] = true;
            }
        }
        // Final flush of any leftover entries.
        if (locBuffPtr > 0)
            locBuffPtr = updateGlobalQueue(locBuffPtr, locBuffPtr,
                                           activeEdgePtr, locBuff, activeEdges);
    }
}

/*****************************************************************************
Update support of edges in a peeling iteration
Inputs:
   1. BEG -> BE-Index
   2.
      tipVal -> vector of support of edges
   3. kLo, kHi -> partition range
   4. activeEdges, activeEdgePtr, activeEdgeStartOffset -> set of edges to Peel
   5. isActive -> boolean array to indicate if an edge is active
   6. isPeeled -> boolean array to indicate if an edge is already peeled
Outputs:
   1. updated edge supports
   2. updated list of active edges
   3. returns a pointer to indicate the newly added active edges in
      activeEdges[] array
Arguments:
   1. bloomUpdates -> vector to accumulate updates at blooms
   2. activeBlooms -> array to store blooms with non-zero updates
******************************************************************************/
intE update_edge_supp(BEGraphLoMem& BEG, std::vector<intE> &tipVal,
                      intE kLo, intE kHi,
                      std::vector<intE> &activeEdges, intE activeEdgePtr,
                      intE activeEdgeStartOffset,
                      std::vector<intE> &bloomUpdates,
                      std::vector<intB> &activeBlooms,
                      std::vector<uint8_t> &isActive,
                      std::vector<uint8_t> &isPeeled)
{
    // Edges in [activeEdgeStartOffset, prevActiveEdgePtr) are peeled this
    // round; newly activated edges are appended at activeEdgePtr and beyond.
    intE prevActiveEdgePtr = activeEdgePtr;
    intB activeBloomPtr = 0;
    // Grow-only shared scratch (per-thread buffers, schedules, offsets).
    if (thdBloomBuff.size() < NUM_THREADS) thdBloomBuff.resize(NUM_THREADS);
    if (thdEdgeBuff.size() < NUM_THREADS) thdEdgeBuff.resize(NUM_THREADS);
    // 50 tasks per thread — presumably an empirically tuned oversubscription
    // factor for the dynamic bloom schedule; confirm.
    unsigned numBloomParts = NUM_THREADS*50;
    if (partBloomStart.size() < numBloomParts+1) partBloomStart.resize(numBloomParts+1);
    if (workBloomSchedule.size() == 0)
    {
        workBloomSchedule.resize(BEG.numV);
        accWorkBloomSchedule.resize(BEG.numV + 1);
    }
    #pragma omp parallel num_threads(NUM_THREADS)
    {
        size_t tid = omp_get_thread_num();
        // Thread-local staging buffers, flushed to the global queues via
        // updateGlobalQueue (atomic pointer reservation).
        std::array<intB, locBuffSizeLarge> &locBloomBuff = thdBloomBuff[tid];
        unsigned locBloomBuffPtr = 0;
        std::array<intE, locBuffSizeLarge> &locEdgeBuff = thdEdgeBuff[tid];
        unsigned locEdgeBuffPtr = 0;

        //Explore active edges and activate blooms
        #pragma omp for schedule (dynamic)
        for (intE i=activeEdgeStartOffset; i<prevActiveEdgePtr; i++)
        {
            intE e = activeEdges[i];
            assert(!isPeeled[e]);
            assert(isActive[e]);
            intE NeI = BEG.edgeDegree[e];
            for (intE j=0; j<NeI; j++)
            {
                intB belink = BEG.edgeVI[e]+j;
                intB bloomId = BEG.edgeEI[belink].first;
                intE neighEdgeId = BEG.edgeEI[belink].second;
                // Skip already-peeled neighbors and degenerate blooms.
                if (isPeeled[neighEdgeId] || (BEG.bloomDegree[bloomId]<2)) continue;
                // If both endpoints are active this round, only the smaller
                // edge id processes the pair (avoids double counting).
                if (isActive[neighEdgeId] && (neighEdgeId>e)) continue;
                intE updateVal = BEG.bloomDegree[bloomId]-1;
                //update neighbor edge
                intE prevTipVal = tipVal[neighEdgeId];
                if (prevTipVal >= kHi)
                {
                    // Atomic decrement; the returned pre-value tells exactly
                    // one thread that the edge just crossed below kHi, so it
                    // is enqueued exactly once.
                    prevTipVal = __sync_fetch_and_sub(&tipVal[neighEdgeId], updateVal);
                    if ((prevTipVal < kHi + updateVal) && (prevTipVal >= kHi))
                    {
                        locEdgeBuff[locEdgeBuffPtr++] = neighEdgeId;
                        locEdgeBuffPtr = updateGlobalQueue(locEdgeBuffPtr, locBuffSizeLarge,
                                                           activeEdgePtr, locEdgeBuff, activeEdges);
                    }
                }
                //update bloom
                // First incrementer (pre-value 0) registers the bloom as active.
                intE numDels = __sync_fetch_and_add(&bloomUpdates[bloomId], (intE)1);
                if (numDels==0)
                {
                    locBloomBuff[locBloomBuffPtr++] = bloomId;
                    locBloomBuffPtr = updateGlobalQueue(locBloomBuffPtr, locBuffSizeLarge,
                                                        activeBloomPtr, locBloomBuff, activeBlooms);
                }
            }
        }
        if (locBloomBuffPtr > 0)
            locBloomBuffPtr = updateGlobalQueue(locBloomBuffPtr, locBloomBuffPtr,
                                                activeBloomPtr, locBloomBuff, activeBlooms);
        #pragma omp barrier

        // Retire this round's active edges.
        #pragma omp for
        for (intE i=activeEdgeStartOffset; i<prevActiveEdgePtr; i++)
        {
            intE e = activeEdges[i];
            isActive[e] = false;
            isPeeled[e] = true;
        }

        //LOAD BALANCING
        // Per-bloom work estimate = current bloom degree.
        #pragma omp for
        for (intB i=0; i<activeBloomPtr; i++)
            workBloomSchedule[i] = BEG.bloomDegree[activeBlooms[i]];
        //compute prefix scan
        // NOTE(review): bloomsPerThd is a plain int while the counts are intB;
        // confirm activeBloomPtr/NUM_THREADS cannot overflow int.
        int bloomsPerThd = (activeBloomPtr-1)/NUM_THREADS + 1;
        if (tid==0) accWorkBloomSchedule[0] = 0;
        #pragma omp barrier
        if (bloomsPerThd < 10)
        {
            // Small input: serial exclusive scan.
            #pragma omp single
            {
                for (intB i=0; i<activeBloomPtr; i++)
                    accWorkBloomSchedule[i+1] = accWorkBloomSchedule[i] + workBloomSchedule[i];
            }
        }
        else
        {
            // Three-phase blocked scan over accWorkBloomSchedule[1..activeBloomPtr].
            intB startBloomIdx = bloomsPerThd*tid+1;
            intB endBloomIdx = std::min(startBloomIdx + bloomsPerThd, activeBloomPtr+1);
            accWorkBloomSchedule[startBloomIdx] = workBloomSchedule[startBloomIdx-1];
            for (intB i=startBloomIdx+1; i<endBloomIdx; i++)
                accWorkBloomSchedule[i] = accWorkBloomSchedule[i-1] + workBloomSchedule[i-1];
            #pragma omp barrier
            #pragma omp single
            {
                for (size_t i=1; i<NUM_THREADS; i++)
                {
                    intB prevEnd = bloomsPerThd*i + 1;
                    if (prevEnd > activeBloomPtr) continue;
                    intB tend = std::min(prevEnd + bloomsPerThd, activeBloomPtr+1);
                    accWorkBloomSchedule[tend-1] += accWorkBloomSchedule[prevEnd-1];
                }
                partBloomStart[0] = 0;
            }
            #pragma omp barrier
            if (tid>0)
            {
                intB blockScan = accWorkBloomSchedule[startBloomIdx-1];
                for (intB i=startBloomIdx; i<endBloomIdx-1; i++)
                    accWorkBloomSchedule[i] += blockScan;
            }
        }
        #pragma omp barrier

        // Partition blooms into numBloomParts chunks of roughly equal work.
        intB workPerPart = (accWorkBloomSchedule[activeBloomPtr]-1)/numBloomParts + 1;
        //find task offsets
        #pragma omp for
        for (intB i=0; i<numBloomParts; i++)
        {
            intB ptrOff = std::lower_bound(accWorkBloomSchedule.begin(),
                                           accWorkBloomSchedule.begin()+activeBloomPtr+1,
                                           workPerPart*(i+1))
                          - accWorkBloomSchedule.begin();
            partBloomStart[i+1] = std::min(ptrOff, activeBloomPtr);
        }
        #pragma omp barrier
        // Sanity: partition offsets must be monotone.
        #pragma omp for
        for (intB i=0; i<numBloomParts; i++)
        {
            assert(partBloomStart[i+1] >= partBloomStart[i]);
        }
        #pragma omp barrier

        //explore active blooms and update edge supports
        #pragma omp for schedule (dynamic,5)
        for (intB i=0; i<activeBloomPtr; i++)
        {
            intB bloomId = activeBlooms[i];
            // Consume and reset this bloom's accumulated deletion count.
            intE numDels = bloomUpdates[bloomId];
            bloomUpdates[bloomId] = 0;
            intB baseIndex = BEG.bloomVI[bloomId];
            for (intE j=0; j<BEG.bloomDegree[bloomId]; j++)
            {
                intE e1Id = BEG.bloomEI[baseIndex + j].first;
                intE e2Id = BEG.bloomEI[baseIndex + j].second;
                if (isPeeled[e1Id] || isPeeled[e2Id])
                {
                    // Compact the bloom's pair list: swap-with-last, shrink,
                    // and re-examine slot j.
                    std::swap(BEG.bloomEI[baseIndex+j],
                              BEG.bloomEI[baseIndex+BEG.bloomDegree[bloomId]-1]);
                    j--;
                    BEG.bloomDegree[bloomId]--;
                    continue;
                }
                // Same crossing-detection idiom as above for each endpoint.
                intE prevTipVal = tipVal[e1Id];
                if (prevTipVal >= kHi)
                {
                    prevTipVal = __sync_fetch_and_sub(&tipVal[e1Id], numDels);
                    if ((prevTipVal < kHi + numDels) && (prevTipVal >= kHi))
                    {
                        locEdgeBuff[locEdgeBuffPtr++] = e1Id;
                        locEdgeBuffPtr = updateGlobalQueue(locEdgeBuffPtr, locBuffSizeLarge,
                                                           activeEdgePtr, locEdgeBuff, activeEdges);
                    }
                    //else if (prevTipVal < kHi) __sync_fetch_and_add(&tipVal[e1Id], numDels);
                }
                prevTipVal = tipVal[e2Id];
                if (prevTipVal >= kHi)
                {
                    prevTipVal = __sync_fetch_and_sub(&tipVal[e2Id], numDels);
                    if ((prevTipVal < kHi + numDels) && (prevTipVal >= kHi))
                    {
                        locEdgeBuff[locEdgeBuffPtr++] = e2Id;
                        locEdgeBuffPtr = updateGlobalQueue(locEdgeBuffPtr, locBuffSizeLarge,
                                                           activeEdgePtr, locEdgeBuff, activeEdges);
                    }
                    //else if (prevTipVal < kHi) __sync_fetch_and_add(&tipVal[e2Id], numDels);
                }
            }
        }
        if (locEdgeBuffPtr > 0)
            locEdgeBuffPtr = updateGlobalQueue(locEdgeBuffPtr, locEdgeBuffPtr,
                                               activeEdgePtr, locEdgeBuff, activeEdges);
        #pragma omp barrier

        // Mark edges activated this round for the next iteration.
        #pragma omp for
        for (intE i=prevActiveEdgePtr; i<activeEdgePtr; i++)
            isActive[activeEdges[i]] = true;
    }
    return activeEdgePtr;
}
GB_binop__rminus_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this is a generated specialization of GB_binop for the
// RMINUS (reverse-subtract) operator on int16; change the Generator
// sources and regenerate rather than editing here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rminus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_01__rminus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__rminus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_03__rminus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rminus_int16)
// A*D function (colscale):         GB (_AxD__rminus_int16)
// D*A function (rowscale):         GB (_DxB__rminus_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__rminus_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__rminus_int16)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_int16)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_int16)
// C=scalar+B                       GB (_bind1st__rminus_int16)
// C=scalar+B'                      GB (_bind1st_tran__rminus_int16)
// C=A+scalar                       GB (_bind2nd__rminus_int16)
// C=A'+scalar                      GB (_bind2nd_tran__rminus_int16)

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (bij - aij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: rminus swaps the operands of subtraction
#define GB_BINOP(z,x,y,i,j) \
    z = (y - x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_INT16 || GxB_NO_RMINUS_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (generator artifact) — the block above returns.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__rminus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rminus_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rminus_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij - x) ;                       \
}

GrB_Info GB (_bind1st_tran__rminus_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (y - aij) ;                       \
}

GrB_Info GB (_bind2nd_tran__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__lor_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): this is a generated specialization of GB_binop for the
// LOR (logical-or) operator on int16; change the Generator sources and
// regenerate rather than editing here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__lor_int16
// A.*B function (eWiseMult):       GB_AemultB__lor_int16
// A*D function (colscale):         GB_AxD__lor_int16
// D*A function (rowscale):         GB_DxB__lor_int16
// C+=B function (dense accum):     GB_Cdense_accumB__lor_int16
// C+=b function (dense accum):     GB_Cdense_accumb__lor_int16
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__lor_int16
// C=scalar+B                       GB_bind1st__lor_int16
// C=scalar+B'                     GB_bind1st_tran__lor_int16
// C=A+scalar                       GB_bind2nd__lor_int16
// C=A'+scalar                      GB_bind2nd_tran__lor_int16

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: logical OR of the operands' truth values (result 0 or 1)
#define GB_BINOP(z, x, y, i, j) \
    z = ((x != 0) || (y != 0)) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOR || GxB_NO_INT16 || GxB_NO_LOR_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// LOR is not one of the supported ewise3-accum ops, so this variant is
// compiled out for this operator.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lor_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__lor_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__lor_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (generator artifact) — the block above returns.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lor_int16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lor_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// frees the ek_slice workspaces allocated inside GB_add_template.c
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__lor_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lor_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__lor_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__lor_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = Ax [pA] ;                     \
    Cx [pC] = ((x != 0) || (aij != 0)) ;        \
}

GrB_Info GB_bind1st_tran__lor_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = Ax [pA] ;                     \
    Cx [pC] = ((aij != 0) || (y != 0)) ;        \
}

GrB_Info GB_bind2nd_tran__lor_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
main.c
void foo(int N, double *A) { #pragma omp parallel default(shared) { #pragma omp for for (int I = 0; I < N; ++I) { A[I] = I; } } }
hermm_c_dia_n_lo_col_trans.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif #include <memory.h> #include <stdlib.h> alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Complex *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex *y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { ALPHA_Complex* Y = &y[index2(cc,0,ldy)]; for (ALPHA_INT i = 0; i < mat->rows; i++) alpha_mul(Y[i],Y[i],beta); const ALPHA_Complex* X = &x[index2(cc,0,ldx)]; for(ALPHA_INT di = 0; di < mat->ndiag;++di){ ALPHA_INT d = mat->distance[di]; if(d < 0){ ALPHA_INT ars = alpha_max(0,-d); ALPHA_INT acs = alpha_max(0,d); ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs); for(ALPHA_INT i = 0; i < an; ++i){ ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Complex val,val_c; alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha); alpha_mul_2c(val_c,mat->values[index2(di,ar,mat->lval)],alpha); alpha_madde(Y[ar],val_c,X[ac]); alpha_madde(Y[ac],val,X[ar]); } } if(d == 0){ for(ALPHA_INT r = 0; r < mat->rows; ++r){ ALPHA_Complex val; alpha_mul_2c(val,mat->values[index2(di,r,mat->lval)],alpha); alpha_madde(Y[r],val,X[r]); } } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
fill_int2e.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "config.h"
#include "cint.h"

#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define MIN(I,J) ((I) < (J) ? (I) : (J))

/* Largest AO block dimension (number of contracted functions) of any shell
 * covered by the ncenter (start, stop) pairs in shls_slice. */
int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter)
{
    int i;
    int i0 = shls_slice[0];
    int i1 = shls_slice[1];
    int di = 0;
    /* widen [i0, i1) to cover every center's shell range */
    for (i = 1; i < ncenter; i++) {
        i0 = MIN(i0, shls_slice[i*2  ]);
        i1 = MAX(i1, shls_slice[i*2+1]);
    }
    for (i = i0; i < i1; i++) {
        di = MAX(di, ao_loc[i+1]-ao_loc[i]);
    }
    return di;
}

/* Largest scratch ("cache") size, in doubles, that the integral engine
 * reports for any shell in the slices.  The engine is probed with a NULL
 * output buffer, which makes it return its cache requirement instead of
 * computing integrals. */
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
    int i, n;
    int i0 = shls_slice[0];
    int i1 = shls_slice[1];
    for (i = 1; i < ncenter; i++) {
        i0 = MIN(i0, shls_slice[i*2  ]);
        i1 = MAX(i1, shls_slice[i*2+1]);
    }
    int shls[4];
    int cache_size = 0;
    for (i = i0; i < i1; i++) {
        shls[0] = i;
        shls[1] = i;
        shls[2] = i;
        shls[3] = i;
        n = (*intor)(NULL, NULL, shls, atm, natm, bas, nbas, env, NULL, NULL);
        cache_size = MAX(cache_size, n);
    }
    return cache_size;
}

/*
 * *************************************************
 * 2e AO integrals in s4, s2ij, s2kl, s1
 */

/* Fill one (ishp, jshp) row-block of the s1 (no permutational symmetry)
 * 2-electron integral tensor eri, shaped [comp][nij][nkl].
 * buf holds comp integral blocks followed by the engine's scratch cache.
 * Shell pairs screened out by fprescreen, or for which the engine reports
 * all-zero, are explicitly zero-filled. */
void GTOnr2e_fill_s1(int (*intor)(), int (*fprescreen)(),
                     double *eri, double *buf, int comp, int ishp, int jshp,
                     int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
    int ish0 = shls_slice[0];
    int ish1 = shls_slice[1];
    int jsh0 = shls_slice[2];
    int jsh1 = shls_slice[3];
    int ksh0 = shls_slice[4];
    int ksh1 = shls_slice[5];
    int lsh0 = shls_slice[6];
    int lsh1 = shls_slice[7];
    /* AO counts of each slice */
    int ni = ao_loc[ish1] - ao_loc[ish0];
    int nj = ao_loc[jsh1] - ao_loc[jsh0];
    int nk = ao_loc[ksh1] - ao_loc[ksh0];
    int nl = ao_loc[lsh1] - ao_loc[lsh0];
    size_t nij = ni * nj;
    size_t nkl = nk * nl;
    size_t neri = nij * nkl;   /* stride between components in eri */
    int ish = ishp + ish0;
    int jsh = jshp + jsh0;
    /* AO offsets of this shell pair within the slice */
    int i0 = ao_loc[ish] - ao_loc[ish0];
    int j0 = ao_loc[jsh] - ao_loc[jsh0];
    eri += nkl * (i0 * nj + j0);

    int di = ao_loc[ish+1] - ao_loc[ish];
    int dj = ao_loc[jsh+1] - ao_loc[jsh];
    int dij = di * dj;
    int k0, l0, dk, dl, dijk, dijkl;
    int i, j, k, l, icomp;
    int ksh, lsh;
    int shls[4];
    double *eri0, *peri, *buf0, *pbuf, *cache;

    shls[0] = ish;
    shls[1] = jsh;

    for (ksh = ksh0; ksh < ksh1; ksh++) {
    for (lsh = lsh0; lsh < lsh1; lsh++) {
        shls[2] = ksh;
        shls[3] = lsh;
        k0 = ao_loc[ksh] - ao_loc[ksh0];
        l0 = ao_loc[lsh] - ao_loc[lsh0];
        dk = ao_loc[ksh+1] - ao_loc[ksh];
        dl = ao_loc[lsh+1] - ao_loc[lsh];
        dijk = dij * dk;
        dijkl = dijk * dl;
        /* scratch area for the engine sits after the comp integral blocks */
        cache = buf + dijkl * comp;
        if ((*fprescreen)(shls, atm, bas, env)
            && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                        cintopt, cache)) {
            /* scatter the (i,j,k,l)-major engine block into eri's
             * [ij][kl] layout */
            eri0 = eri + k0*nl+l0;
            buf0 = buf;
            for (icomp = 0; icomp < comp; icomp++) {
                for (i = 0; i < di; i++) {
                for (j = 0; j < dj; j++) {
                    peri = eri0 + nkl*(i*nj+j);
                    for (k = 0; k < dk; k++) {
                        for (pbuf = buf0 + k*dij + j*di + i,
                             l = 0; l < dl; l++) {
                            peri[k*nl+l] = pbuf[l*dijk];
                        }
                    }
                } }
                buf0 += dijkl;
                eri0 += neri;
            }
        } else {
            /* screened out or all-zero: zero-fill the destination block */
            eri0 = eri + k0*nl+l0;
            for (icomp = 0; icomp < comp; icomp++) {
                for (i = 0; i < di; i++) {
                for (j = 0; j < dj; j++) {
                    peri = eri0 + nkl*(i*nj+j);
                    for (k = 0; k < dk; k++) {
                    for (l = 0; l < dl; l++) {
                        peri[k*nl+l] = 0;
                    } }
                } }
                eri0 += neri;
            }
        }
    } }
}

/* Fill one (ishp, jshp) block of the s2ij tensor: eri is packed
 * lower-triangular in the ij pair (nij = ni*(ni+1)/2), full in kl.
 * Blocks with ishp < jshp are skipped (handled by the mirrored call).
 * NOTE(review): the triangular i0*(i0+1)/2 addressing presumes the i and j
 * shell slices refer to the same range (ish0 == jsh0) — confirm callers
 * guarantee this for s2ij. */
void GTOnr2e_fill_s2ij(int (*intor)(), int (*fprescreen)(),
                       double *eri, double *buf, int comp, int ishp, int jshp,
                       int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
    if (ishp < jshp) {
        return;
    }

    int ish0 = shls_slice[0];
    int ish1 = shls_slice[1];
    int jsh0 = shls_slice[2];
    //int jsh1 = shls_slice[3];
    int ksh0 = shls_slice[4];
    int ksh1 = shls_slice[5];
    int lsh0 = shls_slice[6];
    int lsh1 = shls_slice[7];
    int ni = ao_loc[ish1] - ao_loc[ish0];
    //int nj = ao_loc[jsh1] - ao_loc[jsh0];
    int nk = ao_loc[ksh1] - ao_loc[ksh0];
    int nl = ao_loc[lsh1] - ao_loc[lsh0];
    size_t nij = ni * (ni+1) / 2;   /* packed triangular ij */
    size_t nkl = nk * nl;
    size_t neri = nij * nkl;
    int ish = ishp + ish0;
    int jsh = jshp + jsh0;
    int i0 = ao_loc[ish] - ao_loc[ish0];
    int j0 = ao_loc[jsh] - ao_loc[jsh0];
    /* triangular row offset: row i0 starts at i0*(i0+1)/2 pairs */
    eri += nkl * (i0*(i0+1)/2 + j0);

    int di = ao_loc[ish+1] - ao_loc[ish];
    int dj = ao_loc[jsh+1] - ao_loc[jsh];
    int dij = di * dj;
    int k0, l0, dk, dl, dijk, dijkl;
    int i, j, k, l, icomp;
    int ksh, lsh;
    int shls[4];
    double *eri0, *peri0, *peri, *buf0, *pbuf, *cache;

    shls[0] = ish;
    shls[1] = jsh;

    for (ksh = ksh0; ksh < ksh1; ksh++) {
    for (lsh = lsh0; lsh < lsh1; lsh++) {
        shls[2] = ksh;
        shls[3] = lsh;
        k0 = ao_loc[ksh] - ao_loc[ksh0];
        l0 = ao_loc[lsh] - ao_loc[lsh0];
        dk = ao_loc[ksh+1] - ao_loc[ksh];
        dl = ao_loc[lsh+1] - ao_loc[lsh];
        dijk = dij * dk;
        dijkl = dijk * dl;
        cache = buf + dijkl * comp;
        if ((*fprescreen)(shls, atm, bas, env)
            && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                        cintopt, cache)) {
            eri0 = eri + k0*nl+l0;
            buf0 = buf;
            for (icomp = 0; icomp < comp; icomp++) {
                peri0 = eri0;
                if (ishp > jshp) {
                    /* off-diagonal shell pair: full dj columns */
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j < dj; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++) {
                            for (pbuf = buf0 + k*dij + j*di + i,
                                 l = 0; l < dl; l++) {
                                peri[k*nl+l] = pbuf[l*dijk];
                            }
                        }
                    } }
                } else {
                    /* diagonal shell pair: only the j <= i triangle */
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j <= i; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++) {
                            for (pbuf = buf0 + k*dij + j*di + i,
                                 l = 0; l < dl; l++) {
                                peri[k*nl+l] = pbuf[l*dijk];
                            }
                        }
                    } }
                }
                buf0 += dijkl;
                eri0 += neri;
            }
        } else {
            /* zero-fill the same triangular pattern */
            eri0 = eri + k0*nl+l0;
            for (icomp = 0; icomp < comp; icomp++) {
                peri0 = eri0;
                if (ishp > jshp) {
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j < dj; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++) {
                        for (l = 0; l < dl; l++) {
                            peri[k*nl+l] = 0;
                        } }
                    } }
                } else {
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j <= i; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++) {
                        for (l = 0; l < dl; l++) {
                            peri[k*nl+l] = 0;
                        } }
                    } }
                }
                eri0 += neri;
            }
        }
    } }
}

/* Fill one (ishp, jshp) block of the s2kl tensor: full in ij, packed
 * lower-triangular in the kl pair (nkl = nk*(nk+1)/2).  The kl loops run
 * over slice-relative indices (kshp, lshp) with lshp <= kshp.
 * NOTE(review): as with s2ij, the packed k0*(k0+1)/2 addressing presumes
 * the k and l slices coincide — confirm for s2kl callers. */
void GTOnr2e_fill_s2kl(int (*intor)(), int (*fprescreen)(),
                       double *eri, double *buf, int comp, int ishp, int jshp,
                       int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
    int ish0 = shls_slice[0];
    int ish1 = shls_slice[1];
    int jsh0 = shls_slice[2];
    int jsh1 = shls_slice[3];
    int ksh0 = shls_slice[4];
    int ksh1 = shls_slice[5];
    int lsh0 = shls_slice[6];
    //int lsh1 = shls_slice[7];
    int ni = ao_loc[ish1] - ao_loc[ish0];
    int nj = ao_loc[jsh1] - ao_loc[jsh0];
    int nk = ao_loc[ksh1] - ao_loc[ksh0];
    //int nl = ao_loc[lsh1] - ao_loc[lsh0];
    size_t nij = ni * nj;
    size_t nkl = nk * (nk+1) / 2;   /* packed triangular kl */
    size_t neri = nij * nkl;
    int ish = ishp + ish0;
    int jsh = jshp + jsh0;
    int i0 = ao_loc[ish] - ao_loc[ish0];
    int j0 = ao_loc[jsh] - ao_loc[jsh0];
    eri += nkl * (i0 * nj + j0);

    int di = ao_loc[ish+1] - ao_loc[ish];
    int dj = ao_loc[jsh+1] - ao_loc[jsh];
    int dij = di * dj;
    int k0, l0, dk, dl, dijk, dijkl;
    int i, j, k, l, icomp;
    int ksh, lsh, kshp, lshp;
    int shls[4];
    double *eri0, *peri, *buf0, *pbuf, *cache;

    shls[0] = ish;
    shls[1] = jsh;

    for (kshp = 0; kshp < ksh1-ksh0; kshp++) {
    for (lshp = 0; lshp <= kshp; lshp++) {
        ksh = kshp + ksh0;
        lsh = lshp + lsh0;
        shls[2] = ksh;
        shls[3] = lsh;
        k0 = ao_loc[ksh] - ao_loc[ksh0];
        l0 = ao_loc[lsh] - ao_loc[lsh0];
        dk = ao_loc[ksh+1] - ao_loc[ksh];
        dl = ao_loc[lsh+1] - ao_loc[lsh];
        dijk = dij * dk;
        dijkl = dijk * dl;
        cache = buf + dijkl * comp;
        if ((*fprescreen)(shls, atm, bas, env)
            && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                        cintopt, cache)) {
            /* triangular kl base offset: row k0 starts at k0*(k0+1)/2 */
            eri0 = eri + k0*(k0+1)/2+l0;
            buf0 = buf;
            for (icomp = 0; icomp < comp; icomp++) {
                if (kshp > lshp) {
                    /* off-diagonal kl shell pair: full dl columns;
                     * peri+=k0+k advances along packed triangular rows */
                    for (i = 0; i < di; i++) {
                    for (j = 0; j < dj; j++) {
                        peri = eri0 + nkl*(i*nj+j);
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                            for (pbuf = buf0 + k*dij + j*di + i,
                                 l = 0; l < dl; l++) {
                                peri[l] = pbuf[l*dijk];
                            }
                        }
                    } }
                } else {
                    /* diagonal kl shell pair: only l <= k */
                    for (i = 0; i < di; i++) {
                    for (j = 0; j < dj; j++) {
                        peri = eri0 + nkl*(i*nj+j);
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                            for (pbuf = buf0 + k*dij + j*di + i,
                                 l = 0; l <= k; l++) {
                                peri[l] = pbuf[l*dijk];
                            }
                        }
                    } }
                }
                buf0 += dijkl;
                eri0 += neri;
            }
        } else {
            /* zero-fill the same triangular pattern */
            eri0 = eri + k0*(k0+1)/2+l0;
            for (icomp = 0; icomp < comp; icomp++) {
                if (kshp > lshp) {
                    for (i = 0; i < di; i++) {
                    for (j = 0; j < dj; j++) {
                        peri = eri0 + nkl*(i*nj+j);
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                        for (l = 0; l < dl; l++) {
                            peri[l] = 0;
                        } }
                    } }
                } else {
                    for (i = 0; i < di; i++) {
                    for (j = 0; j < dj; j++) {
                        peri = eri0 + nkl*(i*nj+j);
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                        for (l = 0; l <= k; l++) {
                            peri[l] = 0;
                        } }
                    } }
                }
                eri0 += neri;
            }
        }
    } }
}

/* Fill one (ishp, jshp) block of the s4 tensor: packed lower-triangular in
 * BOTH the ij and kl pairs.  Blocks with ishp < jshp are skipped.
 * NOTE(review): the first branch tests slice-relative indices
 * (kshp > lshp && ishp > jshp) while the later branches test absolute shell
 * ids (ish > jsh, ksh > lsh).  These agree only when ish0 == jsh0 and
 * ksh0 == lsh0 — which s4 symmetry presumably requires anyway — but the
 * mixed usage is inconsistent with fill_s2ij and worth confirming/unifying
 * upstream. */
void GTOnr2e_fill_s4(int (*intor)(), int (*fprescreen)(),
                     double *eri, double *buf, int comp, int ishp, int jshp,
                     int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
    if (ishp < jshp) {
        return;
    }

    int ish0 = shls_slice[0];
    int ish1 = shls_slice[1];
    int jsh0 = shls_slice[2];
    //int jsh1 = shls_slice[3];
    int ksh0 = shls_slice[4];
    int ksh1 = shls_slice[5];
    int lsh0 = shls_slice[6];
    //int lsh1 = shls_slice[7];
    int ni = ao_loc[ish1] - ao_loc[ish0];
    //int nj = ao_loc[jsh1] - ao_loc[jsh0];
    int nk = ao_loc[ksh1] - ao_loc[ksh0];
    //int nl = ao_loc[lsh1] - ao_loc[lsh0];
    size_t nij = ni * (ni+1) / 2;   /* packed triangular ij */
    size_t nkl = nk * (nk+1) / 2;   /* packed triangular kl */
    size_t neri = nij * nkl;
    int ish = ishp + ish0;
    int jsh = jshp + jsh0;
    int i0 = ao_loc[ish] - ao_loc[ish0];
    int j0 = ao_loc[jsh] - ao_loc[jsh0];
    eri += nkl * (i0*(i0+1)/2 + j0);

    int di = ao_loc[ish+1] - ao_loc[ish];
    int dj = ao_loc[jsh+1] - ao_loc[jsh];
    int dij = di * dj;
    int k0, l0, dk, dl, dijk, dijkl;
    int i, j, k, l, icomp;
    int ksh, lsh, kshp, lshp;
    int shls[4];
    double *eri0, *peri0, *peri, *buf0, *pbuf, *cache;

    shls[0] = ish;
    shls[1] = jsh;

    for (kshp = 0; kshp < ksh1-ksh0; kshp++) {
    for (lshp = 0; lshp <= kshp; lshp++) {
        ksh = kshp + ksh0;
        lsh = lshp + lsh0;
        shls[2] = ksh;
        shls[3] = lsh;
        k0 = ao_loc[ksh] - ao_loc[ksh0];
        l0 = ao_loc[lsh] - ao_loc[lsh0];
        dk = ao_loc[ksh+1] - ao_loc[ksh];
        dl = ao_loc[lsh+1] - ao_loc[lsh];
        dijk = dij * dk;
        dijkl = dijk * dl;
        cache = buf + dijkl * comp;
        if ((*fprescreen)(shls, atm, bas, env)
            && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                        cintopt, cache)) {
            eri0 = eri + k0*(k0+1)/2+l0;
            buf0 = buf;
            for (icomp = 0; icomp < comp; icomp++) {
                peri0 = eri0;
                /* four cases: (ij diag?) x (kl diag?), each restricting the
                 * copied triangle accordingly */
                if (kshp > lshp && ishp > jshp) {
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j < dj; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                            for (pbuf = buf0 + k*dij + j*di + i,
                                 l = 0; l < dl; l++) {
                                peri[l] = pbuf[l*dijk];
                            }
                        }
                    } }
                } else if (ish > jsh) {
                    /* kl diagonal: l <= k only */
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j < dj; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                            for (pbuf = buf0 + k*dij + j*di + i,
                                 l = 0; l <= k; l++) {
                                peri[l] = pbuf[l*dijk];
                            }
                        }
                    } }
                } else if (ksh > lsh) {
                    /* ij diagonal: j <= i only */
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j <= i; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                            for (pbuf = buf0 + k*dij + j*di + i,
                                 l = 0; l < dl; l++) {
                                peri[l] = pbuf[l*dijk];
                            }
                        }
                    } }
                } else {
                    /* both diagonal: j <= i and l <= k */
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j <= i; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                            for (pbuf = buf0 + k*dij + j*di + i,
                                 l = 0; l <= k; l++) {
                                peri[l] = pbuf[l*dijk];
                            }
                        }
                    } }
                }
                buf0 += dijkl;
                eri0 += neri;
            }
        } else {
            /* zero-fill the same four-case triangular pattern */
            eri0 = eri + k0*(k0+1)/2+l0;
            buf0 = buf;
            for (icomp = 0; icomp < comp; icomp++) {
                peri0 = eri0;
                if (kshp > lshp && ishp > jshp) {
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j < dj; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                        for (l = 0; l < dl; l++) {
                            peri[l] = 0;
                        } }
                    } }
                } else if (ish > jsh) {
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j < dj; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                        for (l = 0; l <= k; l++) {
                            peri[l] = 0;
                        } }
                    } }
                } else if (ksh > lsh) {
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j <= i; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                        for (l = 0; l < dl; l++) {
                            peri[l] = 0;
                        } }
                    } }
                } else {
                    for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                    for (j = 0; j <= i; j++) {
                        peri = peri0 + nkl*j;
                        for (k = 0; k < dk; k++, peri+=k0+k) {
                        for (l = 0; l <= k; l++) {
                            peri[l] = 0;
                        } }
                    } }
                }
                eri0 += neri;
            }
        }
    } }
}

/* Default prescreen callback: accept every shell quartet. */
static int no_prescreen() {
    return 1;
}

/* Driver: iterate all (i, j) shell-pair blocks of the chosen symmetry
 * variant `fill`, in parallel over pairs.  Each thread owns one buffer
 * sized for the largest possible integral block plus the engine's scratch
 * cache.
 * NOTE(review): with default(none), the const locals nish/njsh/di/cache_size
 * used inside the region rely on const variables being predetermined shared;
 * some compilers/OpenMP versions reject this — verify against the target
 * toolchain.
 * NOTE(review): the malloc result is not checked — presumably acceptable for
 * this code base, but a NULL return would crash inside `fill`. */
void GTOnr2e_fill_drv(int (*intor)(), void (*fill)(), int (*fprescreen)(),
                      double *eri, int comp,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
    if (fprescreen == NULL) {
        fprescreen = no_prescreen;
    }

    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int jsh1 = shls_slice[3];
    const int nish = ish1 - ish0;
    const int njsh = jsh1 - jsh0;
    const int di = GTOmax_shell_dim(ao_loc, shls_slice, 4);
    const int cache_size = GTOmax_cache_size(intor, shls_slice, 4,
                                             atm, natm, bas, nbas, env);

#pragma omp parallel default(none) \
        shared(fill, fprescreen, eri, intor, comp, \
               shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env)
{
    int ij, i, j;
    /* per-thread workspace: comp integral blocks + engine scratch */
    double *buf = malloc(sizeof(double) * (di*di*di*di*comp + cache_size));
#pragma omp for nowait schedule(dynamic)
    for (ij = 0; ij < nish*njsh; ij++) {
        i = ij / njsh;
        j = ij % njsh;
        (*fill)(intor, fprescreen, eri, buf, comp, i, j,
                shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
    }
    free(buf);
}
}
mixed_tentusscher_myo_epi_2004_S0.c
// Scenario 0 - Original Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S0.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 
0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.4172552153702,0.00133233093318418,0.775980725003160,0.775871451583533,0.000178484465968596,0.483518904573916,0.00297208335439809,0.999998297825169,1.98274727808946e-08,1.92952362196655e-05,0.999768268008847,1.00667048889468,0.999984854519288,5.50424977684767e-05,0.352485262813812,10.8673127043200,138.860197273148}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real 
Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real 
BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); 
bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; 
real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; 
real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); 
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = 
G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
HardTanh.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/HardTanh.c"
#else

/* Forward pass of the HardTanh module: every element of `input` is clamped
 * into [-1, 1] and stored in the module's `output` tensor (a field of the
 * Lua object at stack index 1).  Returns 1, the number of Lua results. */
static int nn_(HardTanh_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);

  THTensor_(resizeAs)(output, input);

  if (input->nDimension == 1 || !THTensor_(isContiguous)(input) ||
      !THTensor_(isContiguous)(output))
  {
    /* Strided / 1-D path: walk both tensors element-wise with the TH apply
     * macro.  Tests mirror the original order (< -1 first, then <= 1). */
    TH_TENSOR_APPLY2(real, output, real, input,
      *output_data = (*input_data < -1) ? -1 :
                     ((*input_data <= 1) ? *input_data : 1););
  }
  else
  {
    /* Contiguous fast path: slice the flat storage along dimension 0 so each
     * OpenMP thread clamps a disjoint chunk of input->stride[0] elements. */
    real *dst_base = THTensor_(data)(output);
    real *src_base = THTensor_(data)(input);
    long row;
#pragma omp parallel for private(row)
    for (row = 0; row < input->size[0]; row++)
    {
      const long offset = row * input->stride[0];
      real *dst = dst_base + offset;
      real *src = src_base + offset;
      long j;
      for (j = 0; j < input->stride[0]; j++)
      {
        const real v = src[j];
        dst[j] = (v < -1) ? -1 : ((v <= 1) ? v : 1);
      }
    }
  }
  return 1;
}

/* Backward pass: the clamp is identity inside [-1, 1] and flat outside, so
 * the incoming gradient passes through where the input lies in [-1, 1] and
 * is zeroed elsewhere.  Result goes into the module's `gradInput` tensor;
 * returns 1. */
static int nn_(HardTanh_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput",
                                                torch_Tensor);

  THTensor_(resizeAs)(gradInput, input);

  if (input->nDimension == 1 || !THTensor_(isContiguous)(input) ||
      !THTensor_(isContiguous)(gradOutput) ||
      !THTensor_(isContiguous)(gradInput))
  {
    /* Generic strided path. */
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
      *gradInput_data = (*input_data < -1 || *input_data > 1) ?
                        0 : *gradOutput_data;);
  }
  else
  {
    /* Contiguous fast path, parallelized over the first dimension exactly
     * like the forward pass. */
    real *gout_base = THTensor_(data)(gradOutput);
    real *gin_base = THTensor_(data)(gradInput);
    real *in_base = THTensor_(data)(input);
    long row;
#pragma omp parallel for private(row)
    for (row = 0; row < input->size[0]; row++)
    {
      const long offset = row * input->stride[0];
      real *gout = gout_base + offset;
      real *gin = gin_base + offset;
      real *in = in_base + offset;
      long j;
      for (j = 0; j < input->stride[0]; j++)
      {
        const real v = in[j];
        gin[j] = (v < -1 || v > 1) ? 0 : gout[j];
      }
    }
  }
  return 1;
}

/* Method table exported to Lua under the "nn" field of the tensor
 * metatable. */
static const struct luaL_Reg nn_(HardTanh__) [] = {
  {"HardTanh_updateOutput", nn_(HardTanh_updateOutput)},
  {"HardTanh_updateGradInput", nn_(HardTanh_updateGradInput)},
  {NULL, NULL}
};

/* Registers the HardTanh methods on the current torch tensor type. */
static void nn_(HardTanh_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(HardTanh__), "nn");
  lua_pop(L,1);
}

#endif
GB_unop__asin_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__asin_fp32_fp32)
// op(A') function:  GB (_unop_tran__asin_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = asinf (aij)

// source (A) entry type
#define GB_ATYPE \
    float

// destination (C) entry type
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: arcsine, single precision
#define GB_OP(z, x) \
    z = asinf (x) ;

// casting (identity here: both types are float)
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij): fetch, cast, apply, and store one entry
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = asinf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ASIN || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__asin_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = asinf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in A
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = asinf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__asin_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel expands using the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
print.c
/*
    Copyright (c) 2010-2011, Jun Namikawa <jnamika@gmail.com>

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

/* Output/logging helpers for RNN training: opens the per-run output files,
 * writes parameter headers, and dumps per-epoch states, errors, Lyapunov
 * spectra, entropies and periods.  MALLOC/REALLOC/FREE/MALLOC2/FREE2 and
 * print_error_msg() come from utils.h (presumably abort-on-failure
 * allocation wrappers -- confirm against utils.h). */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "utils.h"
#include "print.h"
#include "entropy.h"
#include "rnn_lyapunov.h"


/* Opens `size` files whose names are derived from `template_filename` by
 * replacing a literal "XXXXXX" with a zero-padded 6-digit index (or, if no
 * "XXXXXX" is present, by appending ".NNNNNN").  On any fopen failure the
 * process exits.  Note: `filename` is not freed on the error path, but the
 * process terminates immediately so nothing leaks past exit. */
static void fopen_array (
        FILE **fp_array,
        int size,
        const char *template_filename,
        const char *mode)
{
    char str[7], *filename, *p;
    int length = strlen(template_filename);
    MALLOC(filename, length + 1);
    strcpy(filename, template_filename);
    p = strstr(filename, "XXXXXX");
    if (p == NULL) {
        /* No placeholder: append ".NNNNNN" after the template. */
        REALLOC(filename, length + 8);
        filename[length] = '.';
        filename[length + 7] = '\0';
        p = filename + length + 1;
    }
    for (int i = 0; i < size; i++) {
        /* %.6d zero-pads to exactly 6 digits; memmove copies them without
         * the trailing NUL so the rest of the name is preserved. */
        snprintf(str, sizeof(str), "%.6d", i);
        memmove(p, str, 6);
        fp_array[i] = fopen(filename, mode);
        if (fp_array[i] == NULL) {
            print_error_msg();
            goto error;
        }
    }
    FREE(filename);
    return;
error:
    exit(EXIT_FAILURE);
}

/* Opens every output file whose filename in `gp->iop` is non-empty; fields
 * with empty filenames are left as NULL so later writers can skip them.
 * State files are opened one-per-target-series via fopen_array().  Exits on
 * any open failure. */
void init_output_files (
        const struct general_parameters *gp,
        const struct recurrent_neural_network *rnn,
        struct output_files *fp_list,
        const char *mode)
{
    fp_list->array_size = rnn->series_num;
    if (strlen(gp->iop.state_filename) > 0) {
        MALLOC(fp_list->fp_wstate_array, fp_list->array_size);
        fopen_array(fp_list->fp_wstate_array, fp_list->array_size,
                gp->iop.state_filename, mode);
    } else {
        fp_list->fp_wstate_array = NULL;
    }
    if (strlen(gp->iop.closed_state_filename) > 0) {
        MALLOC(fp_list->fp_wclosed_state_array, fp_list->array_size);
        fopen_array(fp_list->fp_wclosed_state_array, fp_list->array_size,
                gp->iop.closed_state_filename, mode);
    } else {
        fp_list->fp_wclosed_state_array = NULL;
    }
    if (strlen(gp->iop.weight_filename) > 0) {
        fp_list->fp_wweight = fopen(gp->iop.weight_filename, mode);
        if (fp_list->fp_wweight == NULL) goto error;
    } else {
        fp_list->fp_wweight = NULL;
    }
    if (strlen(gp->iop.threshold_filename) > 0) {
        fp_list->fp_wthreshold = fopen(gp->iop.threshold_filename, mode);
        if (fp_list->fp_wthreshold == NULL) goto error;
    } else {
        fp_list->fp_wthreshold = NULL;
    }
    if (strlen(gp->iop.tau_filename) > 0) {
        fp_list->fp_wtau = fopen(gp->iop.tau_filename, mode);
        if (fp_list->fp_wtau == NULL) goto error;
    } else {
        fp_list->fp_wtau = NULL;
    }
    if (strlen(gp->iop.init_filename) > 0) {
        fp_list->fp_winit = fopen(gp->iop.init_filename, mode);
        if (fp_list->fp_winit == NULL) goto error;
    } else {
        fp_list->fp_winit = NULL;
    }
    if (strlen(gp->iop.rep_init_filename) > 0) {
        fp_list->fp_wrep_init = fopen(gp->iop.rep_init_filename, mode);
        if (fp_list->fp_wrep_init == NULL) goto error;
    } else {
        fp_list->fp_wrep_init = NULL;
    }
    /* adapt_lr output only makes sense when the adaptive learning rate is
     * actually in use. */
    if (strlen(gp->iop.adapt_lr_filename) > 0 && gp->mp.use_adaptive_lr) {
        fp_list->fp_wadapt_lr = fopen(gp->iop.adapt_lr_filename, mode);
        if (fp_list->fp_wadapt_lr == NULL) goto error;
    } else {
        fp_list->fp_wadapt_lr = NULL;
    }
    if (strlen(gp->iop.error_filename) > 0) {
        fp_list->fp_werror = fopen(gp->iop.error_filename, mode);
        if (fp_list->fp_werror == NULL) goto error;
    } else {
        fp_list->fp_werror = NULL;
    }
    if (strlen(gp->iop.closed_error_filename) > 0) {
        fp_list->fp_wclosed_error = fopen(gp->iop.closed_error_filename, mode);
        if (fp_list->fp_wclosed_error == NULL) goto error;
    } else {
        fp_list->fp_wclosed_error = NULL;
    }
    if (strlen(gp->iop.lyapunov_filename) > 0) {
        fp_list->fp_wlyapunov = fopen(gp->iop.lyapunov_filename, mode);
        if (fp_list->fp_wlyapunov == NULL) goto error;
    } else {
        fp_list->fp_wlyapunov = NULL;
    }
    if (strlen(gp->iop.entropy_filename) > 0) {
        fp_list->fp_wentropy = fopen(gp->iop.entropy_filename, mode);
        if (fp_list->fp_wentropy == NULL) goto error;
    } else {
        fp_list->fp_wentropy = NULL;
    }
    if (strlen(gp->iop.period_filename) > 0) {
        fp_list->fp_wperiod = fopen(gp->iop.period_filename, mode);
        if (fp_list->fp_wperiod == NULL) goto error;
    } else {
        fp_list->fp_wperiod = NULL;
    }
    return;
error:
    print_error_msg();
    exit(EXIT_FAILURE);
}

/* Closes every file opened by init_output_files(); NULL fields (files that
 * were never requested) are skipped. */
void free_output_files (struct output_files *fp_list)
{
    if (fp_list->fp_wstate_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            fclose(fp_list->fp_wstate_array[i]);
        }
        FREE(fp_list->fp_wstate_array);
    }
    if (fp_list->fp_wclosed_state_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            fclose(fp_list->fp_wclosed_state_array[i]);
        }
        FREE(fp_list->fp_wclosed_state_array);
    }
    if (fp_list->fp_wweight) { fclose(fp_list->fp_wweight); }
    if (fp_list->fp_wthreshold) { fclose(fp_list->fp_wthreshold); }
    if (fp_list->fp_wtau) { fclose(fp_list->fp_wtau); }
    if (fp_list->fp_winit) { fclose(fp_list->fp_winit); }
    if (fp_list->fp_wrep_init) { fclose(fp_list->fp_wrep_init); }
    if (fp_list->fp_wadapt_lr) { fclose(fp_list->fp_wadapt_lr); }
    if (fp_list->fp_werror) { fclose(fp_list->fp_werror); }
    if (fp_list->fp_wclosed_error) { fclose(fp_list->fp_wclosed_error); }
    if (fp_list->fp_wlyapunov) { fclose(fp_list->fp_wlyapunov); }
    if (fp_list->fp_wentropy) { fclose(fp_list->fp_wentropy); }
    if (fp_list->fp_wperiod) { fclose(fp_list->fp_wperiod); }
}

/* Writes the run-wide training/analysis parameters as "# key = value"
 * header comment lines. */
static void print_general_parameters (
        FILE *fp,
        const struct general_parameters *gp)
{
    fprintf(fp, "# seed = %lu\n", gp->mp.seed);
    if (gp->mp.use_adaptive_lr) {
        fprintf(fp, "# use_adaptive_lr\n");
    }
    fprintf(fp, "# rho = %f\n", gp->mp.rho);
    fprintf(fp, "# momentum = %f\n", gp->mp.momentum);
    fprintf(fp, "# delay_length = %d\n", gp->mp.delay_length);
    fprintf(fp, "# lambda = %f\n", gp->mp.lambda);
    fprintf(fp, "# alpha = %f\n", gp->mp.alpha);
    fprintf(fp, "# truncate_length = %d\n", gp->ap.truncate_length);
    fprintf(fp, "# block_length = %d\n", gp->ap.block_length);
    fprintf(fp, "# divide_num = %d\n", gp->ap.divide_num);
    fprintf(fp, "# lyapunov_spectrum_size = %d\n",
            gp->ap.lyapunov_spectrum_size);
    fprintf(fp, "# threshold_period = %g\n", gp->ap.threshold_period);
}

/* Writes the network architecture (sizes, output type, softmax groups,
 * fixed-parameter flags, per-target lengths and connectivity ranges) as
 * "#"-prefixed header lines. */
static void print_rnn_parameters (
        FILE *fp,
        const struct recurrent_neural_network *rnn)
{
    fprintf(fp, "# in_state_size = %d\n", rnn->rnn_p.in_state_size);
    fprintf(fp, "# c_state_size = %d\n", rnn->rnn_p.c_state_size);
    fprintf(fp, "# out_state_size = %d\n", rnn->rnn_p.out_state_size);
    fprintf(fp, "# rep_init_size = %d\n", rnn->rnn_p.rep_init_size);
    if (rnn->rnn_p.output_type == STANDARD_TYPE) {
        fprintf(fp, "# output_type = STANDARD_TYPE\n");
    } else if (rnn->rnn_p.output_type == SOFTMAX_TYPE) {
        fprintf(fp, "# output_type = SOFTMAX_TYPE\n");
        /* One line per softmax group, listing the output units in it. */
        for (int c = 0; c < rnn->rnn_p.softmax_group_num; c++) {
            fprintf(fp, "# group%d = ", c);
            for (int i = 0; i < rnn->rnn_p.out_state_size; i++) {
                if (rnn->rnn_p.softmax_group_id[i] == c) {
                    fprintf(fp, "%d,", i);
                }
            }
            fprintf(fp, "\n");
        }
    }
    if (rnn->rnn_p.fixed_weight) {
        fprintf(fp, "# fixed_weight\n");
    }
    if (rnn->rnn_p.fixed_threshold) {
        fprintf(fp, "# fixed_threshold\n");
    }
    if (rnn->rnn_p.fixed_tau) {
        fprintf(fp, "# fixed_tau\n");
    }
    if (rnn->rnn_p.fixed_init_c_state) {
        fprintf(fp, "# fixed_init_c_state\n");
    }
    fprintf(fp, "# target_num = %d\n", rnn->series_num);
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp, "# target %d\tlength = %d\n", i, rnn->rnn_s[i].length);
    }
    fprintf(fp, "# prior_strength = %f\n", rnn->rnn_p.prior_strength);
    fprintf(fp, "# rep_init_variance = %f\n", rnn->rnn_p.rep_init_variance);
    const struct rnn_parameters *rnn_p = &rnn->rnn_p;
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        fprintf(fp, "# const_init_c[%d] = %d\n", i, rnn_p->const_init_c[i]);
    }
    /* Connectivity lists are (begin,end) index ranges terminated by a
     * sentinel entry whose begin == -1. */
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        fprintf(fp, "# connection_weight_ci[%d] = ", i);
        int I = 0;
        while (rnn_p->connection_ci[i][I].begin != -1) {
            int begin = rnn_p->connection_ci[i][I].begin;
            int end = rnn_p->connection_ci[i][I].end;
            fprintf(fp, "(%d,%d)", begin, end);
            I++;
        }
        fprintf(fp, "\n");
    }
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        fprintf(fp, "# connection_weight_cc[%d] = ", i);
        int I = 0;
        while (rnn_p->connection_cc[i][I].begin != -1) {
            int begin = rnn_p->connection_cc[i][I].begin;
            int end = rnn_p->connection_cc[i][I].end;
            fprintf(fp, "(%d,%d)", begin, end);
            I++;
        }
        fprintf(fp, "\n");
    }
    for (int i = 0; i < rnn_p->out_state_size; i++) {
        fprintf(fp, "# connection_weight_oc[%d] = ", i);
        int I = 0;
        while (rnn_p->connection_oc[i][I].begin != -1) {
            int begin = rnn_p->connection_oc[i][I].begin;
            int end = rnn_p->connection_oc[i][I].end;
            fprintf(fp, "(%d,%d)", begin, end);
            I++;
        }
        fprintf(fp, "\n");
    }
}

/* Appends one tab-separated row: epoch followed by all input->context,
 * context->context and context->output weights. */
static void print_rnn_weight (
        FILE *fp,
        long epoch,
        const struct rnn_parameters *rnn_p)
{
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        for (int j = 0; j < rnn_p->in_state_size; j++) {
            fprintf(fp, "\t%f", rnn_p->weight_ci[i][j]);
        }
        for (int j = 0; j < rnn_p->c_state_size; j++) {
            fprintf(fp, "\t%f", rnn_p->weight_cc[i][j]);
        }
    }
    for (int i = 0; i < rnn_p->out_state_size; i++) {
        for (int j = 0; j < rnn_p->c_state_size; j++) {
            fprintf(fp, "\t%f", rnn_p->weight_oc[i][j]);
        }
    }
    fprintf(fp, "\n");
}

/* Appends one row: epoch followed by context and output thresholds. */
static void print_rnn_threshold (
        FILE *fp,
        long epoch,
        const struct rnn_parameters *rnn_p)
{
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        fprintf(fp, "\t%f", rnn_p->threshold_c[i]);
    }
    for (int i = 0; i < rnn_p->out_state_size; i++) {
        fprintf(fp, "\t%f", rnn_p->threshold_o[i]);
    }
    fprintf(fp, "\n");
}

/* Appends one row: epoch followed by the per-neuron time constants. */
static void print_rnn_tau (
        FILE *fp,
        long epoch,
        const struct rnn_parameters *rnn_p)
{
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        fprintf(fp, "\t%g", rnn_p->tau[i]);
    }
    fprintf(fp, "\n");
}

/* Writes, for each target series, its gate and internal initial context
 * state under an "# epoch = N" header. */
static void print_rnn_init (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn)
{
    fprintf(fp, "# epoch = %ld\n", epoch);
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp, "%d", i);
        for (int j = 0; j < rnn->rnn_p.rep_init_size; j++) {
            fprintf(fp, "\t%f", rnn->rnn_s[i].gate_init_c[j]);
        }
        for (int j = 0; j < rnn->rnn_p.c_state_size; j++) {
            fprintf(fp, "\t%f", rnn->rnn_s[i].init_c_inter_state[j]);
        }
        fprintf(fp, "\n");
    }
}

/* Writes the representative initial context states, one row per
 * representative point. */
static void print_rnn_rep_init (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn)
{
    fprintf(fp, "# epoch = %ld\n", epoch);
    for (int i = 0; i < rnn->rnn_p.rep_init_size; i++) {
        fprintf(fp, "%d", i);
        for (int j = 0; j < rnn->rnn_p.c_state_size; j++) {
            fprintf(fp, "\t%f", rnn->rnn_p.rep_init_c[i][j]);
        }
        fprintf(fp, "\n");
    }
}

/* Appends one "epoch <tab> learning-rate" row. */
static void print_adapt_lr (
        FILE *fp,
        long epoch,
        double adapt_lr)
{
    fprintf(fp, "%ld\t%f\n", epoch, adapt_lr);
}

/* Appends one row with the per-series error, normalized by series length
 * and output dimension.  Errors are computed in parallel, printing stays
 * serial. */
static void print_rnn_error (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn)
{
    double error[rnn->series_num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < rnn->series_num; i++) {
        error[i] = rnn_get_error(rnn->rnn_s + i);
        error[i] /= rnn->rnn_s[i].length * rnn->rnn_p.out_state_size;
    }
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp, "\t%g", error[i]);
    }
    fprintf(fp, "\n");
}

/* Dumps one time step per row: step index, then per output unit the teacher
 * and produced values (plus variance for STANDARD_TYPE), then the internal
 * context states. */
static void print_rnn_state (
        FILE *fp,
        const struct rnn_state *rnn_s)
{
    for (int n = 0; n < rnn_s->length; n++) {
        fprintf(fp, "%d", n);
        for (int i = 0; i < rnn_s->rnn_p->out_state_size; i++) {
            fprintf(fp, "\t%f", rnn_s->teach_state[n][i]);
            fprintf(fp, "\t%f", rnn_s->out_state[n][i]);
            if (rnn_s->rnn_p->output_type == STANDARD_TYPE) {
                fprintf(fp, "\t%f", rnn_s->var_state[n][i]);
            }
        }
        for (int i = 0; i < rnn_s->rnn_p->c_state_size; i++) {
            //fprintf(fp, "\t%f", rnn_s->c_state[n][i]);
            fprintf(fp, "\t%f", rnn_s->c_inter_state[n][i]);
        }
        fprintf(fp, "\n");
    }
}

/* Dumps the state of every target series, each into its own file; safe to
 * parallelize because each iteration writes to a distinct FILE*. */
static void print_rnn_state_forall (
        FILE **fp_array,
        long epoch,
        const struct recurrent_neural_network *rnn)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp_array[i], "# epoch = %ld\n", epoch);
        fprintf(fp_array[i], "# target:%d\n", i);
        print_rnn_state(fp_array[i], rnn->rnn_s + i);
    }
}

/* Fills `spectrum` with the Lyapunov spectrum of one series (zeros when the
 * series is too short to analyze).  Exits on analysis failure. */
static void compute_lyapunov_spectrum_of_rnn_state (
        const struct rnn_state *rnn_s,
        int spectrum_size,
        int delay_length,
        int truncate_length,
        double *spectrum)
{
    if (rnn_s->length > truncate_length) {
        struct rnn_lyapunov_info rl_info;
        init_rnn_lyapunov_info(&rl_info, rnn_s, delay_length, truncate_length);
        spectrum = rnn_lyapunov_spectrum(&rl_info, spectrum, spectrum_size);
        if (spectrum == NULL) {
            print_error_msg();
            exit(EXIT_FAILURE);
        }
        free_rnn_lyapunov_info(&rl_info);
    } else {
        for (int i = 0; i < spectrum_size; i++) {
            spectrum[i] = 0;
        }
    }
}

/* Appends one row with the leading Lyapunov exponents of every series.  A
 * negative or too-large requested spectrum_size is clipped to the phase
 * space dimension. */
static void print_lyapunov_spectrum_of_rnn (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn,
        int spectrum_size,
        int delay_length,
        int truncate_length)
{
    int max_num;
    // decides spectrum_size which is the number to evaluate Lyapunov exponents
    max_num = (rnn->rnn_p.in_state_size * delay_length) +
        rnn->rnn_p.c_state_size;
    if (max_num < spectrum_size || spectrum_size < 0) {
        spectrum_size = max_num;
    }
    if (spectrum_size <= 0) return;

    double **spectrum = NULL;
    MALLOC2(spectrum, rnn->series_num, spectrum_size);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < rnn->series_num; i++) {
        compute_lyapunov_spectrum_of_rnn_state(rnn->rnn_s + i, spectrum_size,
                delay_length, truncate_length, spectrum[i]);
    }
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn->series_num; i++) {
        for (int j = 0; j < spectrum_size; j++) {
            fprintf(fp, "\t%f", spectrum[i][j]);
        }
    }
    fprintf(fp, "\n");
    FREE2(spectrum);
}

/* assigns an index to the vector with respect to indexed hypercubes in the
 * R^dimension space */
/* Maps x in [min,max] to a bin index in [0, divide_num-1]; values past the
 * last boundary fall into the final bin. */
static inline int f2symbol (
        const double x,
        double min,
        double max,
        int divide_num)
{
    int symbol;
    double mesh_size, split;
    mesh_size = (max - min)/divide_num;
    symbol = 0;
    split = min;
    for (int i = 0; i < divide_num; i++) {
        split += mesh_size;
        if (x <= split || i == divide_num-1) {
            symbol = i;
            break;
        }
    }
    return symbol;
}

/* Discretizes teacher and output sequences (after dropping the first
 * truncate_length steps), then computes KL divergence, block entropies and
 * generation rate between the two symbol streams.  All outputs are zeroed
 * when the series is too short. */
static void compute_kl_divergence_of_rnn_state (
        const struct rnn_state *rnn_s,
        int truncate_length,
        int block_length,
        int divide_num,
        double *kl_div,
        double *entropy_t,
        double *entropy_o,
        double *gen_rate)
{
    if (rnn_s->length > truncate_length) {
        double min, max;
        int **sequence_t, **sequence_o;
        struct block_frequency bf_t, bf_o;
        const int length = rnn_s->length - truncate_length;
        /* Output range depends on the output nonlinearity: tanh-style
         * outputs live in [-1,1], softmax outputs in [0,1]. */
        if (rnn_s->rnn_p->output_type == STANDARD_TYPE) {
            min = -1.0;
            max = 1.0;
        } else {
            min = 0.0;
            max = 1.0;
        }
        MALLOC2(sequence_t, length, rnn_s->rnn_p->out_state_size);
        MALLOC2(sequence_o, length, rnn_s->rnn_p->out_state_size);
        for (int n = 0; n < length; n++) {
            int N = n + truncate_length;
            for (int i = 0; i < rnn_s->rnn_p->out_state_size; i++) {
                sequence_t[n][i] = f2symbol(rnn_s->teach_state[N][i], min,
                        max, divide_num);
                sequence_o[n][i] = f2symbol(rnn_s->out_state[N][i], min,
                        max, divide_num);
            }
        }
        init_block_frequency(&bf_t, (const int* const*)sequence_t,
                rnn_s->rnn_p->out_state_size, length, block_length);
        init_block_frequency(&bf_o, (const int* const*)sequence_o,
                rnn_s->rnn_p->out_state_size, length, block_length);
        *kl_div = kullback_leibler_divergence(&bf_t, &bf_o);
        *entropy_t = block_entropy(&bf_t) / block_length;
        *entropy_o = block_entropy(&bf_o) / block_length;
        *gen_rate = generation_rate(&bf_t, &bf_o);
        free_block_frequency(&bf_t);
        free_block_frequency(&bf_o);
        FREE2(sequence_t);
        FREE2(sequence_o);
    } else {
        *kl_div = 0;
        *entropy_t = 0;
        *entropy_o = 0;
        *gen_rate = 0;
    }
}

/* Appends one row: epoch then, per series, KL divergence, generation rate
 * and both block entropies (note the printed order differs from the
 * compute function's argument order). */
static void print_kl_divergence_of_rnn (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn,
        int truncate_length,
        int block_length,
        int divide_num)
{
    double kl_div[rnn->series_num];
    double entropy_t[rnn->series_num];
    double entropy_o[rnn->series_num];
    double gen_rate[rnn->series_num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < rnn->series_num; i++) {
        compute_kl_divergence_of_rnn_state(rnn->rnn_s + i, truncate_length,
                block_length, divide_num, kl_div + i, entropy_t + i,
                entropy_o + i, gen_rate + i);
    }
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp, "\t%g\t%g\t%g\t%g", kl_div[i], gen_rate[i], entropy_t[i],
                entropy_o[i]);
    }
    fprintf(fp, "\n");
}

/* Estimates the orbit period by scanning backwards for the first earlier
 * time step whose (context, output) state is within `threshold` squared
 * distance of the final step; returns the full length if none is found. */
static int get_period_of_rnn_state (
        const struct rnn_state *rnn_s,
        double threshold)
{
    int period = 1;
    for (int n = rnn_s->length - 2; n >= 0; n--, period++) {
        double d = 0;
        for (int i = 0; i < rnn_s->rnn_p->c_state_size; i++) {
            double x = rnn_s->c_state[rnn_s->length-1][i] -
                rnn_s->c_state[n][i];
            d += x * x;
        }
        for (int i = 0; i < rnn_s->rnn_p->out_state_size; i++) {
            double x = rnn_s->out_state[rnn_s->length-1][i] -
                rnn_s->out_state[n][i];
            d += x * x;
        }
        if (d <= threshold) {
            break;
        }
    }
    return period;
}

/* Appends one row with the detected period of every series. */
static void print_period_of_rnn (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn,
        double threshold)
{
    int period[rnn->series_num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < rnn->series_num; i++) {
        period[i] = get_period_of_rnn_state(rnn->rnn_s + i, threshold);
    }
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp, "\t%d", period[i]);
    }
    fprintf(fp, "\n");
}

/* Decides whether output is due at this epoch.  With logscale intervals the
 * effective interval grows with the order of magnitude of the epoch, capped
 * at pi->interval; output is also gated to [pi->init, pi->end]. */
static int enable_print (
        long epoch,
        const struct print_interval *pi)
{
    long interval;
    if (pi->use_logscale_interval) {
        interval = 1;
        while (epoch >= 10 * interval) {
            interval *= 10;
        }
        if (interval > pi->interval) {
            interval = pi->interval;
        }
    } else {
        interval = pi->interval;
    }
    return ((epoch % interval) == 0 && epoch >= pi->init &&
            epoch <= pi->end);
}

/* Writes the parameter-type outputs (weights, thresholds, taus, initial
 * states, adaptive lr) that are due at this epoch. */
static void print_parameters_with_epoch (
        long epoch,
        const struct general_parameters *gp,
        const struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    if (fp_list->fp_wweight &&
            enable_print(epoch, &gp->iop.interval_for_weight_file)) {
        print_rnn_weight(fp_list->fp_wweight, epoch, &rnn->rnn_p);
    }
    if (fp_list->fp_wthreshold &&
            enable_print(epoch, &gp->iop.interval_for_threshold_file)) {
        print_rnn_threshold(fp_list->fp_wthreshold, epoch, &rnn->rnn_p);
    }
    if (fp_list->fp_wtau &&
            enable_print(epoch, &gp->iop.interval_for_tau_file)) {
        print_rnn_tau(fp_list->fp_wtau, epoch, &rnn->rnn_p);
    }
    if (fp_list->fp_winit &&
            enable_print(epoch, &gp->iop.interval_for_init_file)) {
        print_rnn_init(fp_list->fp_winit, epoch, rnn);
    }
    if (fp_list->fp_wrep_init &&
            enable_print(epoch, &gp->iop.interval_for_rep_init_file)) {
        print_rnn_rep_init(fp_list->fp_wrep_init, epoch, rnn);
    }
    if (fp_list->fp_wadapt_lr &&
            enable_print(epoch, &gp->iop.interval_for_adapt_lr_file)) {
        print_adapt_lr(fp_list->fp_wadapt_lr, epoch, gp->inp.adapt_lr);
        fflush(fp_list->fp_wadapt_lr);
    }
}

/* Writes open-loop (teacher-forced) outputs due at this epoch.  The forward
 * dynamics are computed lazily, at most once, only if some output needs
 * them. */
static void print_open_loop_data_with_epoch (
        long epoch,
        const struct general_parameters *gp,
        struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    int compute_forward_dynamics = 0;
    if (fp_list->fp_werror &&
            enable_print(epoch, &gp->iop.interval_for_error_file)) {
        if (!compute_forward_dynamics) {
            rnn_forward_dynamics_forall(rnn);
            compute_forward_dynamics = 1;
        }
        print_rnn_error(fp_list->fp_werror, epoch, rnn);
        fflush(fp_list->fp_werror);
    }
    if (fp_list->fp_wstate_array &&
            enable_print(epoch, &gp->iop.interval_for_state_file)) {
        if (!compute_forward_dynamics) {
            rnn_forward_dynamics_forall(rnn);
            compute_forward_dynamics = 1;
        }
        print_rnn_state_forall(fp_list->fp_wstate_array, epoch, rnn);
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wstate_array[i], "\n");
        }
    }
}

/* Writes closed-loop (self-feeding) outputs due at this epoch: error,
 * states, Lyapunov spectra, entropies and periods.  Closed-loop dynamics
 * are likewise computed lazily, at most once. */
static void print_closed_loop_data_with_epoch (
        long epoch,
        const struct general_parameters *gp,
        struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    int compute_forward_dynamics = 0;
    if (fp_list->fp_wclosed_error &&
            enable_print(epoch, &gp->iop.interval_for_closed_error_file)) {
        if (!compute_forward_dynamics) {
            rnn_forward_dynamics_in_closed_loop_forall(rnn,
                    gp->mp.delay_length);
            compute_forward_dynamics = 1;
        }
        print_rnn_error(fp_list->fp_wclosed_error, epoch, rnn);
        fflush(fp_list->fp_wclosed_error);
    }
    if (fp_list->fp_wclosed_state_array &&
            enable_print(epoch, &gp->iop.interval_for_closed_state_file)) {
        if (!compute_forward_dynamics) {
            rnn_forward_dynamics_in_closed_loop_forall(rnn,
                    gp->mp.delay_length);
            compute_forward_dynamics = 1;
        }
        print_rnn_state_forall(fp_list->fp_wclosed_state_array, epoch, rnn);
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wclosed_state_array[i], "\n");
        }
    }
    if (fp_list->fp_wlyapunov &&
            enable_print(epoch, &gp->iop.interval_for_lyapunov_file)) {
        if (!compute_forward_dynamics) {
            rnn_forward_dynamics_in_closed_loop_forall(rnn,
                    gp->mp.delay_length);
            compute_forward_dynamics = 1;
        }
        print_lyapunov_spectrum_of_rnn(fp_list->fp_wlyapunov, epoch, rnn,
                gp->ap.lyapunov_spectrum_size, gp->mp.delay_length,
                gp->ap.truncate_length);
        fflush(fp_list->fp_wlyapunov);
    }
    if (fp_list->fp_wentropy &&
            enable_print(epoch, &gp->iop.interval_for_entropy_file)) {
        if (!compute_forward_dynamics) {
            rnn_forward_dynamics_in_closed_loop_forall(rnn,
                    gp->mp.delay_length);
            compute_forward_dynamics = 1;
        }
        print_kl_divergence_of_rnn(fp_list->fp_wentropy, epoch, rnn,
                gp->ap.truncate_length, gp->ap.block_length,
                gp->ap.divide_num);
        fflush(fp_list->fp_wentropy);
    }
    if (fp_list->fp_wperiod &&
            enable_print(epoch, &gp->iop.interval_for_period_file)) {
        if (!compute_forward_dynamics) {
            rnn_forward_dynamics_in_closed_loop_forall(rnn,
                    gp->mp.delay_length);
            compute_forward_dynamics = 1;
        }
        print_period_of_rnn(fp_list->fp_wperiod, epoch, rnn,
                gp->ap.threshold_period);
        fflush(fp_list->fp_wperiod);
    }
}

/* Writes a descriptive header (file type marker plus all general and
 * network parameters) to every open output file before training starts. */
void print_training_main_begin (
        const struct general_parameters *gp,
        const struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    if (fp_list->fp_wstate_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wstate_array[i], "# STATE FILE\n");
            print_general_parameters(fp_list->fp_wstate_array[i], gp);
            print_rnn_parameters(fp_list->fp_wstate_array[i], rnn);
        }
    }
    if (fp_list->fp_wclosed_state_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wclosed_state_array[i], "# STATE FILE\n");
            print_general_parameters(fp_list->fp_wclosed_state_array[i], gp);
            print_rnn_parameters(fp_list->fp_wclosed_state_array[i], rnn);
        }
    }
    if (fp_list->fp_wweight) {
        fprintf(fp_list->fp_wweight, "# WEIGHT FILE\n");
        print_general_parameters(fp_list->fp_wweight, gp);
        print_rnn_parameters(fp_list->fp_wweight, rnn);
    }
    if (fp_list->fp_wthreshold) {
        fprintf(fp_list->fp_wthreshold, "# THRESHOLD FILE\n");
        print_general_parameters(fp_list->fp_wthreshold, gp);
        print_rnn_parameters(fp_list->fp_wthreshold, rnn);
    }
    if (fp_list->fp_wtau) {
        fprintf(fp_list->fp_wtau, "# TAU FILE\n");
        print_general_parameters(fp_list->fp_wtau, gp);
        print_rnn_parameters(fp_list->fp_wtau, rnn);
    }
    if (fp_list->fp_winit) {
        fprintf(fp_list->fp_winit, "# INIT FILE\n");
        print_general_parameters(fp_list->fp_winit, gp);
        print_rnn_parameters(fp_list->fp_winit, rnn);
    }
    if (fp_list->fp_wrep_init) {
        fprintf(fp_list->fp_wrep_init, "# REP INIT FILE\n");
        print_general_parameters(fp_list->fp_wrep_init, gp);
        print_rnn_parameters(fp_list->fp_wrep_init, rnn);
    }
    if (fp_list->fp_wadapt_lr) {
        fprintf(fp_list->fp_wadapt_lr, "# ADAPT_LR FILE\n");
        print_general_parameters(fp_list->fp_wadapt_lr, gp);
        print_rnn_parameters(fp_list->fp_wadapt_lr, rnn);
    }
    if (fp_list->fp_werror) {
        fprintf(fp_list->fp_werror, "# ERROR FILE\n");
        print_general_parameters(fp_list->fp_werror, gp);
        print_rnn_parameters(fp_list->fp_werror, rnn);
    }
    if (fp_list->fp_wclosed_error) {
        fprintf(fp_list->fp_wclosed_error, "# ERROR FILE\n");
        print_general_parameters(fp_list->fp_wclosed_error, gp);
        print_rnn_parameters(fp_list->fp_wclosed_error, rnn);
    }
    if (fp_list->fp_wlyapunov) {
        fprintf(fp_list->fp_wlyapunov, "# LYAPUNOV FILE\n");
        print_general_parameters(fp_list->fp_wlyapunov, gp);
        print_rnn_parameters(fp_list->fp_wlyapunov, rnn);
    }
    if (fp_list->fp_wentropy) {
        fprintf(fp_list->fp_wentropy, "# ENTROPY FILE\n");
        print_general_parameters(fp_list->fp_wentropy, gp);
        print_rnn_parameters(fp_list->fp_wentropy, rnn);
    }
    if (fp_list->fp_wperiod) {
        fprintf(fp_list->fp_wperiod, "# PERIOD FILE\n");
        print_general_parameters(fp_list->fp_wperiod, gp);
        print_rnn_parameters(fp_list->fp_wperiod, rnn);
    }
}

/* Per-epoch hook called from the training loop: emits whatever parameter,
 * open-loop and closed-loop outputs are due at this epoch. */
void print_training_main_loop (
        long epoch,
        const struct general_parameters *gp,
        struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    print_parameters_with_epoch(epoch, gp, rnn, fp_list);
    print_open_loop_data_with_epoch(epoch, gp, rnn, fp_list);
    print_closed_loop_data_with_epoch(epoch, gp, rnn, fp_list);
}
qsort_arg_mt.c
/* * Imported from PostgreSQL sources by Teodor Sigaev <teodor@sigaev.ru>, <sigaev@corp.mail.ru> */ /* * qsort_arg.c: qsort with a passthrough "void *" argument * * Modifications from vanilla NetBSD source: * Add do ... while() macro fix * Remove __inline, _DIAGASSERTs, __P * Remove ill-considered "swap_cnt" switch to insertion sort, * in favor of a simple check for presorted input. * * CAUTION: if you change this file, see also qsort.c * * $PostgreSQL: pgsql/src/port/qsort_arg.c,v 1.4 2007/03/18 05:36:50 neilc Exp $ */ /* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <qsort_arg.h> #include <stdint.h> #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #ifndef HAVE_OPENMP #error "HAVE_OPENMP macro is not defined" #endif #define min(a, b) (a) < (b) ? a : b static char *med3(char *a, char *b, char *c, int (*cmp)(const void *a, const void *b, void *arg), void *arg); static void swapfunc(char *, char *, size_t, int); /** * @brief Reduce the current number of threads in the thread pool to the * bare minimum. Doesn't prevent the pool from spawning new threads later * if demand mounts. */ static void thread_pool_trim() { /* * Trim OpenMP thread pool. * Though we lack the direct control the workaround below works for * GNU OpenMP library. The library stops surplus threads on entering * a parallel region. Can't go below 2 threads due to the * implementation quirk. */ #pragma omp parallel num_threads(2) ; } /* * Qsort routine based on J. L. Bentley and M. D. McIlroy, * "Engineering a sort function", * Software--Practice and Experience 23 (1993) 1249-1265. * We have modified their original by adding a check for already-sorted input, * which seems to be a win per discussions on pgsql-hackers around 2006-03-21. 
*/ #define swapcode(TYPE, parmi, parmj, n) \ do { \ size_t i = (n) / sizeof (TYPE); \ TYPE *pi = (TYPE *)(void *)(parmi); \ TYPE *pj = (TYPE *)(void *)(parmj); \ do { \ TYPE t = *pi; \ *pi++ = *pj; \ *pj++ = t; \ } while (--i > 0); \ } while (0) #define SWAPINIT(a, es) swaptype = ((char *)(a) - (char *)0) % sizeof(long) || \ (es) % sizeof(long) ? 2 : (es) == sizeof(long)? 0 : 1; static void swapfunc(char *a, char *b, size_t n, int swaptype) { if (swaptype <= 1) swapcode(long, a, b, n); else swapcode(char, a, b, n); } #define swap(a, b) \ if (swaptype == 0) { \ long t = *(long *)(void *)(a); \ *(long *)(void *)(a) = *(long *)(void *)(b); \ *(long *)(void *)(b) = t; \ } else \ swapfunc(a, b, es, swaptype) #define vecswap(a, b, n) if ((n) > 0) swapfunc((a), (b), (size_t)(n), swaptype) static char * med3(char *a, char *b, char *c, int (*cmp)(const void *a, const void *b, void *arg), void *arg) { return cmp(a, b, arg) < 0 ? (cmp(b, c, arg) < 0 ? b : (cmp(a, c, arg) < 0 ? c : a)) : (cmp(b, c, arg) > 0 ? b : (cmp(a, c, arg) < 0 ? 
a : c)); }

/*
 * Worker for qsort_arg_mt(): Bentley-McIlroy three-way quicksort over n
 * elements of es bytes each, comparing with cmp(elem1, elem2, arg).
 * Must be called from inside an active OpenMP parallel region: the larger-
 * of-the-two partitions is sorted in place via the `goto loop` tail
 * iteration while the other is handed to an OpenMP task (function
 * parameters are firstprivate by default in a task, so the later
 * reassignment of `a`/`n` does not affect the spawned child).
 */
static void
qsort_arg_mt_internal(void *a, size_t n, intptr_t es,
		int (*cmp)(const void *a, const void *b, void *arg), void *arg)
{
	char	   *pa, *pb, *pc, *pd, *pl, *pm, *pn;
	intptr_t	d, r, swaptype, presorted;

loop:SWAPINIT(a, es);
	/* Small partitions: plain insertion sort. */
	if (n < 7)
	{
		for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
			for (pl = pm; pl > (char *) a && cmp(pl - es, pl, arg) > 0;
				 pl -= es)
				swap(pl, pl - es);
		return;
	}
	/* One linear scan: already-sorted input is a common case and cheap to
	 * detect (the PostgreSQL addition to the vanilla NetBSD code). */
	presorted = 1;
	for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
	{
		if (cmp(pm - es, pm, arg) > 0)
		{
			presorted = 0;
			break;
		}
	}
	if (presorted)
		return;
	/* Pivot selection: middle element; median of 3 for n > 7; pseudo-median
	 * of 9 for n > 40. */
	pm = (char *) a + (n / 2) * es;
	if (n > 7)
	{
		pl = (char *) a;
		pn = (char *) a + (n - 1) * es;
		if (n > 40)
		{
			d = (n / 8) * es;
			pl = med3(pl, pl + d, pl + 2 * d, cmp, arg);
			pm = med3(pm - d, pm, pm + d, cmp, arg);
			pn = med3(pn - 2 * d, pn - d, pn, cmp, arg);
		}
		pm = med3(pl, pm, pn, cmp, arg);
	}
	swap((char*)a, pm);
	/* Three-way partition: pivot-equal runs collect at both ends
	 * ([a,pa) and (pd, end]) and are swapped into the middle below. */
	pa = pb = (char *) a + es;
	pc = pd = (char *) a + (n - 1) * es;
	for (;;)
	{
		while (pb <= pc && (r = cmp(pb, a, arg)) <= 0)
		{
			if (r == 0)
			{
				swap(pa, pb);
				pa += es;
			}
			pb += es;
		}
		while (pb <= pc && (r = cmp(pc, a, arg)) >= 0)
		{
			if (r == 0)
			{
				swap(pc, pd);
				pd -= es;
			}
			pc -= es;
		}
		if (pb > pc)
			break;
		swap(pb, pc);
		pb += es;
		pc -= es;
	}
	/* Move the pivot-equal runs into the middle. */
	pn = (char *) a + n * es;
	r = min(pa - (char *) a, pb - pa);
	vecswap((char*)a, pb - r, r);
	r = min(pd - pc, pn - pd - es);
	vecswap(pb, pn - r, r);
	if ((r = pb - pa) > es)
	{
		/* Left partition goes to a task; it completes before the implicit
		 * barrier that ends the enclosing parallel region. */
#pragma omp task
		qsort_arg_mt_internal(a, r / es, es, cmp, arg);
	}
	if ((r = pd - pc) > es)
	{
		/* Iterate rather than recurse to save stack space */
		a = pn - r;
		n = r / es;
		goto loop;
	}
}

/*
 * Public entry point: multithreaded qsort with a passthrough `arg` given to
 * every comparison.  One thread enters the recursion (omp single); the
 * others pick up the tasks it spawns.  Afterwards the surplus OpenMP
 * threads are parked via thread_pool_trim().
 */
void
qsort_arg_mt(void *a, size_t n, size_t es,
		int (*cmp)(const void *a, const void *b, void *arg), void *arg)
{
#pragma omp parallel
	{
#pragma omp single
		qsort_arg_mt_internal(a, n, es, cmp, arg);
	}
	thread_pool_trim();
}

#if defined(__cplusplus)
}
#endif /* defined(__cplusplus) */
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
/* Ceiling/floor of integer division, computed in double precision
 * (CLooG-generated loop bounds below rely on these). */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"

#define TESTS 2
/* NOTE(review): unlike min/max above, MAX/MIN leave their result arms
 * unparenthesized -- keep out of compound expressions. */
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.  Y is modified (carry propagation).
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative.
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(12*t1+Ny+21,32)),floord(24*t2+Ny+20,32)),floord(24*t1-24*t2+Nz+Ny+19,32));t3++) { for (t4=max(max(max(0,ceild(3*t1-511,512)),ceild(24*t2-Nz-2044,2048)),ceild(32*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(12*t1+Nx+21,2048)),floord(24*t2+Nx+20,2048)),floord(32*t3+Nx+28,2048)),floord(24*t1-24*t2+Nz+Nx+19,2048));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),32*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),32*t3+30),2048*t4+2046),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(2048*t4,t5+1); ubv=min(2048*t4+2047,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ 
(-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
jacobi-ompacc-multiGPU.c
/* * Rectangular matrix multiplication, started from MIT Cilk matmul.cilk example * */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include "omp.h" #include <pthread.h> #include <string.h> #define REAL double /* in second */ #define read_timer() omp_get_wtime() /* in ms */ #define read_timer_ms() (omp_get_wtime()*1000.0) #define MAX_GPU_COUNT 4 int mits=5000; REAL tol=0.0000000001,relax=1.0,alpha=0.0543; REAL error_ref= 9.213041E-04, resid_ref = 2.355794E-08; // depending on MSIZE and precision (double vs. float) !! void initialize(REAL *u, REAL *uold, REAL *f, REAL* dx, REAL* dy, int m, int n) { int i,j, xx,yy; //double PI=3.1415926; *dx = 2.0 / (n-1); *dy = 2.0 / (m-1); /* Initialize initial condition and RHS */ #pragma omp parallel for private(xx,yy,j,i) for (i=0;i<n;i++) for (j=0;j<m;j++) { xx =(int)( -1.0 + *dx * (i-1)); yy = (int)(-1.0 + *dy * (j-1)) ; u[j + i * m] = 0.0; uold[j + i * m] = 0.0; f[j + i * m] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\ - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy); } } double maxerror(REAL *A, REAL *B, int n) { int i, j; double error = 0.0; for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { double diff = (A[i * n + j] - B[i * n + j]) / A[i * n + j]; // printf("%4f -- %4f\n", A[i*n+j], B[i*n+j]); if (diff < 0) diff = -diff; if (diff > error) error = diff; } } return error; } void jacobi_GPU(REAL *u, REAL *uold, REAL *f, REAL dx, REAL dy, int offset, int m, int n, double* error) { double omega; int i,j,k; double resid,ax,ay,b; // double error_local; // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2; // float te1,te2; // float second; omega=relax; /* * Initialize coefficients */ ax = 1.0/(dx*dx); /* X-direction coef */ ay = 1.0/(dy*dy); /* Y-direction coef */ b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */ int tid = omp_get_thread_num(); double err_tmp = 0.0; //#pragma omp parallel { //#pragma omp target device (tid) map(tofrom: u[offset*m:m*n]) map(to: uold[offset*m:m*n],f[offset*m:m*n],m,n, 
offset,ax,ay,b,omega)
#pragma omp parallel for shared(uold,u,f, offset, ax,ay,b,omega,n) private(resid,j,i) reduction(+:err_tmp)
        for (i=offset+1;i<offset+(n-1);i++) {
            for (j=1;j<(m-1);j++) {
                resid = (ax*(uold[j + (i-1)*m] + uold[j + (i+1)*m])\
                        + ay*(uold[j-1 + i*m] + uold[j+1 + i*m])+ b * uold[j + i*m] - f[j+i*m])/b;
                u[j+i*m] = uold[j+i*m] - omega * resid;
                err_tmp = err_tmp + resid*resid ;
            }
        }
    }
    /* omp end parallel */
    /* Publish this slice's squared-residual sum to the caller. */
    *error += err_tmp;
}

/*
 * Reference CPU solver: Jacobi iteration with OpenMP worksharing until the
 * residual drops below tol or mits iterations are reached.  The converged
 * residual is checked against resid_ref via assert().
 */
void jacobi_omp(REAL *u, REAL *uold, REAL *f, REAL dx, REAL dy, int m, int n)
{
    double omega;
    int i,j,k;
    double error,resid,ax,ay,b;
    // double  error_local;

    // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
    // float te1,te2;
    // float second;

    omega=relax;
    /*
     * Initialize coefficients
     */
    ax = 1.0/(dx*dx); /* X-direction coef */
    ay = 1.0/(dy*dy); /* Y-direction coef */
    b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
    error = 10.0 * tol;
    k = 1;
    REAL* tmp;
    while ((k<=mits)&&(error>tol)) {
        error = 0.0;

        /* Copy new solution into old (pointer swap, no data copy) */
        tmp = u;
        u = uold;
        uold = tmp;
#pragma omp parallel
        {
#pragma omp for private(resid,j,i) reduction(+:error) nowait
            for (i=1;i<(n-1);i++)
                for (j=1;j<(m-1);j++) {
                    resid = (ax*(uold[j + (i-1)*m] + uold[j + (i+1)*m])\
                            + ay*(uold[j-1 + i*m] + uold[j+1 + i*m])+ b * uold[j + i*m] - f[j+i*m])/b;
                    u[j+i*m] = uold[j+i*m] - omega * resid;
                    error = error + resid*resid ;
                }
        } /* omp end parallel */

        /* Error check */
        k = k + 1;
        if (k%500==0)
            printf("OMP_run: finished %d iteration.\n",k);
        error = sqrt(error)/(n*m);
        //    printf("%d %e %e\n",k,error,tol);
    } /* End iteration loop */
    printf("Total Number of Iterations:%d\n",k);
    printf("Residual:%E\n", error);
    printf("Residual_ref :%E\n", resid_ref);
    printf ("Diff ref=%E\n", fabs(error-resid_ref));
    assert (fabs(error-resid_ref)/resid_ref < 1E-5);
}

/*
 * Driver: runs the CPU reference solver, then the multi-GPU variant with
 * one OpenMP thread per CUDA device, and prints timing for both.
 */
int main(int argc, char *argv[])
{
    int m,n, idev;
    int num_threads;
    REAL *u, *uold, *f;
    REAL dx,dy;
    double omp_for_elapsed, acc_elapsed;
    int halosize = 1;
    //  if (argc != 3) {
    //    fprintf(stderr, "Usage: matmul <m> <n>\n");
    //    exit(1);
    //  }
    //  m = atoi(argv[1]);
    //  n = atoi(argv[2]);
    m = 512;
    n = 512;
    u = (REAL*)malloc(m * n * sizeof(REAL));
    uold = (REAL*)malloc(m * n * sizeof(REAL));
    f = (REAL*)malloc(m * n * sizeof(REAL));

    /* openmp parallel for version */
    initialize(u, uold, f, &dx, &dy, m, n);
    omp_for_elapsed = omp_get_wtime();
    jacobi_omp(u, uold, f, dx, dy, m,n);
    omp_for_elapsed = omp_get_wtime() - omp_for_elapsed;

    /* Re-seed the grids for the multi-GPU run. */
    initialize(u, uold, f, &dx, &dy, m, n);
    int GPU_N = 0;
    /* NOTE(review): cudaGetDeviceCount/cudaSetDevice are used without a CUDA
     * header in view -- presumably declared via the build setup; confirm. */
    cudaGetDeviceCount(&GPU_N);
    if (GPU_N > MAX_GPU_COUNT) {
        GPU_N = MAX_GPU_COUNT;
    }
    printf("CUDA-capable device count: %i\n", GPU_N);
    omp_set_num_threads(GPU_N);
#pragma omp parallel
    {
#pragma omp master
        {
            num_threads = omp_get_num_threads();
        }
    }
    //double ompacc_time = read_timer();
    acc_elapsed = omp_get_wtime();
    REAL* tmp;
    double* error;      /* per-thread squared-residual sums */
    double error_sum;   /* shared global residual */
    error = (double*)malloc(sizeof(double)*GPU_N);
    /* One thread per GPU; each owns a halo-padded row slice of the grid. */
#pragma omp parallel shared (GPU_N, u, uold, f, m, n, error,error_sum) private(idev)
    //  for (idev = 0; idev < GPU_N; idev++)
    {
        int tid = omp_get_thread_num();
        cudaSetDevice(tid);
        /* Split n rows as evenly as possible; the first n%GPU_N threads get
         * one extra row, then extend each slice by halosize ghost rows. */
        int size = n / GPU_N;
        int offset = size * tid;
        if(tid < n%GPU_N) {
            size++;
        }
        if(tid >= n%GPU_N)
            offset += n%GPU_N;
        else
            offset += tid;
        if(tid != 0)
            offset = offset - halosize;
        size = size + halosize;
        if(tid != GPU_N-1 && tid != 0)
            size = size + halosize;
        printf("thread %d working on GPU devices %d with size %d copying data from y_ompacc with offset %d\n",tid, tid, size,offset);
        int k;
        k = 1;
        error_sum = 10.0 * tol;
        /* Copy new solution into old */
        while ((k<=mits)&&(error_sum>tol)) {
#pragma omp barrier
#pragma omp master
            {
                tmp = u;
                u = uold;
                uold = tmp;
                error_sum = 0.0;
            }
            error[tid] = 0.0;
#pragma omp barrier
            jacobi_GPU(u, uold, f, dx, dy, offset, m,size,&error[tid]);
            /* Error check */
            k = k + 1;
#pragma omp master
            {
                if (k%500==0)
                    printf("GPU_run: finished %d iteration.\n",k);
            }
#pragma omp critical
            {
                error_sum += error[tid];
                //	printf("thread %d error = %f\n",tid, error_sum);
            }
#pragma omp barrier
#pragma omp master
            {
                error_sum = sqrt(error_sum)/(n*m);
                //      printf("%d %e %e\n",k,error_sum,tol);
            }
            /* NOTE(review): no barrier between the master's error_sum update
             * above and the while-condition re-check -- non-master threads
             * may read error_sum before or after the sqrt, a data race.
             * A "#pragma omp barrier" here would close it; left unchanged in
             * this documentation-only pass. */
        } /* End iteration loop */
#pragma omp critical
#pragma omp master
        {
            printf("Total Number of Iterations:%d\n",k);
            printf("Residual:%E\n", error_sum);
            printf("Residual_ref :%E\n", resid_ref);
            printf ("Diff ref=%E\n", fabs(error_sum-resid_ref));
            assert (fabs(error_sum-resid_ref)/resid_ref < 1E-5);
        }
    } // end of idev loop
    acc_elapsed = omp_get_wtime() - acc_elapsed;
    free(error);

    printf("=======================================================================\n");
    printf("\t\tmatmul(%dx%d) example on %d threads(cores)\n", n, n, num_threads);
    printf("-----------------------------------------------------------------------\n");
    printf("Performance: Runtime (s)\t MFLOPS\t\t\t Error\n");
    printf("-----------------------------------------------------------------------\n");
    //  printf("Sequential : %4f \t\t %4f\t\t%g\n", seq_elapsed, 2.0 * n * n * n / (1.0e6 * (seq_elapsed)), maxerror(C_seq, C_seq, n));
    printf("OMP For : %4f \t\t %4f\t\t\n", omp_for_elapsed, 2.0 * n * n * n / (1.0e6 * (omp_for_elapsed)));
    printf("ACC For : %4f \t\t %4f\t\t\n", acc_elapsed, 2.0 * n * n * n / (1.0e6 * (acc_elapsed)));

    free(u);
    free(uold);
    free(f);
    return 0;
}
convolutiondepthwise_3x3_pack16.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); __m512 _bias0 = bias ? 
_mm512_loadu_ps((const float*)bias + g * 16) : _mm512_setzero_ps(); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); float* outptr1 = out.row(1); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* r3 = img0.row(3); __m512 _k00 = _mm512_load_ps(k0); __m512 _k01 = _mm512_load_ps(k0 + 16); __m512 _k02 = _mm512_load_ps(k0 + 32); __m512 _k10 = _mm512_load_ps(k0 + 48); __m512 _k11 = _mm512_load_ps(k0 + 64); __m512 _k12 = _mm512_load_ps(k0 + 80); __m512 _k20 = _mm512_load_ps(k0 + 96); __m512 _k21 = _mm512_load_ps(k0 + 112); __m512 _k22 = _mm512_load_ps(k0 + 128); int i = 0; for (; i + 1 < outh; i += 2) { int j = 0; for (; j + 3 < outw; j += 4) { __m512 _sum00 = _bias0; __m512 _sum01 = _bias0; __m512 _sum02 = _bias0; __m512 _sum03 = _bias0; __m512 _sum10 = _bias0; __m512 _sum11 = _bias0; __m512 _sum12 = _bias0; __m512 _sum13 = _bias0; __m512 _r00 = _mm512_load_ps(r0); __m512 _r01 = _mm512_load_ps(r0 + 16); __m512 _r02 = _mm512_load_ps(r0 + 32); __m512 _r03 = _mm512_load_ps(r0 + 48); __m512 _r04 = _mm512_load_ps(r0 + 64); __m512 _r05 = _mm512_load_ps(r0 + 80); _sum00 = _mm512_fmadd_ps(_k00, _r00, _sum00); _sum01 = _mm512_fmadd_ps(_k00, _r01, _sum01); _sum02 = _mm512_fmadd_ps(_k00, _r02, _sum02); _sum03 = _mm512_fmadd_ps(_k00, _r03, _sum03); _sum00 = _mm512_fmadd_ps(_k01, _r01, _sum00); _sum01 = _mm512_fmadd_ps(_k01, _r02, _sum01); _sum02 = _mm512_fmadd_ps(_k01, _r03, _sum02); _sum03 = _mm512_fmadd_ps(_k01, _r04, _sum03); _sum00 = _mm512_fmadd_ps(_k02, _r02, _sum00); _sum01 = _mm512_fmadd_ps(_k02, _r03, _sum01); _sum02 = _mm512_fmadd_ps(_k02, _r04, _sum02); _sum03 = _mm512_fmadd_ps(_k02, _r05, _sum03); __m512 _r10 = _mm512_load_ps(r1); __m512 _r11 = _mm512_load_ps(r1 + 16); __m512 _r12 = _mm512_load_ps(r1 + 32); __m512 _r13 = _mm512_load_ps(r1 + 48); __m512 _r14 = _mm512_load_ps(r1 + 64); __m512 _r15 = _mm512_load_ps(r1 + 80); _sum10 = _mm512_fmadd_ps(_k00, _r10, 
_sum10); _sum11 = _mm512_fmadd_ps(_k00, _r11, _sum11); _sum12 = _mm512_fmadd_ps(_k00, _r12, _sum12); _sum13 = _mm512_fmadd_ps(_k00, _r13, _sum13); _sum00 = _mm512_fmadd_ps(_k10, _r10, _sum00); _sum01 = _mm512_fmadd_ps(_k10, _r11, _sum01); _sum02 = _mm512_fmadd_ps(_k10, _r12, _sum02); _sum03 = _mm512_fmadd_ps(_k10, _r13, _sum03); _sum10 = _mm512_fmadd_ps(_k01, _r11, _sum10); _sum11 = _mm512_fmadd_ps(_k01, _r12, _sum11); _sum12 = _mm512_fmadd_ps(_k01, _r13, _sum12); _sum13 = _mm512_fmadd_ps(_k01, _r14, _sum13); _sum00 = _mm512_fmadd_ps(_k11, _r11, _sum00); _sum01 = _mm512_fmadd_ps(_k11, _r12, _sum01); _sum02 = _mm512_fmadd_ps(_k11, _r13, _sum02); _sum03 = _mm512_fmadd_ps(_k11, _r14, _sum03); _sum10 = _mm512_fmadd_ps(_k02, _r12, _sum10); _sum11 = _mm512_fmadd_ps(_k02, _r13, _sum11); _sum12 = _mm512_fmadd_ps(_k02, _r14, _sum12); _sum13 = _mm512_fmadd_ps(_k02, _r15, _sum13); _sum00 = _mm512_fmadd_ps(_k12, _r12, _sum00); _sum01 = _mm512_fmadd_ps(_k12, _r13, _sum01); _sum02 = _mm512_fmadd_ps(_k12, _r14, _sum02); _sum03 = _mm512_fmadd_ps(_k12, _r15, _sum03); __m512 _r20 = _mm512_load_ps(r2); __m512 _r21 = _mm512_load_ps(r2 + 16); __m512 _r22 = _mm512_load_ps(r2 + 32); __m512 _r23 = _mm512_load_ps(r2 + 48); __m512 _r24 = _mm512_load_ps(r2 + 64); __m512 _r25 = _mm512_load_ps(r2 + 80); _sum10 = _mm512_fmadd_ps(_k10, _r20, _sum10); _sum11 = _mm512_fmadd_ps(_k10, _r21, _sum11); _sum12 = _mm512_fmadd_ps(_k10, _r22, _sum12); _sum13 = _mm512_fmadd_ps(_k10, _r23, _sum13); _sum00 = _mm512_fmadd_ps(_k20, _r20, _sum00); _sum01 = _mm512_fmadd_ps(_k20, _r21, _sum01); _sum02 = _mm512_fmadd_ps(_k20, _r22, _sum02); _sum03 = _mm512_fmadd_ps(_k20, _r23, _sum03); _sum10 = _mm512_fmadd_ps(_k11, _r21, _sum10); _sum11 = _mm512_fmadd_ps(_k11, _r22, _sum11); _sum12 = _mm512_fmadd_ps(_k11, _r23, _sum12); _sum13 = _mm512_fmadd_ps(_k11, _r24, _sum13); _sum00 = _mm512_fmadd_ps(_k21, _r21, _sum00); _sum01 = _mm512_fmadd_ps(_k21, _r22, _sum01); _sum02 = _mm512_fmadd_ps(_k21, _r23, _sum02); _sum03 = 
_mm512_fmadd_ps(_k21, _r24, _sum03); _sum10 = _mm512_fmadd_ps(_k12, _r22, _sum10); _sum11 = _mm512_fmadd_ps(_k12, _r23, _sum11); _sum12 = _mm512_fmadd_ps(_k12, _r24, _sum12); _sum13 = _mm512_fmadd_ps(_k12, _r25, _sum13); _sum00 = _mm512_fmadd_ps(_k22, _r22, _sum00); _sum01 = _mm512_fmadd_ps(_k22, _r23, _sum01); _sum02 = _mm512_fmadd_ps(_k22, _r24, _sum02); _sum03 = _mm512_fmadd_ps(_k22, _r25, _sum03); __m512 _r30 = _mm512_load_ps(r3); __m512 _r31 = _mm512_load_ps(r3 + 16); __m512 _r32 = _mm512_load_ps(r3 + 32); __m512 _r33 = _mm512_load_ps(r3 + 48); __m512 _r34 = _mm512_load_ps(r3 + 64); __m512 _r35 = _mm512_load_ps(r3 + 80); _sum10 = _mm512_fmadd_ps(_k20, _r30, _sum10); _sum11 = _mm512_fmadd_ps(_k20, _r31, _sum11); _sum12 = _mm512_fmadd_ps(_k20, _r32, _sum12); _sum13 = _mm512_fmadd_ps(_k20, _r33, _sum13); _sum10 = _mm512_fmadd_ps(_k21, _r31, _sum10); _sum11 = _mm512_fmadd_ps(_k21, _r32, _sum11); _sum12 = _mm512_fmadd_ps(_k21, _r33, _sum12); _sum13 = _mm512_fmadd_ps(_k21, _r34, _sum13); _sum10 = _mm512_fmadd_ps(_k22, _r32, _sum10); _sum11 = _mm512_fmadd_ps(_k22, _r33, _sum11); _sum12 = _mm512_fmadd_ps(_k22, _r34, _sum12); _sum13 = _mm512_fmadd_ps(_k22, _r35, _sum13); _mm512_store_ps(outptr0, _sum00); _mm512_store_ps(outptr0 + 16, _sum01); _mm512_store_ps(outptr0 + 32, _sum02); _mm512_store_ps(outptr0 + 48, _sum03); _mm512_store_ps(outptr1, _sum10); _mm512_store_ps(outptr1 + 16, _sum11); _mm512_store_ps(outptr1 + 32, _sum12); _mm512_store_ps(outptr1 + 48, _sum13); r0 += 64; r1 += 64; r2 += 64; r3 += 64; outptr0 += 64; outptr1 += 64; } for (; j + 1 < outw; j += 2) { __m512 _sum00 = _bias0; __m512 _sum01 = _bias0; __m512 _sum10 = _bias0; __m512 _sum11 = _bias0; __m512 _r00 = _mm512_load_ps(r0); __m512 _r01 = _mm512_load_ps(r0 + 16); __m512 _r02 = _mm512_load_ps(r0 + 32); __m512 _r03 = _mm512_load_ps(r0 + 48); _sum00 = _mm512_fmadd_ps(_k00, _r00, _sum00); _sum01 = _mm512_fmadd_ps(_k00, _r01, _sum01); _sum00 = _mm512_fmadd_ps(_k01, _r01, _sum00); _sum01 = 
_mm512_fmadd_ps(_k01, _r02, _sum01); _sum00 = _mm512_fmadd_ps(_k02, _r02, _sum00); _sum01 = _mm512_fmadd_ps(_k02, _r03, _sum01); __m512 _r10 = _mm512_load_ps(r1); __m512 _r11 = _mm512_load_ps(r1 + 16); __m512 _r12 = _mm512_load_ps(r1 + 32); __m512 _r13 = _mm512_load_ps(r1 + 48); _sum00 = _mm512_fmadd_ps(_k10, _r10, _sum00); _sum01 = _mm512_fmadd_ps(_k10, _r11, _sum01); _sum10 = _mm512_fmadd_ps(_k00, _r10, _sum10); _sum11 = _mm512_fmadd_ps(_k00, _r11, _sum11); _sum00 = _mm512_fmadd_ps(_k11, _r11, _sum00); _sum01 = _mm512_fmadd_ps(_k11, _r12, _sum01); _sum10 = _mm512_fmadd_ps(_k01, _r11, _sum10); _sum11 = _mm512_fmadd_ps(_k01, _r12, _sum11); _sum00 = _mm512_fmadd_ps(_k12, _r12, _sum00); _sum01 = _mm512_fmadd_ps(_k12, _r13, _sum01); _sum10 = _mm512_fmadd_ps(_k02, _r12, _sum10); _sum11 = _mm512_fmadd_ps(_k02, _r13, _sum11); __m512 _r20 = _mm512_load_ps(r2); __m512 _r21 = _mm512_load_ps(r2 + 16); __m512 _r22 = _mm512_load_ps(r2 + 32); __m512 _r23 = _mm512_load_ps(r2 + 48); _sum00 = _mm512_fmadd_ps(_k20, _r20, _sum00); _sum01 = _mm512_fmadd_ps(_k20, _r21, _sum01); _sum10 = _mm512_fmadd_ps(_k10, _r20, _sum10); _sum11 = _mm512_fmadd_ps(_k10, _r21, _sum11); _sum00 = _mm512_fmadd_ps(_k21, _r21, _sum00); _sum01 = _mm512_fmadd_ps(_k21, _r22, _sum01); _sum10 = _mm512_fmadd_ps(_k11, _r21, _sum10); _sum11 = _mm512_fmadd_ps(_k11, _r22, _sum11); _sum00 = _mm512_fmadd_ps(_k22, _r22, _sum00); _sum01 = _mm512_fmadd_ps(_k22, _r23, _sum01); _sum10 = _mm512_fmadd_ps(_k12, _r22, _sum10); _sum11 = _mm512_fmadd_ps(_k12, _r23, _sum11); __m512 _r30 = _mm512_load_ps(r3); __m512 _r31 = _mm512_load_ps(r3 + 16); __m512 _r32 = _mm512_load_ps(r3 + 32); __m512 _r33 = _mm512_load_ps(r3 + 48); _sum10 = _mm512_fmadd_ps(_k20, _r30, _sum10); _sum11 = _mm512_fmadd_ps(_k20, _r31, _sum11); _sum10 = _mm512_fmadd_ps(_k21, _r31, _sum10); _sum11 = _mm512_fmadd_ps(_k21, _r32, _sum11); _sum10 = _mm512_fmadd_ps(_k22, _r32, _sum10); _sum11 = _mm512_fmadd_ps(_k22, _r33, _sum11); _mm512_store_ps(outptr0, _sum00); 
_mm512_store_ps(outptr0 + 16, _sum01); _mm512_store_ps(outptr1, _sum10); _mm512_store_ps(outptr1 + 16, _sum11); r0 += 32; r1 += 32; r2 += 32; r3 += 32; outptr0 += 32; outptr1 += 32; } for (; j < outw; j++) { __m512 _sum0 = _bias0; __m512 _sum1 = _bias0; __m512 _r00 = _mm512_load_ps(r0); __m512 _r01 = _mm512_load_ps(r0 + 16); __m512 _r02 = _mm512_load_ps(r0 + 32); _sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0); __m512 _r10 = _mm512_load_ps(r1); __m512 _r11 = _mm512_load_ps(r1 + 16); __m512 _r12 = _mm512_load_ps(r1 + 32); _sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0); _sum1 = _mm512_fmadd_ps(_k00, _r10, _sum1); _sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0); _sum1 = _mm512_fmadd_ps(_k01, _r11, _sum1); _sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0); _sum1 = _mm512_fmadd_ps(_k02, _r12, _sum1); __m512 _r20 = _mm512_load_ps(r2); __m512 _r21 = _mm512_load_ps(r2 + 16); __m512 _r22 = _mm512_load_ps(r2 + 32); _sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0); _sum1 = _mm512_fmadd_ps(_k10, _r20, _sum1); _sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0); _sum1 = _mm512_fmadd_ps(_k11, _r21, _sum1); _sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0); _sum1 = _mm512_fmadd_ps(_k12, _r22, _sum1); __m512 _r30 = _mm512_load_ps(r3); __m512 _r31 = _mm512_load_ps(r3 + 16); __m512 _r32 = _mm512_load_ps(r3 + 32); _sum1 = _mm512_fmadd_ps(_k20, _r30, _sum1); _sum1 = _mm512_fmadd_ps(_k21, _r31, _sum1); _sum1 = _mm512_fmadd_ps(_k22, _r32, _sum1); _mm512_store_ps(outptr0, _sum0); _mm512_store_ps(outptr1, _sum1); r0 += 16; r1 += 16; r2 += 16; r3 += 16; outptr0 += 16; outptr1 += 16; } r0 += 2 * 16 + w * 16; r1 += 2 * 16 + w * 16; r2 += 2 * 16 + w * 16; r3 += 2 * 16 + w * 16; outptr0 += outw * 16; outptr1 += outw * 16; } for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { __m512 _sum0 = _bias0; __m512 _sum1 = _bias0; __m512 _sum2 = _bias0; __m512 _sum3 = _bias0; __m512 _r00 = _mm512_load_ps(r0); __m512 _r01 = _mm512_load_ps(r0 + 
16); __m512 _r02 = _mm512_load_ps(r0 + 32); __m512 _r03 = _mm512_load_ps(r0 + 48); __m512 _r04 = _mm512_load_ps(r0 + 64); __m512 _r05 = _mm512_load_ps(r0 + 80); _sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0); _sum1 = _mm512_fmadd_ps(_k00, _r01, _sum1); _sum2 = _mm512_fmadd_ps(_k00, _r02, _sum2); _sum3 = _mm512_fmadd_ps(_k00, _r03, _sum3); _sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0); _sum1 = _mm512_fmadd_ps(_k01, _r02, _sum1); _sum2 = _mm512_fmadd_ps(_k01, _r03, _sum2); _sum3 = _mm512_fmadd_ps(_k01, _r04, _sum3); _sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0); _sum1 = _mm512_fmadd_ps(_k02, _r03, _sum1); _sum2 = _mm512_fmadd_ps(_k02, _r04, _sum2); _sum3 = _mm512_fmadd_ps(_k02, _r05, _sum3); __m512 _r10 = _mm512_load_ps(r1); __m512 _r11 = _mm512_load_ps(r1 + 16); __m512 _r12 = _mm512_load_ps(r1 + 32); __m512 _r13 = _mm512_load_ps(r1 + 48); __m512 _r14 = _mm512_load_ps(r1 + 64); __m512 _r15 = _mm512_load_ps(r1 + 80); _sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0); _sum1 = _mm512_fmadd_ps(_k10, _r11, _sum1); _sum2 = _mm512_fmadd_ps(_k10, _r12, _sum2); _sum3 = _mm512_fmadd_ps(_k10, _r13, _sum3); _sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0); _sum1 = _mm512_fmadd_ps(_k11, _r12, _sum1); _sum2 = _mm512_fmadd_ps(_k11, _r13, _sum2); _sum3 = _mm512_fmadd_ps(_k11, _r14, _sum3); _sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0); _sum1 = _mm512_fmadd_ps(_k12, _r13, _sum1); _sum2 = _mm512_fmadd_ps(_k12, _r14, _sum2); _sum3 = _mm512_fmadd_ps(_k12, _r15, _sum3); __m512 _r20 = _mm512_load_ps(r2); __m512 _r21 = _mm512_load_ps(r2 + 16); __m512 _r22 = _mm512_load_ps(r2 + 32); __m512 _r23 = _mm512_load_ps(r2 + 48); __m512 _r24 = _mm512_load_ps(r2 + 64); __m512 _r25 = _mm512_load_ps(r2 + 80); _sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0); _sum1 = _mm512_fmadd_ps(_k20, _r21, _sum1); _sum2 = _mm512_fmadd_ps(_k20, _r22, _sum2); _sum3 = _mm512_fmadd_ps(_k20, _r23, _sum3); _sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0); _sum1 = _mm512_fmadd_ps(_k21, _r22, _sum1); _sum2 = _mm512_fmadd_ps(_k21, _r23, _sum2); _sum3 = 
_mm512_fmadd_ps(_k21, _r24, _sum3); _sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0); _sum1 = _mm512_fmadd_ps(_k22, _r23, _sum1); _sum2 = _mm512_fmadd_ps(_k22, _r24, _sum2); _sum3 = _mm512_fmadd_ps(_k22, _r25, _sum3); _mm512_store_ps(outptr0, _sum0); _mm512_store_ps(outptr0 + 16, _sum1); _mm512_store_ps(outptr0 + 32, _sum2); _mm512_store_ps(outptr0 + 48, _sum3); r0 += 64; r1 += 64; r2 += 64; outptr0 += 64; } for (; j + 1 < outw; j += 2) { __m512 _sum0 = _bias0; __m512 _sum1 = _bias0; __m512 _r00 = _mm512_load_ps(r0); __m512 _r01 = _mm512_load_ps(r0 + 16); __m512 _r02 = _mm512_load_ps(r0 + 32); __m512 _r03 = _mm512_load_ps(r0 + 48); _sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0); _sum1 = _mm512_fmadd_ps(_k00, _r01, _sum1); _sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0); _sum1 = _mm512_fmadd_ps(_k01, _r02, _sum1); _sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0); _sum1 = _mm512_fmadd_ps(_k02, _r03, _sum1); __m512 _r10 = _mm512_load_ps(r1); __m512 _r11 = _mm512_load_ps(r1 + 16); __m512 _r12 = _mm512_load_ps(r1 + 32); __m512 _r13 = _mm512_load_ps(r1 + 48); _sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0); _sum1 = _mm512_fmadd_ps(_k10, _r11, _sum1); _sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0); _sum1 = _mm512_fmadd_ps(_k11, _r12, _sum1); _sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0); _sum1 = _mm512_fmadd_ps(_k12, _r13, _sum1); __m512 _r20 = _mm512_load_ps(r2); __m512 _r21 = _mm512_load_ps(r2 + 16); __m512 _r22 = _mm512_load_ps(r2 + 32); __m512 _r23 = _mm512_load_ps(r2 + 48); _sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0); _sum1 = _mm512_fmadd_ps(_k20, _r21, _sum1); _sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0); _sum1 = _mm512_fmadd_ps(_k21, _r22, _sum1); _sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0); _sum1 = _mm512_fmadd_ps(_k22, _r23, _sum1); _mm512_store_ps(outptr0, _sum0); _mm512_store_ps(outptr0 + 16, _sum1); r0 += 32; r1 += 32; r2 += 32; outptr0 += 32; } for (; j < outw; j++) { __m512 _sum0 = _bias0; __m512 _r00 = _mm512_load_ps(r0); __m512 _r01 = _mm512_load_ps(r0 + 16); __m512 _r02 = _mm512_load_ps(r0 + 
32); _sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0); __m512 _r10 = _mm512_load_ps(r1); __m512 _r11 = _mm512_load_ps(r1 + 16); __m512 _r12 = _mm512_load_ps(r1 + 32); _sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0); __m512 _r20 = _mm512_load_ps(r2); __m512 _r21 = _mm512_load_ps(r2 + 16); __m512 _r22 = _mm512_load_ps(r2 + 32); _sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0); _mm512_store_ps(outptr0, _sum0); r0 += 16; r1 += 16; r2 += 16; outptr0 += 16; } r0 += 2 * 16; r1 += 2 * 16; r2 += 2 * 16; } } } static void convdw3x3s2_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 16; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); __m512 _bias0 = bias ? 
_mm512_loadu_ps((const float*)bias + g * 16) : _mm512_setzero_ps(); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m512 _k00 = _mm512_load_ps(k0); __m512 _k01 = _mm512_load_ps(k0 + 16); __m512 _k02 = _mm512_load_ps(k0 + 32); __m512 _k10 = _mm512_load_ps(k0 + 48); __m512 _k11 = _mm512_load_ps(k0 + 64); __m512 _k12 = _mm512_load_ps(k0 + 80); __m512 _k20 = _mm512_load_ps(k0 + 96); __m512 _k21 = _mm512_load_ps(k0 + 112); __m512 _k22 = _mm512_load_ps(k0 + 128); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { __m512 _sum0 = _bias0; __m512 _sum1 = _bias0; __m512 _sum2 = _bias0; __m512 _sum3 = _bias0; __m512 _r00 = _mm512_load_ps(r0); __m512 _r01 = _mm512_load_ps(r0 + 16); __m512 _r02 = _mm512_load_ps(r0 + 32); __m512 _r03 = _mm512_load_ps(r0 + 48); __m512 _r04 = _mm512_load_ps(r0 + 64); __m512 _r05 = _mm512_load_ps(r0 + 80); __m512 _r06 = _mm512_load_ps(r0 + 96); __m512 _r07 = _mm512_load_ps(r0 + 112); __m512 _r08 = _mm512_load_ps(r0 + 128); _sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0); _sum1 = _mm512_fmadd_ps(_k00, _r02, _sum1); _sum2 = _mm512_fmadd_ps(_k00, _r04, _sum2); _sum3 = _mm512_fmadd_ps(_k00, _r06, _sum3); _sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0); _sum1 = _mm512_fmadd_ps(_k01, _r03, _sum1); _sum2 = _mm512_fmadd_ps(_k01, _r05, _sum2); _sum3 = _mm512_fmadd_ps(_k01, _r07, _sum3); _sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0); _sum1 = _mm512_fmadd_ps(_k02, _r04, _sum1); _sum2 = _mm512_fmadd_ps(_k02, _r06, _sum2); _sum3 = _mm512_fmadd_ps(_k02, _r08, _sum3); __m512 _r10 = _mm512_load_ps(r1); __m512 _r11 = _mm512_load_ps(r1 + 16); __m512 _r12 = _mm512_load_ps(r1 + 32); __m512 _r13 = _mm512_load_ps(r1 + 48); __m512 _r14 = _mm512_load_ps(r1 + 64); __m512 _r15 = _mm512_load_ps(r1 + 80); __m512 _r16 = _mm512_load_ps(r1 + 96); __m512 _r17 = _mm512_load_ps(r1 + 112); __m512 _r18 = _mm512_load_ps(r1 + 
128); _sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0); _sum1 = _mm512_fmadd_ps(_k10, _r12, _sum1); _sum2 = _mm512_fmadd_ps(_k10, _r14, _sum2); _sum3 = _mm512_fmadd_ps(_k10, _r16, _sum3); _sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0); _sum1 = _mm512_fmadd_ps(_k11, _r13, _sum1); _sum2 = _mm512_fmadd_ps(_k11, _r15, _sum2); _sum3 = _mm512_fmadd_ps(_k11, _r17, _sum3); _sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0); _sum1 = _mm512_fmadd_ps(_k12, _r14, _sum1); _sum2 = _mm512_fmadd_ps(_k12, _r16, _sum2); _sum3 = _mm512_fmadd_ps(_k12, _r18, _sum3); __m512 _r20 = _mm512_load_ps(r2); __m512 _r21 = _mm512_load_ps(r2 + 16); __m512 _r22 = _mm512_load_ps(r2 + 32); __m512 _r23 = _mm512_load_ps(r2 + 48); __m512 _r24 = _mm512_load_ps(r2 + 64); __m512 _r25 = _mm512_load_ps(r2 + 80); __m512 _r26 = _mm512_load_ps(r2 + 96); __m512 _r27 = _mm512_load_ps(r2 + 112); __m512 _r28 = _mm512_load_ps(r2 + 128); _sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0); _sum1 = _mm512_fmadd_ps(_k20, _r22, _sum1); _sum2 = _mm512_fmadd_ps(_k20, _r24, _sum2); _sum3 = _mm512_fmadd_ps(_k20, _r26, _sum3); _sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0); _sum1 = _mm512_fmadd_ps(_k21, _r23, _sum1); _sum2 = _mm512_fmadd_ps(_k21, _r25, _sum2); _sum3 = _mm512_fmadd_ps(_k21, _r27, _sum3); _sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0); _sum1 = _mm512_fmadd_ps(_k22, _r24, _sum1); _sum2 = _mm512_fmadd_ps(_k22, _r26, _sum2); _sum3 = _mm512_fmadd_ps(_k22, _r28, _sum3); _mm512_store_ps(outptr0, _sum0); _mm512_store_ps(outptr0 + 16, _sum1); _mm512_store_ps(outptr0 + 32, _sum2); _mm512_store_ps(outptr0 + 48, _sum3); r0 += 2 * 64; r1 += 2 * 64; r2 += 2 * 64; outptr0 += 64; } for (; j + 1 < outw; j += 2) { __m512 _sum0 = _bias0; __m512 _sum1 = _bias0; __m512 _r00 = _mm512_load_ps(r0); __m512 _r01 = _mm512_load_ps(r0 + 16); __m512 _r02 = _mm512_load_ps(r0 + 32); __m512 _r03 = _mm512_load_ps(r0 + 48); __m512 _r04 = _mm512_load_ps(r0 + 64); _sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0); _sum1 = _mm512_fmadd_ps(_k00, _r02, _sum1); _sum0 = 
_mm512_fmadd_ps(_k01, _r01, _sum0); _sum1 = _mm512_fmadd_ps(_k01, _r03, _sum1); _sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0); _sum1 = _mm512_fmadd_ps(_k02, _r04, _sum1); __m512 _r10 = _mm512_load_ps(r1); __m512 _r11 = _mm512_load_ps(r1 + 16); __m512 _r12 = _mm512_load_ps(r1 + 32); __m512 _r13 = _mm512_load_ps(r1 + 48); __m512 _r14 = _mm512_load_ps(r1 + 64); _sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0); _sum1 = _mm512_fmadd_ps(_k10, _r12, _sum1); _sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0); _sum1 = _mm512_fmadd_ps(_k11, _r13, _sum1); _sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0); _sum1 = _mm512_fmadd_ps(_k12, _r14, _sum1); __m512 _r20 = _mm512_load_ps(r2); __m512 _r21 = _mm512_load_ps(r2 + 16); __m512 _r22 = _mm512_load_ps(r2 + 32); __m512 _r23 = _mm512_load_ps(r2 + 48); __m512 _r24 = _mm512_load_ps(r2 + 64); _sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0); _sum1 = _mm512_fmadd_ps(_k20, _r22, _sum1); _sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0); _sum1 = _mm512_fmadd_ps(_k21, _r23, _sum1); _sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0); _sum1 = _mm512_fmadd_ps(_k22, _r24, _sum1); _mm512_store_ps(outptr0, _sum0); _mm512_store_ps(outptr0 + 16, _sum1); r0 += 2 * 32; r1 += 2 * 32; r2 += 2 * 32; outptr0 += 32; } for (; j < outw; j++) { __m512 _sum0 = _bias0; __m512 _r00 = _mm512_load_ps(r0); __m512 _r01 = _mm512_load_ps(r0 + 16); __m512 _r02 = _mm512_load_ps(r0 + 32); _sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0); __m512 _r10 = _mm512_load_ps(r1); __m512 _r11 = _mm512_load_ps(r1 + 16); __m512 _r12 = _mm512_load_ps(r1 + 32); _sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0); __m512 _r20 = _mm512_load_ps(r2); __m512 _r21 = _mm512_load_ps(r2 + 16); __m512 _r22 = _mm512_load_ps(r2 + 32); _sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0); 
_mm512_store_ps(outptr0, _sum0); r0 += 2 * 16; r1 += 2 * 16; r2 += 2 * 16; outptr0 += 16; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
syr2k.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "syr2k.h" /* Array initialization. */ static void init_array(int ni, int nj, DATA_TYPE *alpha, DATA_TYPE *beta, DATA_TYPE POLYBENCH_2D(C,NI,NI,ni,ni), DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i __attribute__((annotate("scalar(range(0, " PB_XSTR(NI) ") final)"))); int j __attribute__((annotate("scalar(range(0, " PB_XSTR(NJ) ") final)"))); *alpha = 32412; *beta = 2123; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) i*j) / ni; B[i][j] = ((DATA_TYPE) i*j) / ni; } for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) C[i][j] = ((DATA_TYPE) i*j) / ni; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, DATA_TYPE POLYBENCH_2D(C,NI,NI,ni,ni)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_syr2k(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(C,NI,NI,ni,ni), DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j, k; #pragma scop #pragma omp parallel { /* C := alpha*A*B' + alpha*B*A' + beta*C */ #pragma omp for private (j) schedule(static) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NI; j++) C[i][j] *= beta; #pragma omp for private (j, k) schedule(static) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NI; j++) for (k = 0; k < _PB_NJ; k++) { C[i][j] += alpha * A[i][k] * B[j][k]; C[i][j] += alpha * B[i][k] * A[j][k]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ DATA_TYPE alpha __attribute__((annotate("target('alpha') scalar()"))); DATA_TYPE beta __attribute__((annotate("target('beta') scalar()"))); POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE __attribute__((annotate("target('C') scalar(range(0, 120000000000000) final)"))),NI,NI,ni,ni); POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE __attribute__((annotate("target('A') scalar()"))),NI,NJ,ni,nj); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE __attribute__((annotate("target('B') scalar()"))),NI,NJ,ni,nj); /* Initialize array(s). */ init_array (ni, nj, &alpha, &beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_syr2k (ni, nj, alpha, beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, POLYBENCH_ARRAY(C))); /* Be clean. */ POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
GB_unop__identity_bool_int64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): this kernel instantiates the IDENTITY unary op for C=bool,
// A=int64_t; the "identity" still typecasts (bool cij = (bool) aij), which is
// why GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0 below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_bool_int64
// op(A') function:  GB_unop_tran__identity_bool_int64

// C type:   bool
// A type:   int64_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_bool_int64
(
    bool *Cx,               // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // Ab == NULL: A is not bitmap, so all anz entries are live.
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // compiled out for this kernel: the bool <- int64_t cast makes
        // GB_OP_IS_IDENTITY_WITH_NO_TYPECAST 0, so no raw memcpy here
        GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (entries with Ab [p] == 0 are absent and skipped)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_bool_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // the transpose loop itself lives in the shared template below, which
    // expands using the GB_* macros defined at the top of this file
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__ge_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): instantiates the GE (>=) comparator for uint32_t inputs with a
// bool result.  GB(...) presumably mangles the kernel name and GBX(...) reads
// an entry honoring the iso flag — both come from "GB.h"; confirm there.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__ge_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__ge_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__ge_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__ge_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__ge_uint32)
// A*D function (colscale):         GB (_AxD__ge_uint32)
// D*A function (rowscale):         GB (_DxB__ge_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__ge_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__ge_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__ge_uint32)
// C=scalar+B                       GB (_bind1st__ge_uint32)
// C=scalar+B'                      GB (_bind1st_tran__ge_uint32)
// C=A+scalar                       GB (_bind2nd__ge_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__ge_uint32)

// C type:   bool
// A type:   uint32_t
// A pattern? 0
// B type:   uint32_t
// B pattern? 0

// BinaryOp: cij = (aij >= bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GE || GxB_NO_UINT32 || GxB_NO_GE_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// GE is a comparator, not a monoid op, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__ge_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__ge_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the subassign template is compiled out for this operator (#if 0)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__ge_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // compiled out for this operator (#if 0), same as the accumB case above
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__ge_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__ge_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ge_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true; otherwise they
    // stay uninitialized and the template must not touch them
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__ge_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ge_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for GE, so only this branch is compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__ge_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ge_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ge_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB skips entries absent from the bitmap Bb; GBX reads the value
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ge_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x >= aij) ; \
}

GrB_Info GB (_bind1st_tran__ge_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij >= y) ; \
}

GrB_Info GB (_bind2nd_tran__ge_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
neutral.c
#include "neutral.h"
#include "../../comms.h"
#include "../../params.h"
#include "../../shared.h"
#include "../../shared_data.h"
#include "../neutral_interface.h"
#include <assert.h>
#include <float.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef MPI
#include "mpi.h"
#endif

// Performs a solve of dependent variables for particle transport.
// Thin driver: bails out when no particles remain, otherwise hands the whole
// active population to handle_particles with initial=1 (fresh census step).
// NOTE(review): reduce_array0/1/2 are accepted but unused in this function —
// presumably kept for interface compatibility with other ports; confirm.
void solve_transport_2d(
    const int nx, const int ny, const int global_nx, const int global_ny,
    const uint64_t master_key, const int pad, const int x_off, const int y_off,
    const double dt, const int ntotal_particles, int* nparticles,
    const int* neighbours, Particle* particles, const double* density,
    const double* edgex, const double* edgey, const double* edgedx,
    const double* edgedy, CrossSection* cs_scatter_table,
    CrossSection* cs_absorb_table, double* energy_deposition_tally,
    uint64_t* reduce_array0, uint64_t* reduce_array1, uint64_t* reduce_array2,
    uint64_t* facet_events, uint64_t* collision_events) {

  if (!(*nparticles)) {
    printf("Out of particles\n");
    return;
  }

  handle_particles(global_nx, global_ny, nx, ny, master_key, pad, x_off, y_off,
                   1, dt, neighbours, density, edgex, edgey, edgedx, edgedy,
                   facet_events, collision_events, ntotal_particles,
                   *nparticles, particles, cs_scatter_table, cs_absorb_table,
                   energy_deposition_tally);
}

// Handles the current active batch of particles.
// Walks every live particle through its event loop (collision / facet /
// census) until its time-to-census is exhausted or it dies.  Particle state
// is stored structure-of-arrays; the raw pointers are unpacked up front so
// the OpenMP target region maps flat arrays rather than the Particle struct.
// Accumulates facet and collision event counts into *facets / *collisions.
void handle_particles(const int global_nx, const int global_ny, const int nx,
                      const int ny, const uint64_t master_key, const int pad,
                      const int x_off, const int y_off, const int initial,
                      const double dt, const int* neighbours,
                      const double* density, const double* edgex,
                      const double* edgey, const double* edgedx,
                      const double* edgedy, uint64_t* facets,
                      uint64_t* collisions, const int ntotal_particles,
                      const int nparticles_to_process,
                      Particle* particles_start, CrossSection* cs_scatter_table,
                      CrossSection* cs_absorb_table,
                      double* energy_deposition_tally) {

  // NOTE(review): nthreads is computed but not used below — presumably a
  // leftover from a host-threaded variant; confirm before removing.
  int nthreads = 0;
#pragma omp parallel
  { nthreads = omp_get_num_threads(); }

  // Per-batch event counters, reduced over the device loop below
  uint64_t nfacets = 0;
  uint64_t ncollisions = 0;
  uint64_t nparticles = 0;

  // Unpack the structure-of-arrays particle state into plain pointers
  double* p_x = particles_start->x;
  double* p_y = particles_start->y;
  double* p_omega_x = particles_start->omega_x;
  double* p_omega_y = particles_start->omega_y;
  double* p_energy = particles_start->energy;
  double* p_weight = particles_start->weight;
  double* p_dt_to_census = particles_start->dt_to_census;
  double* p_mfp_to_collision = particles_start->mfp_to_collision;
  int* p_cellx = particles_start->cellx;
  int* p_celly = particles_start->celly;
  int* p_dead = particles_start->dead;

  // Unpack the cross-section lookup tables
  double* cs_scatter_table_keys = cs_scatter_table->keys;
  double* cs_scatter_table_values = cs_scatter_table->values;
  int cs_scatter_table_nentries = cs_scatter_table->nentries;
  double* cs_absorb_table_keys = cs_absorb_table->keys;
  double* cs_absorb_table_values = cs_absorb_table->values;
  int cs_absorb_table_nentries = cs_absorb_table->nentries;

  //int nt = nparticles_to_process/128+1;
#pragma omp target teams distribute parallel for simd \
    map(tofrom: nfacets, ncollisions, nparticles) \
    reduction(+: nfacets, ncollisions, nparticles)
  for (int pp = 0; pp < nparticles_to_process; ++pp) {
    // (1) particle can stream and reach census
    // (2) particle can collide and either
    //      - the particle will be absorbed
    //      - the particle will scatter (this means the energy changes)
    // (3) particle encounters boundary region, transports to another cell

    if (p_dead[pp]) {
      continue;
    }

    nparticles++;

    int x_facet = 0;
    int absorb_cs_index = -1;
    int scatter_cs_index = -1;
    double cell_mfp = 0.0;

    // Determine the current cell (local, pad-adjusted indices)
    int cellx = p_cellx[pp] - x_off + pad;
    int celly = p_celly[pp] - y_off + pad;
    double local_density = density[celly * (nx + 2 * pad) + cellx];

    // Fetch the cross sections and prepare related quantities
    double microscopic_cs_scatter;
    microscopic_cs_for_energy(cs_scatter_table_keys, cs_scatter_table_values,
                              cs_scatter_table_nentries, p_energy[pp],
                              &scatter_cs_index, &microscopic_cs_scatter);
    double microscopic_cs_absorb;
    microscopic_cs_for_energy(cs_absorb_table_keys, cs_absorb_table_values,
                              cs_absorb_table_nentries, p_energy[pp],
                              &absorb_cs_index, &microscopic_cs_absorb);
    double number_density = (local_density * AVOGADROS / MOLAR_MASS);
    double macroscopic_cs_scatter =
        number_density * microscopic_cs_scatter * BARNS;
    double macroscopic_cs_absorb =
        number_density * microscopic_cs_absorb * BARNS;
    // Non-relativistic speed from kinetic energy: v = sqrt(2E/m)
    double speed = sqrt((2.0 * p_energy[pp] * eV_TO_J) / PARTICLE_MASS);
    double energy_deposition = 0.0;
    const double inv_ntotal_particles = 1.0 / (double)ntotal_particles;
    uint64_t counter = 0;
    double rn[NRANDOM_NUMBERS];

    // Set time to census and MFPs until collision, unless travelled particle
    if (initial) {
      p_dt_to_census[pp] = dt;
      generate_random_numbers(pp, master_key, counter++, &rn[0], &rn[1]);
      // Exponentially distributed number of mean free paths to next collision
      p_mfp_to_collision[pp] = -log(rn[0]) / macroscopic_cs_scatter;
    }

    // Loop until we have reached census
    while (p_dt_to_census[pp] > 0.0) {
      cell_mfp = 1.0 / (macroscopic_cs_scatter + macroscopic_cs_absorb);

      // Work out the distance until the particle hits a facet
      double distance_to_facet = 0.0;
      calc_distance_to_facet(global_nx, p_x[pp], p_y[pp], pad, x_off, y_off,
                             p_omega_x[pp], p_omega_y[pp], speed, p_cellx[pp],
                             p_celly[pp], &distance_to_facet, &x_facet, edgex,
                             edgey);

      const double distance_to_collision = p_mfp_to_collision[pp] * cell_mfp;
      const double distance_to_census = speed * p_dt_to_census[pp];

      // Check if our next event is a collision
      if (distance_to_collision < distance_to_facet &&
          distance_to_collision < distance_to_census) {
        // Track the total number of collisions
        ncollisions++;

        // Handles a collision event
        int result = collision_event(
            global_nx, nx, x_off, y_off, master_key, inv_ntotal_particles,
            distance_to_collision, local_density, cs_absorb_table_keys,
            cs_scatter_table_keys, cs_absorb_table_values,
            cs_scatter_table_values, cs_absorb_table_nentries,
            cs_scatter_table_nentries, pp, p_x, p_y, p_cellx, p_celly,
            p_weight, p_energy, p_dead, p_omega_x, p_omega_y, p_dt_to_census,
            p_mfp_to_collision, &counter, &energy_deposition, &number_density,
            &microscopic_cs_scatter, &microscopic_cs_absorb,
            &macroscopic_cs_scatter, &macroscopic_cs_absorb,
            energy_deposition_tally, &scatter_cs_index, &absorb_cs_index, rn,
            &speed);

        if (result != PARTICLE_CONTINUE) {
          break;
        }
      }
      // Check if we have reached facet
      else if (distance_to_facet < distance_to_census) {
        // Track the number of facet encounters
        nfacets++;

        // Handle facet event
        int result = facet_event(
            global_nx, global_ny, nx, ny, x_off, y_off, inv_ntotal_particles,
            distance_to_facet, speed, cell_mfp, x_facet, density, neighbours,
            pp, p_energy, p_weight, p_mfp_to_collision, p_dt_to_census, p_x,
            p_y, p_omega_x, p_omega_y, p_cellx, p_celly, &energy_deposition,
            &number_density, &microscopic_cs_scatter, &microscopic_cs_absorb,
            &macroscopic_cs_scatter, &macroscopic_cs_absorb,
            energy_deposition_tally, &cellx, &celly, &local_density);

        if (result != PARTICLE_CONTINUE) {
          break;
        }
      } else {
        // Neither collision nor facet first: stream to census and stop
        census_event(global_nx, nx, x_off, y_off, inv_ntotal_particles,
                     distance_to_census, cell_mfp, pp, p_weight, p_energy, p_x,
                     p_y, p_omega_x, p_omega_y, p_mfp_to_collision,
                     p_dt_to_census, p_cellx, p_celly, &energy_deposition,
                     &number_density, &microscopic_cs_scatter,
                     &microscopic_cs_absorb, energy_deposition_tally);

        break;
      }
    }
  }

  // Store a total number of facets and collisions
  *facets += nfacets;
  *collisions += ncollisions;

  // NOTE(review): nparticles is uint64_t; %llu assumes unsigned long long —
  // PRIu64 from <inttypes.h> would be strictly portable.
  printf("Particles %llu\n", nparticles);
}

// Handles a collision event.
// Deposits energy along the flight path, moves the particle to the collision
// site, then samples absorption vs elastic scattering.  On absorption the
// weight is reduced and the particle may be killed (PARTICLE_DEAD); on
// scattering the energy and direction are updated.  Cross sections, the mfp
// budget and the particle speed are refreshed for the new energy.
// Returns PARTICLE_CONTINUE or PARTICLE_DEAD.
// NOTE(review): the rn[] array parameter is not read in this body — the local
// rn0/rn1 pair is used instead; presumably vestigial, confirm with callers.
int collision_event(
    const int global_nx, const int nx, const int x_off, const int y_off,
    const uint64_t master_key, const double inv_ntotal_particles,
    const double distance_to_collision, const double local_density,
    const double* cs_absorb_table_keys, const double* cs_scatter_table_keys,
    const double* cs_absorb_table_values, const double* cs_scatter_table_values,
    const int cs_absorb_table_nentries, const int cs_scatter_table_nentries,
    const uint64_t pp, double* p_x, double* p_y, int* p_cellx, int* p_celly,
    double* p_weight, double* p_energy, int* p_dead, double* p_omega_x,
    double* p_omega_y, double* p_dt_to_census, double* p_mfp_to_collision,
    uint64_t* counter, double* energy_deposition, double* number_density,
    double* microscopic_cs_scatter, double* microscopic_cs_absorb,
    double* macroscopic_cs_scatter, double* macroscopic_cs_absorb,
    double* energy_deposition_tally, int* scatter_cs_index,
    int* absorb_cs_index, double rn[NRANDOM_NUMBERS], double* speed) {

  // Energy deposition stored locally for collision, not in tally mesh
  add_energy_deposition(global_nx, nx, x_off, y_off, p_energy[pp], p_weight[pp],
                        inv_ntotal_particles, distance_to_collision,
                        *number_density, *microscopic_cs_absorb,
                        *microscopic_cs_scatter + *microscopic_cs_absorb,
                        energy_deposition);

  // Moves the particle to the collision site
  p_x[pp] += distance_to_collision * p_omega_x[pp];
  p_y[pp] += distance_to_collision * p_omega_y[pp];

  // Probability that this collision is an absorption
  const double p_absorb = *macroscopic_cs_absorb /
                          (*macroscopic_cs_scatter + *macroscopic_cs_absorb);

  double rn0;
  double rn1;
  generate_random_numbers(pp, master_key, *counter, &rn0, &rn1);
  (*counter)++;

  if (rn0 < p_absorb) {
    /* Model particle absorption */

    // Find the new particle weight after absorption, saving the energy change
    p_weight[pp] *= (1.0 - p_absorb);

    if (p_energy[pp] < MIN_ENERGY_OF_INTEREST) {
      // Energy is too low, so mark the particle for deletion
      p_dead[pp] = 1;

      // Need to store tally information as finished with particle
      update_tallies(nx, x_off, y_off, p_cellx[pp], p_celly[pp],
                     inv_ntotal_particles, *energy_deposition,
                     energy_deposition_tally);
      *energy_deposition = 0.0;
      return PARTICLE_DEAD;
    }
  } else {
    /* Model elastic particle scattering */

    // The following assumes that all particles reside within a two-dimensional
    // plane, which solves a different equation. Change so that we consider
    // the full set of directional cosines, allowing scattering between planes.

    // Choose a random scattering angle between -1 and 1
    const double mu_cm = 1.0 - 2.0 * rn1;

    // Calculate the new energy based on the relation to angle of incidence
    const double e_new = p_energy[pp] *
                         (MASS_NO * MASS_NO + 2.0 * MASS_NO * mu_cm + 1.0) /
                         ((MASS_NO + 1.0) * (MASS_NO + 1.0));

    // Convert the angle into the laboratory frame of reference
    double cos_theta = 0.5 * ((MASS_NO + 1.0) * sqrt(e_new / p_energy[pp]) -
                              (MASS_NO - 1.0) * sqrt(p_energy[pp] / e_new));

    // Alter the direction of the velocities
    const double sin_theta = sqrt(1.0 - cos_theta * cos_theta);
    const double omega_x_new =
        (p_omega_x[pp] * cos_theta - p_omega_y[pp] * sin_theta);
    const double omega_y_new =
        (p_omega_x[pp] * sin_theta + p_omega_y[pp] * cos_theta);
    p_omega_x[pp] = omega_x_new;
    p_omega_y[pp] = omega_y_new;
    p_energy[pp] = e_new;
  }

  // Energy has changed so update the cross-sections
  microscopic_cs_for_energy(cs_scatter_table_keys, cs_scatter_table_values,
                            cs_scatter_table_nentries, p_energy[pp],
                            scatter_cs_index, microscopic_cs_scatter);
  microscopic_cs_for_energy(cs_absorb_table_keys, cs_absorb_table_values,
                            cs_absorb_table_nentries, p_energy[pp],
                            absorb_cs_index, microscopic_cs_absorb);
  *number_density = (local_density * AVOGADROS / MOLAR_MASS);
  *macroscopic_cs_scatter = *number_density * (*microscopic_cs_scatter) * BARNS;
  *macroscopic_cs_absorb = *number_density * (*microscopic_cs_absorb) * BARNS;

  // Re-sample number of mean free paths to collision
  generate_random_numbers(pp, master_key, *counter, &rn0, &rn1);
  (*counter)++;
  p_mfp_to_collision[pp] = -log(rn0) / *macroscopic_cs_scatter;
  p_dt_to_census[pp] -= distance_to_collision / *speed;
  *speed = sqrt((2.0 * p_energy[pp] * eV_TO_J) / PARTICLE_MASS);

  return PARTICLE_CONTINUE;
}

// Handle facet event.
// Deposits energy up to the facet, flushes the per-particle tally (the
// particle is leaving the cell), advances the particle to the facet, then
// either reflects its direction at the global mesh boundary or moves it into
// the adjacent cell.  Cell-dependent quantities are refreshed afterwards.
// Returns PARTICLE_CONTINUE (no ranks/sends in this port).
int facet_event(const int global_nx, const int global_ny, const int nx,
                const int ny, const int x_off, const int y_off,
                const double inv_ntotal_particles,
                const double distance_to_facet, const double speed,
                const double cell_mfp, const int x_facet, const double* density,
                const int* neighbours, const uint64_t pp, double* p_energy,
                double* p_weight, double* p_mfp_to_collision,
                double* p_dt_to_census, double* p_x, double* p_y,
                double* p_omega_x, double* p_omega_y, int* p_cellx,
                int* p_celly, double* energy_deposition, double* number_density,
                double* microscopic_cs_scatter, double* microscopic_cs_absorb,
                double* macroscopic_cs_scatter, double* macroscopic_cs_absorb,
                double* energy_deposition_tally, int* cellx, int* celly,
                double* local_density) {

  // Update the mean free paths until collision
  p_mfp_to_collision[pp] -= (distance_to_facet / cell_mfp);
  p_dt_to_census[pp] -= (distance_to_facet / speed);

  add_energy_deposition(global_nx, nx, x_off, y_off, p_energy[pp], p_weight[pp],
                        inv_ntotal_particles, distance_to_facet,
                        *number_density, *microscopic_cs_absorb,
                        *microscopic_cs_scatter + *microscopic_cs_absorb,
                        energy_deposition);

  // Update tallies as we leave a cell
  update_tallies(nx, x_off, y_off, p_cellx[pp], p_celly[pp],
                 inv_ntotal_particles, *energy_deposition,
                 energy_deposition_tally);
  *energy_deposition = 0.0;

  // Move the particle to the facet
  p_x[pp] += distance_to_facet * p_omega_x[pp];
  p_y[pp] += distance_to_facet * p_omega_y[pp];

  if (x_facet) {
    if (p_omega_x[pp] > 0.0) {
      // Reflect at the boundary
      if (p_cellx[pp] >= (global_nx - 1)) {
        p_omega_x[pp] = -(p_omega_x[pp]);
      } else {
        // Moving to right cell
        p_cellx[pp]++;
      }
    } else if (p_omega_x[pp] < 0.0) {
      if (p_cellx[pp] <= 0) {
        // Reflect at the boundary
        p_omega_x[pp] = -(p_omega_x[pp]);
      } else {
        // Moving to left cell
        p_cellx[pp]--;
      }
    }
  } else {
    if (p_omega_y[pp] > 0.0) {
      // Reflect at the boundary
      if (p_celly[pp] >= (global_ny - 1)) {
        p_omega_y[pp] = -(p_omega_y[pp]);
      } else {
        // Moving to north cell
        p_celly[pp]++;
      }
    } else if (p_omega_y[pp] < 0.0) {
      // Reflect at the boundary
      if (p_celly[pp] <= 0) {
        p_omega_y[pp] = -(p_omega_y[pp]);
      } else {
        // Moving to south cell
        p_celly[pp]--;
      }
    }
  }

  // Update the data based on new cell
  // NOTE(review): this indexing differs from handle_particles, which uses
  // pad-adjusted indices and a pitch of (nx + 2*pad) into the same density
  // array; here neither the pad offset nor the padded pitch is applied.
  // Looks inconsistent — verify against the reference neutral mini-app.
  *cellx = p_cellx[pp] - x_off;
  *celly = p_celly[pp] - y_off;
  *local_density = density[*celly * nx + *cellx];
  *number_density = (*local_density * AVOGADROS / MOLAR_MASS);
  *macroscopic_cs_scatter = *number_density * *microscopic_cs_scatter * BARNS;
  *macroscopic_cs_absorb = *number_density * *microscopic_cs_absorb * BARNS;
  return PARTICLE_CONTINUE;
}

// Handles the census event.
// Streams the particle for the remaining timestep, charges the travelled
// mean free paths, deposits energy and flushes the tally.  The particle
// survives into the next timestep with dt_to_census reset to zero.
void census_event(const int global_nx, const int nx, const int x_off,
                  const int y_off, const double inv_ntotal_particles,
                  const double distance_to_census, const double cell_mfp,
                  const uint64_t pp, double* p_weight, double* p_energy,
                  double* p_x, double* p_y, double* p_omega_x,
                  double* p_omega_y, double* p_mfp_to_collision,
                  double* p_dt_to_census, int* p_cellx, int* p_celly,
                  double* energy_deposition, double* number_density,
                  double* microscopic_cs_scatter, double* microscopic_cs_absorb,
                  double* energy_deposition_tally) {

  // We have not changed cell or energy level at this stage
  p_x[pp] += distance_to_census * p_omega_x[pp];
  p_y[pp] += distance_to_census * p_omega_y[pp];
  p_mfp_to_collision[pp] -= (distance_to_census / cell_mfp);

  add_energy_deposition(global_nx, nx, x_off, y_off, p_energy[pp], p_weight[pp],
                        inv_ntotal_particles, distance_to_census,
                        *number_density, *microscopic_cs_absorb,
                        *microscopic_cs_scatter + *microscopic_cs_absorb,
                        energy_deposition);

  // Need to store tally information as finished with particle
  update_tallies(nx, x_off, y_off, p_cellx[pp], p_celly[pp],
                 inv_ntotal_particles, *energy_deposition,
                 energy_deposition_tally);
  p_dt_to_census[pp] = 0.0;
}

// Tallies the energy deposition in the cell.
// Atomic because many particles (threads) may deposit into the same cell.
void update_tallies(const int nx, const int x_off, const int y_off,
                    const int p_cellx, const int p_celly,
                    const double inv_ntotal_particles,
                    const double energy_deposition,
                    double* energy_deposition_tally) {

  const int cellx = p_cellx - x_off;
  const int celly = p_celly - y_off;

#pragma omp atomic update
  energy_deposition_tally[celly * nx + cellx] +=
      energy_deposition * inv_ntotal_particles;
}
// Calculates the distance to the next facet along the particle's flight
// direction, and reports (via *x_facet) whether that facet is vertical (1) or
// horizontal (0).  Distances are measured along the actual velocity vector.
void calc_distance_to_facet(const int global_nx, const double p_x,
                            const double p_y, const int pad, const int x_off,
                            const int y_off, const double p_omega_x,
                            const double p_omega_y, const double speed,
                            const int particle_cellx, const int particle_celly,
                            double* distance_to_facet, int* x_facet,
                            const double* edgex, const double* edgey) {

  // Check the master_key required to move the particle along a single axis
  // If the velocity is positive then the top or right boundary will be hit
  const int cellx = particle_cellx - x_off + pad;
  const int celly = particle_celly - y_off + pad;
  // NOTE(review): 1/(omega*speed) is +/-inf when a direction cosine is 0 —
  // the corresponding dt then loses the min() below, which appears intended.
  double u_x_inv = 1.0 / (p_omega_x * speed);
  double u_y_inv = 1.0 / (p_omega_y * speed);

  // The bound is open on the left and bottom so we have to correct for this
  // and required the movement to the facet to go slightly further than the
  // edge in the calculated values, using OPEN_BOUND_CORRECTION, which is the
  // smallest possible distance from the closed bound e.g. 1.0e-14.
  double dt_x = (p_omega_x >= 0.0)
                    ? ((edgex[cellx + 1]) - p_x) * u_x_inv
                    : ((edgex[cellx] - OPEN_BOUND_CORRECTION) - p_x) * u_x_inv;
  double dt_y = (p_omega_y >= 0.0)
                    ? ((edgey[celly + 1]) - p_y) * u_y_inv
                    : ((edgey[celly] - OPEN_BOUND_CORRECTION) - p_y) * u_y_inv;
  *x_facet = (dt_x < dt_y) ? 1 : 0;

  // Calculated the projection to be
  // a = vector on first edge to be hit
  // u = velocity vector
  double mag_u0 = speed;

  if (*x_facet) {
    // We are centered on the origin, so the y component is 0 after travelling
    // aint the x axis to the edge (ax, 0).(x, y)
    *distance_to_facet =
        (p_omega_x >= 0.0)
            ? ((edgex[cellx + 1]) - p_x) * mag_u0 * u_x_inv
            : ((edgex[cellx] - OPEN_BOUND_CORRECTION) - p_x) * mag_u0 * u_x_inv;
  } else {
    // We are centered on the origin, so the x component is 0 after travelling
    // along the y axis to the edge (0, ay).(x, y)
    *distance_to_facet =
        (p_omega_y >= 0.0)
            ? ((edgey[celly + 1]) - p_y) * mag_u0 * u_y_inv
            : ((edgey[celly] - OPEN_BOUND_CORRECTION) - p_y) * mag_u0 * u_y_inv;
  }
}

// Calculate the energy deposition in the cell.
// Accumulates into *ed the heating response weighted by the path length and
// the total macroscopic interaction rate (microscopic_cs_total * BARNS *
// number_density).  Absorption is assumed to deposit the full energy
// (average exit energy of an absorbed particle is taken as 0).
void add_energy_deposition(
    const int global_nx, const int nx, const int x_off, const int y_off,
    const double p_energy, const double p_weight,
    const double inv_ntotal_particles, const double path_length,
    const double number_density, const double microscopic_cs_absorb,
    const double microscopic_cs_total, double* ed) {

  // Calculate the energy deposition based on the path length
  const double average_exit_energy_absorb = 0.0;
  const double absorption_heating =
      (microscopic_cs_absorb / microscopic_cs_total) *
      average_exit_energy_absorb;
  // Mean post-scatter energy for isotropic elastic scattering off MASS_NO
  const double average_exit_energy_scatter =
      p_energy * ((MASS_NO * MASS_NO + MASS_NO + 1) /
                  ((MASS_NO + 1) * (MASS_NO + 1)));
  const double scattering_heating =
      (1.0 - (microscopic_cs_absorb / microscopic_cs_total)) *
      average_exit_energy_scatter;
  const double heating_response =
      (p_energy - scattering_heating - absorption_heating);
  *ed += p_weight * path_length * (microscopic_cs_total * BARNS) *
         heating_response * number_density;
}

// Fetch the cross section for a particular energy value.
// Locates the energy group [keys[ind], keys[ind+1]) containing p_energy by a
// walking bisection, then linearly interpolates the table value into *cs.
// NOTE(review): the cs_index parameter is never read or written here —
// presumably a cache hint in other ports; confirm before relying on it.
// NOTE(review): the walk does not bound ind to [0, nentries-2]; an energy
// outside the table range would read out of bounds — verify table coverage.
void microscopic_cs_for_energy(const double* keys, const double* values,
                               const int nentries, const double p_energy,
                               int* cs_index, double* cs) {

  // Use a simple binary search to find the energy group
  int ind = nentries / 2;
  int width = ind / 2;
  while (p_energy < keys[ind] || p_energy >= keys[ind + 1]) {
    ind += (p_energy < keys[ind]) ? -width : width;
    width = max(1, width / 2); // To handle odd cases, allows one extra walk
  }

  // Return the value linearly interpolated
  *cs = values[ind] +
        ((p_energy - keys[ind]) / (keys[ind + 1] - keys[ind])) *
            (values[ind + 1] - values[ind]);
}

// Generates two uniform random numbers in (0,1) with the counter-based
// threefry2x64 generator (Random123): deterministic for a given
// (pkey, master_key, counter) triple, so particles are reproducible and
// independent regardless of thread scheduling.
void generate_random_numbers(const uint64_t pkey, const uint64_t master_key,
                             const uint64_t counter, double* rn0, double* rn1) {

  // NOTE(review): nrns is unused in this body.
  const int nrns = 2;
  threefry2x64_ctr_t ctr;
  threefry2x64_ctr_t key;
  ctr.v[0] = counter;
  ctr.v[1] = 0;
  key.v[0] = pkey;
  key.v[1] = master_key;

  // Generate the random numbers
  threefry2x64_ctr_t rand = threefry2x64(ctr, key);

  // Turn our random numbers from integrals to double precision
  uint64_t max_uint64 = UINT64_C(0xFFFFFFFFFFFFFFFF);
  const double factor = 1.0 / (max_uint64 + 1.0);
  const double half_factor = 0.5 * factor;
  *rn0 = rand.v[0] * factor + half_factor;
  *rn1 = rand.v[1] * factor + half_factor;
}

// Validates the results of the simulation.
// Reduces the local energy deposition tally, combines it across ranks, and
// on the master rank compares against the expected value stored in the
// problem parameter file (within VALIDATE_TOLERANCE).
void validate(const int nx, const int ny, const char* params_filename,
              const int rank, double* energy_deposition_tally) {

  // Reduce the entire energy deposition tally locally
  double local_energy_tally = 0.0;
#pragma omp target teams distribute parallel for map( \
    tofrom : local_energy_tally) reduction(+ : local_energy_tally)
  for (int ii = 0; ii < nx * ny; ++ii) {
    local_energy_tally += energy_deposition_tally[ii];
  }

  // Finalise the reduction globally
  double global_energy_tally = reduce_all_sum(local_energy_tally);

  if (rank != MASTER) {
    return;
  }

  printf("\nFinal global_energy_tally %.15e\n", global_energy_tally);

  int nresults = 0;
  char* keys = (char*)malloc(sizeof(char) * MAX_KEYS * (MAX_STR_LEN + 1));
  double* values = (double*)malloc(sizeof(double) * MAX_KEYS);
  if (!get_key_value_parameter(params_filename, NEUTRAL_TESTS, keys, values,
                               &nresults)) {
    printf("Warning. Test entry was not found, could NOT validate.\n");
    return;
  }

  // Check the result is within tolerance
  printf("Expected %.12e, result was %.12e.\n", values[0], global_energy_tally);
  if (within_tolerance(values[0], global_energy_tally, VALIDATE_TOLERANCE)) {
    printf("PASSED validation.\n");
  } else {
    printf("FAILED validation.\n");
  }

  free(keys);
  free(values);
}

// Initialises a new particle population ready for tracking.
// Allocates the structure-of-arrays particle storage (with 1.5x headroom for
// incoming particles) and places nparticles uniformly in the local source
// region with isotropic directions and mono-energetic initial energy.
// Returns the number of bytes allocated.
size_t inject_particles(const int nparticles, const int global_nx,
                        const int local_nx, const int local_ny, const int pad,
                        const double local_particle_left_off,
                        const double local_particle_bottom_off,
                        const double local_particle_width,
                        const double local_particle_height, const int x_off,
                        const int y_off, const double dt, const double* edgex,
                        const double* edgey, const double initial_energy,
                        Particle** particles) {

  *particles = (Particle*)malloc(sizeof(Particle));
  if (!*particles) {
    TERMINATE("Could not allocate particle array.\n");
  }

  Particle* particle = *particles;
  size_t allocation = 0;
  // 1.5x headroom so particles received from neighbours can be appended
  allocation += allocate_data(&particle->x, nparticles * 1.5);
  allocation += allocate_data(&particle->y, nparticles * 1.5);
  allocation += allocate_data(&particle->omega_x, nparticles * 1.5);
  allocation += allocate_data(&particle->omega_y, nparticles * 1.5);
  allocation += allocate_data(&particle->energy, nparticles * 1.5);
  allocation += allocate_data(&particle->weight, nparticles * 1.5);
  allocation += allocate_data(&particle->dt_to_census, nparticles * 1.5);
  allocation += allocate_data(&particle->mfp_to_collision, nparticles * 1.5);
  allocation += allocate_int_data(&particle->cellx, nparticles * 1.5);
  allocation += allocate_int_data(&particle->celly, nparticles * 1.5);
  allocation += allocate_int_data(&particle->dead, nparticles * 1.5);

  double* p_x = particle->x;
  double* p_y = particle->y;
  double* p_omega_x = particle->omega_x;
  double* p_omega_y = particle->omega_y;
  double* p_energy = particle->energy;
  double* p_weight = particle->weight;
  double* p_dt_to_census = particle->dt_to_census;
  double* p_mfp_to_collision = particle->mfp_to_collision;
  int* p_cellx = particle->cellx;
  int* p_celly = particle->celly;
  int* p_dead = particle->dead;

  START_PROFILING(&compute_profile);

#pragma omp target teams distribute parallel for
  for (int pp = 0; pp < nparticles; ++pp) {
    double rn[NRANDOM_NUMBERS];
    generate_random_numbers(pp, 0, 0, &rn[0], &rn[1]);

    // Set the initial random location of the particle inside the source
    // region
    p_x[pp] = local_particle_left_off + rn[0] * local_particle_width;
    p_y[pp] = local_particle_bottom_off + rn[1] * local_particle_height;

    // Check the location of the specific cell that the particle sits within.
    // We have to check this explicitly because the mesh might be non-uniform.
    int cellx = 0;
    int celly = 0;
    for (int ii = 0; ii < local_nx; ++ii) {
      if (p_x[pp] >= edgex[ii + pad] && p_x[pp] < edgex[ii + pad + 1]) {
        cellx = x_off + ii;
        break;
      }
    }
    for (int ii = 0; ii < local_ny; ++ii) {
      if (p_y[pp] >= edgey[ii + pad] && p_y[pp] < edgey[ii + pad + 1]) {
        celly = y_off + ii;
        break;
      }
    }

    p_cellx[pp] = cellx;
    p_celly[pp] = celly;

    // Generating theta has uniform density, however 0.0 and 1.0 produce the
    // same
    // value which introduces very very very small bias...
    generate_random_numbers(pp, 0, 1, &rn[0], &rn[1]);
    const double theta = 2.0 * M_PI * rn[0];
    p_omega_x[pp] = cos(theta);
    p_omega_y[pp] = sin(theta);

    // This approximation sets mono-energetic initial state for source
    // particles
    p_energy[pp] = initial_energy;

    // Set a weight for the particle to track absorption
    p_weight[pp] = 1.0;
    p_dt_to_census[pp] = dt;
    p_mfp_to_collision[pp] = 0.0;
    p_dead[pp] = 0;
  }

  STOP_PROFILING(&compute_profile, "initialising particles");

  return allocation;
}
ast-dump-openmp-ordered.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s

void test_one() {
#pragma omp ordered
  ;
}

void test_two(int x) {
#pragma omp for ordered
  for (int i = 0; i < x; i++)
    ;
}

void test_three(int x) {
#pragma omp for ordered(1)
  for (int i = 0; i < x; i++) {
#pragma omp ordered depend(source)
  }
}

// NOTE(review): the CHECK patterns below encode absolute line/column numbers
// of the code above (e.g. line:3:1, line:16:8).  Do NOT insert or remove any
// line above this point without regenerating the expectations; comments are
// therefore only added here, after the last referenced line.
// The test verifies the AST shape for: a standalone 'ordered' region, a
// worksharing loop with a bare 'ordered' clause, and a doacross loop
// ('ordered(1)' plus 'ordered depend(source)').

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-ordered.c:3:1, line:6:1> line:3:6 test_one 'void ()'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:17, line:6:1>
// CHECK-NEXT: | `-OMPOrderedDirective {{.*}} <line:4:1, col:20>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:4:1) *const restrict'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:8:1, line:12:1> line:8:6 test_two 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:12:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:9:1, col:24>
// CHECK-NEXT: | |-OMPOrderedClause {{.*}} <col:17, col:24>
// CHECK-NEXT: | | `-<<<NULL>>>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:10:3, line:11:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:10:3, line:11:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:10:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:11:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:9:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:9:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:10:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:14:1, line:19:1> line:14:6 test_three 'void (int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:24, line:19:1>
// CHECK-NEXT: `-OMPForDirective {{.*}} <line:15:1, col:27>
// CHECK-NEXT: |-OMPOrderedClause {{.*}} <col:17, col:26>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:25> 'int'
// CHECK-NEXT: | |-value: Int 1
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:25> 'int' 1
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:16:3, line:18:3>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:16:3, line:18:3>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:16:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-CompoundStmt {{.*}} <col:31, line:18:3>
// CHECK-NEXT: | | `-OMPOrderedDirective {{.*}} <line:17:1, col:35> openmp_standalone_directive
// CHECK-NEXT: | | `-OMPDependClause {{.*}} <col:21, <invalid sloc>>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:15:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:15:1) *const restrict'
// CHECK-NEXT: | `-VarDecl {{.*}} <line:16:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
cpu.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Parts of the following code in this file refs to
 * https://github.com/Tencent/ncnn/blob/master/src/cpu.cpp
 * Tencent is pleased to support the open source community by making ncnn
 * available.
 *
 * Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
 *
 * Licensed under the BSD 3-Clause License (the "License"); you may not use this
 * file except in compliance with the License. You may obtain a copy of the
 * License at
 *
 * https://opensource.org/licenses/BSD-3-Clause
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: lswang@openailab.com
 */

#include "cpu.h"

#include "api/c_api.h"

#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>

#ifndef _MSC_VER
#include <pthread.h>
#include <sys/syscall.h>
#include <sched.h>
#include <unistd.h>
#endif

#if __APPLE__
#include "TargetConditionals.h"
#if TARGET_OS_IPHONE
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/machine.h>
#define __APPLE_IOS__ 1
#endif
#endif

#ifdef _OPENMP
#include <omp.h>
#endif

// Cached CPU topology, filled lazily by init_cpu_count()/init_cluster_mask().
// Masks are one bit per logical core; bit i set means core i is a member.
static size_t core_count = 0;
static size_t affinity_mask_all_cluster = 0;
static size_t affinity_mask_big_cluster = 0;
static size_t affinity_mask_medium_cluster = 0;
static size_t affinity_mask_little_cluster = 0;

// Counts the logical cores of the current device and caches the result.
// Returns the (clamped) core count, or -1 if /proc/cpuinfo cannot be read
// on Android.  The count is clamped to [1, bits-in-size_t] so it can be
// represented in the affinity masks above.
int init_cpu_count()
{
    if (0 < core_count)
        return core_count;

#ifdef __ANDROID__
    {
        FILE* cpu_info = fopen("/proc/cpuinfo", "rb");
        if (!cpu_info)
            return -1;

        char buffer[1024];
        while (!feof(cpu_info))
        {
            char* s = fgets(buffer, 1024, cpu_info);
            if (!s)
                break;

            // one "processor : N" stanza per logical core
            if (memcmp(buffer, "processor", 9) == 0)
                core_count++;
        }

        fclose(cpu_info);
    };
#elif __APPLE_IOS__
    {
        size_t len = sizeof(core_count);
        sysctlbyname("hw.ncpu", &core_count, &len, NULL, 0);
    };
#else
    {
#ifdef _OPENMP
        core_count = omp_get_max_threads();
#else
        core_count = 1;
#endif
    }
#endif

    // check count range
    if (core_count < 1)
        core_count = 1;

    // TODO: deal with this conditions
    // clamp so every core fits in one bit of a size_t mask
    if (core_count > sizeof(size_t) * 8)
        core_count = sizeof(size_t) * 8;

    return core_count;
}

#ifndef _MSC_VER
// Returns the maximum frequency (in kHz) of the given core, probing the
// sysfs cpufreq stats files in order of preference, or -1 on failure.
static int get_max_freq_khz(int cpuid)
{
    // first try, for all possible cpu
    char path[256];
    sprintf(path, "/sys/devices/system/cpu/cpufreq/stats/cpu%d/time_in_state", cpuid);

    FILE* fp = fopen(path, "rb");

    if (!fp)
    {
        // second try, for online cpu
        sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/stats/time_in_state", cpuid);
        fp = fopen(path, "rb");

        if (fp)
        {
            // time_in_state lists "freq_khz time" pairs; take the largest freq
            int max_freq_khz = 0;
            while (!feof(fp))
            {
                int freq_khz = 0;
                int nscan = fscanf(fp, "%d %*d", &freq_khz);
                if (nscan != 1)
                    break;

                if (freq_khz > max_freq_khz)
                    max_freq_khz = freq_khz;
            }

            fclose(fp);

            if (max_freq_khz != 0)
                return max_freq_khz;

            fp = NULL;
        }

        if (!fp)
        {
            // third try, for online cpu
            sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpuid);
            fp = fopen(path, "rb");

            if (!fp)
                return -1;

            int max_freq_khz = -1;
            int ret = fscanf(fp, "%d", &max_freq_khz);

            fclose(fp);

            // NOTE(review): condition kept from the original; since
            // max_freq_khz starts at -1 a failed fscanf falls through to
            // "return max_freq_khz" (= -1) anyway.
            if (max_freq_khz <= 0 && EOF == ret)
                return -1;
            else
                return max_freq_khz;
        }
    }

    int max_freq_khz = 0;
    while (!feof(fp))
    {
        int freq_khz = 0;
        int nscan = fscanf(fp, "%d %*d", &freq_khz);
        if (nscan != 1)
            break;

        if (freq_khz > max_freq_khz)
            max_freq_khz = freq_khz;
    }

    fclose(fp);

    return max_freq_khz;
}

// Binds the CALLING thread to the cores set in thread_affinity_mask.
// Returns 0 on success, -1 on syscall failure.
static int set_sched_affinity(size_t thread_affinity_mask)
{
    // cpu_set_t definition
    // ref http://stackoverflow.com/questions/16319725/android-set-thread-affinity
#ifndef CPU_SETSIZE
#define CPU_SETSIZE 1024
#endif
#ifndef __NCPUBITS
#define __NCPUBITS (8 * sizeof(unsigned long))
#endif
    typedef struct
    {
        unsigned long __bits[CPU_SETSIZE / __NCPUBITS];
    } cpu_set_t;

#define CPU_SET(cpu, cpusetp) ((cpusetp)->__bits[(cpu) / __NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS)))

#define CPU_ZERO(cpusetp) memset((cpusetp), 0, sizeof(cpu_set_t))

    // set affinity for threads
#if (defined __GLIBC__) || (defined _OHOS_) || (defined V831)
    pid_t pid = syscall(SYS_gettid);
#else
#ifdef PI3
    pid_t pid = getpid();
#else
#ifdef __APPLE__
    uint64_t tid64;
    pthread_threadid_np(NULL, &tid64);
    pid_t pid = (pid_t)tid64;
#else
    pid_t pid = gettid();
#endif
#endif
#endif

    cpu_set_t mask;
    CPU_ZERO(&mask);
    // for (int i = 0; i < ( int )sizeof(size_t) * 8; i++)
    for (int i = 0; i < core_count; i++)
    {
        // (size_t)1: an int-typed 1 << i is undefined for i >= 31 and can
        // never reach bits 32..63 of the mask on machines with many cores
        if (thread_affinity_mask & ((size_t)1 << i))
            CPU_SET(i, &mask);
    }

#if __APPLE__
    // NOTE(review): passing a local function as the syscall number looks
    // wrong (macOS has no sched_setaffinity); kept as-is — confirm upstream.
    int syscallret = syscall(set_sched_affinity, pid, sizeof(mask), &mask);
#else
    int syscallret = syscall(__NR_sched_setaffinity, pid, sizeof(mask), &mask);
#endif
    if (syscallret)
    {
        fprintf(stderr, "syscall error %d\n", syscallret);
        return -1;
    }

    return 0;
}
#endif

// Classifies cores into big/medium/little clusters by max frequency and
// fills the module-level affinity masks.  Returns 0.
int init_cluster_mask()
{
    init_cpu_count();

    if (0 != affinity_mask_all_cluster)
        return 0;

    // ((size_t)1 << core_count) is undefined when core_count equals the
    // width of size_t (init_cpu_count clamps to exactly that), so saturate
    if (core_count >= sizeof(size_t) * 8)
        affinity_mask_all_cluster = ~(size_t)0;
    else
        affinity_mask_all_cluster = ((size_t)(1) << core_count) - (size_t)(1);
    //affinity_mask_all_cluster = (size_t)(0) - (size_t)(1);

#ifndef _MSC_VER
    int max_freq_min_val = INT_MAX;
    int max_freq_max_val = 0;

    // TODO: deal with very large count of cores
    int max_freq_array[sizeof(size_t) * 8];

    for (int i = 0; i < core_count; i++)
    {
        int max_freq_khz = get_max_freq_khz(i);

        // fprintf(stderr, "cpu %d, max_freq_khz %d\n", i, max_freq_khz);

        max_freq_array[i] = max_freq_khz;

        if (max_freq_khz > max_freq_max_val)
            max_freq_max_val = max_freq_khz;
        if (max_freq_khz < max_freq_min_val)
            max_freq_min_val = max_freq_khz;
    }

    if (max_freq_max_val == max_freq_min_val)
    {
        // homogeneous CPU: everything is "big"
        affinity_mask_big_cluster = affinity_mask_all_cluster;
        affinity_mask_medium_cluster = 0;
        affinity_mask_little_cluster = 0;
    }
    else
    {
        for (int i = 0; i < core_count; i++)
        {
            // (size_t)1 shifts: see note in set_sched_affinity
            if (max_freq_array[i] == max_freq_max_val)
                affinity_mask_big_cluster |= ((size_t)1 << i);
            else if (max_freq_array[i] == max_freq_min_val)
                affinity_mask_little_cluster |= ((size_t)1 << i);
            else
                affinity_mask_medium_cluster |= ((size_t)1 << i);
        }
    }
#else
    // TODO implement me for other platforms
    affinity_mask_big_cluster = affinity_mask_all_cluster;
#endif

    return 0;
}

// Ensures the cached core count and cluster masks are initialised.
int check_cpu()
{
    init_cpu_count();
    init_cluster_mask();

    return 0;
}

// Returns the number of cores selected by the given affinity mask.
int get_cpu_mask_count(size_t mask)
{
    int count = 0;
    for (int i = 0; i < core_count; i++)
        if (mask & ((size_t)1 << i))
            count++;

    return count;
}

// Pins the worker threads to the cores in 'mask'.
// NOTE: this function is truncated at the end of the visible excerpt; the
// remainder (and closing brace) lies beyond this view.
int set_cpu_affine(size_t mask)
{
#if defined __ANDROID__ || defined __linux__
    int count = get_cpu_mask_count(mask);
#ifdef _OPENMP
    // set affinity for each threads
    omp_set_num_threads(count);

    int status[sizeof(size_t) * 8] = {0};
#pragma omp parallel for num_threads(count)
    for (int i = 0; i < count; i++)
    {
        status[i] = set_sched_affinity(mask);
    }

    for (int i = 0; i < count; i++)
    {
        if (status[i] != 0)
            return -1;
    }
#else
    int status = set_sched_affinity(mask);
    if (0 != status)
        return -1;
#endif #elif __APPLE_IOS__ || _MSC_VER // threads affinity not supported on ios ( void )mask; return -1; #else int status = set_sched_affinity(mask); if (0 != status) return -1; return 0; #endif } size_t get_cpu_cluster_mask(int cluster) { switch (cluster) { case TENGINE_CLUSTER_BIG: if (0 != affinity_mask_big_cluster) return affinity_mask_big_cluster; break; case TENGINE_CLUSTER_MEDIUM: if (0 != affinity_mask_medium_cluster) return affinity_mask_medium_cluster; break; case TENGINE_CLUSTER_LITTLE: if (0 != affinity_mask_little_cluster) return affinity_mask_little_cluster; break; default: break; } return affinity_mask_all_cluster; }
Example_flush_nolist.1.c
/*
 * @@name: flush_nolist.1c
 * @@type: C
 * @@compilable: yes
 * @@linkable: yes
 * @@expect: success
 */

/* x is shared at file scope; p points at x, so *p aliases x. */
int x, *p = &x;

/* Store 1 through q, then publish it with an explicit flush. */
void f1(int *q)
{
    *q = 1;
#pragma omp flush
    /* x, p, and *q are flushed */
    /* because they are shared and accessible */
    /* q is not flushed because it is not shared. */
}

/* Store 2 through q between two barriers; each barrier implies a flush. */
void f2(int *q)
{
#pragma omp barrier
    *q = 2;
#pragma omp barrier

    /* a barrier implies a flush */
    /* x, p, and *q are flushed */
    /* because they are shared and accessible */
    /* q is not flushed because it is not shared. */
}

/* Runs a 10-thread parallel region; each thread contributes
 * i + j + *p + n + j (via the two sums) to the reduction. */
int g(int n)
{
    int i = 1, j, sum = 0;
    *p = 1;
#pragma omp parallel reduction(+: sum) num_threads(10)
    {
        f1(&j);
        /* i, n and sum were not flushed */
        /* because they were not accessible in f1 */
        /* j was flushed because it was accessible */
        sum += j;

        f2(&j);
        /* i, n, and sum were not flushed */
        /* because they were not accessible in f2 */
        /* j was flushed because it was accessible */
        sum += i + j + *p + n;
    }
    return sum;
}

int main()
{
    int result = g(7);
    return result;
}
track_ellipse_gpu.c
#include "track_ellipse.h"

// Host and device arrays to hold matrices for all cells
// (so we can copy to and from the device in a single transfer)

// The number of work items per work group
// NOTE(review): LOCAL_WORK_SIZE / NEXT_LOWEST_POWER_OF_TWO / MU / LAMBDA are
// not referenced in this file -- presumably consumed by kernel_IMGVF.h,
// which is textually included into the target region below; confirm.
#define LOCAL_WORK_SIZE 256

#define FP_TYPE float
#define FP_CONST(num) num##f
#define PI_FP32 FP_CONST(3.14159)
#define ONE_OVER_PI (FP_CONST(1.0) / PI_FP32)
#define MU FP_CONST(0.5)
#define LAMBDA (FP_CONST(8.0) * MU + FP_CONST(1.0))
#define NEXT_LOWEST_POWER_OF_TWO 256

//--------------- device function ---------------------------
#pragma omp declare target
/* Smooth (atan-based) approximation of the Heaviside step function:
 * maps x to (0, 1), with heaviside(0) == 0.5. */
FP_TYPE heaviside(FP_TYPE x)
{
    return (atanf(x) * ONE_OVER_PI) + FP_CONST(0.5);
}
#pragma omp end declare target

// Host function that launches an OpenCL kernel to compute the MGVF matrices for the specified cells
//
// Packs the per-cell matrices IE[0..num_cells) into one flat float buffer,
// maps everything to the device in a single `target data` region (the actual
// kernel lives in kernel_IMGVF.h), then unpacks the results into IMGVF.
void IMGVF_GPU(MAT **IE, MAT **IMGVF, double vx, double vy, double e, int max_iterations, double cutoff, int num_cells)
{
    // Initialize the data on the GPU

    // Allocate array of offsets to each cell's image
    size_t mem_size = sizeof(int) * num_cells;
    int* host_I_offsets = (int *) malloc(mem_size);

    // Allocate arrays to hold the dimensions of each cell's image
    int* host_m_array = (int *) malloc(mem_size);
    int* host_n_array = (int *) malloc(mem_size);

    // Figure out the size of all of the matrices combined
    int i, j;
    size_t total_size = 0;
    for (int cell_num = 0; cell_num < num_cells; cell_num++)
    {
        MAT *I = IE[cell_num];
        size_t size = I->m * I->n;
        total_size += size;
    }
    size_t total_mem_size = total_size * sizeof(float);

    // Allocate host memory just once for all cells
    float* host_I_all = (float *) malloc(total_mem_size);
    float* host_IMGVF_all = (float *) malloc(total_mem_size);

    // Copy each initial matrix into the allocated host memory
    // (I doubles as the initial IMGVF estimate, so both buffers get it.)
    int offset = 0;
    for (int cell_num = 0; cell_num < num_cells; cell_num++)
    {
        MAT *I = IE[cell_num];

        // Determine the size of the matrix
        int m = I->m, n = I->n;
        int size = m * n;

        // Store memory dimensions
        host_m_array[cell_num] = m;
        host_n_array[cell_num] = n;

        // Store offsets to this cell's image
        host_I_offsets[cell_num] = offset;

        // Copy matrix I (which is also the initial IMGVF matrix) into the overall array
        for (i = 0; i < m; i++)
            for (j = 0; j < n; j++)
                host_I_all[offset + (i * n) + j] = host_IMGVF_all[offset + (i * n) + j] = (float) m_get_val(I, i, j);

        offset += size;
    }

    // Convert double-precision parameters to single-precision
    float vx_float = (float) vx;
    float vy_float = (float) vy;
    float e_float = (float) e;
    float cutoff_float = (float) cutoff;

    // One mapping for all cells; the included header supplies the target
    // kernel that iterates up to max_iterations (or until cutoff).
    #pragma omp target data map(to: host_I_offsets[0:num_cells],\
                                    host_m_array[0:num_cells], \
                                    host_n_array[0:num_cells], \
                                    host_I_all[0:total_size]) \
                            map(tofrom: host_IMGVF_all[0:total_size])
    {
        #include "kernel_IMGVF.h"
    }

    // Copy each result matrix into its appropriate host matrix
    offset = 0;
    for (int cell_num = 0; cell_num < num_cells; cell_num++)
    {
        MAT *IMGVF_out = IMGVF[cell_num];

        // Determine the size of the matrix
        int m = IMGVF_out->m, n = IMGVF_out->n, i, j;
        // Pack the result into the matrix
        for (i = 0; i < m; i++)
            for (j = 0; j < n; j++)
            {
#ifdef DEBUG
                printf("host_IMGVF: %f\n",host_IMGVF_all[offset + (i * n) + j]);
#endif
                m_set_val(IMGVF_out, i, j, (double) host_IMGVF_all[offset + (i * n) + j]);
            }

        offset += (m * n);
    }

    // Free host memory
    free(host_m_array);
    free(host_n_array);
    free(host_I_all);
    free(host_I_offsets);
    free(host_IMGVF_all);
}
GB_binop__land_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this is machine-generated template instantiation for the
// LAND (logical-and) operator on FP64; fix issues in Generator/, not here.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__land_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__land_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__land_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__land_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__land_fp64)
// A*D function (colscale):         GB (_AxD__land_fp64)
// D*A function (rowscale):         GB (_DxB__land_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__land_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__land_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__land_fp64)
// C=scalar+B                       GB (_bind1st__land_fp64)
// C=scalar+B'                      GB (_bind1st_tran__land_fp64)
// C=A+scalar                       GB (_bind2nd__land_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__land_fp64)

// C type:    double
// A type:    double
// A pattern? 0
// B type:    double
// B pattern? 0

// BinaryOp: cij = ((aij != 0) && (bij != 0))

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) && (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_FP64 || GxB_NO_LAND_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LAND is none of these, so this variant is compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // generated pattern: unreachable (the block above already returned)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__land_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__land_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__land_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    double aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = ((x != 0) && (aij != 0)) ;    \
}

GrB_Info GB (_bind1st_tran__land_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    double aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = ((aij != 0) && (y != 0)) ;    \
}

GrB_Info GB (_bind2nd_tran__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
multisort-omp-leaf.c
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>

/* Wall-clock time in microseconds. */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6f\n",(_m), stamp);

// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;

#define BLOCK_SIZE 1024L

#define T int

/* Sequential leaf routines, defined elsewhere. */
void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);

/* Recursively split the merge of left/right into halves; only the base
 * case is spawned as an OpenMP task ("leaf" task strategy). */
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length) {
        if (length < MIN_MERGE_SIZE*2L) {
                // Base case
                #pragma omp task
                basicmerge(n, left, right, result, start, length);
        } else {
                // Recursive decomposition
                merge(n, left, right, result, start, length/2);
                merge(n, left, right, result, start + length/2, length/2);
        }
}

/* Sort data[0..n) using tmp[0..n) as scratch: sort four quarters, then
 * merge pairwise.  The taskwaits separate the sort and merge phases. */
void multisort(long n, T data[n], T tmp[n]) {
        if (n >= MIN_SORT_SIZE*4L) {
                // Recursive decomposition
                multisort(n/4L, &data[0], &tmp[0]);
                multisort(n/4L, &data[n/4L], &tmp[n/4L]);
                multisort(n/4L, &data[n/2L], &tmp[n/2L]);
                multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L]);
                #pragma omp taskwait

                merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L);
                merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L);
                #pragma omp taskwait

                merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n);
        } else {
                // Base case
                #pragma omp task
                basicsort(n, data);
        }
}

/* Fill data with a deterministic pseudo-random sequence (seeded by rand). */
static void initialize(long length, T data[length]) {
        long i;
        for (i = 0; i < length; i++) {
                if (i==0) {
                        data[i] = rand();
                } else {
                        data[i] = ((data[i-1]+1) * i * 104723L) % N;
                }
        }
}

/* Zero the scratch buffer. */
static void clear(long length, T data[length]) {
        long i;
        for (i = 0; i < length; i++) {
                data[i] = 0;
        }
}

/* Verify ascending order; report the number of inversions found, if any. */
void check_sorted(long n, T data[n]) {
        int unsorted=0;
        for (int i=1; i<n; i++)
                if (data[i-1] > data[i]) unsorted++;
        if (unsorted > 0)
                printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted);
        else {
                // printf ("data IS ordered; ");
        }
}

int main(int argc, char **argv) {
        /* Arguments are in units of BLOCK_SIZE (1 K) elements; N and the
         * two cutoffs must be powers of 2 for the 4-way splits to be exact. */
        if (argc != 4) {
                fprintf(stderr, "Usage: %s <vector size in K> <sort size in K> <merge size in K>\n", argv[0]);
                return 1;
        }

        N = atol(argv[1]) * BLOCK_SIZE;
        MIN_SORT_SIZE = atol(argv[2]) * BLOCK_SIZE;
        MIN_MERGE_SIZE = atol(argv[3]) * BLOCK_SIZE;

        T *data = malloc(N*sizeof(T));
        T *tmp = malloc(N*sizeof(T));
        /* Fix: the original dereferenced these buffers without checking. */
        if (data == NULL || tmp == NULL) {
                fprintf(stderr, "Error: unable to allocate memory for %ld elements\n", N);
                free(data);
                free(tmp);
                return 1;
        }

        double stamp;
        START_COUNT_TIME;
        initialize(N, data);
        clear(N, tmp);
        STOP_COUNT_TIME("Initialization time in seconds");

        START_COUNT_TIME;
        /* Single thread seeds the task tree; the team executes the tasks. */
        #pragma omp parallel
        #pragma omp single
        multisort(N, data, tmp);
        STOP_COUNT_TIME("Multisort execution time");

        START_COUNT_TIME;
        check_sorted (N, data);
        STOP_COUNT_TIME("Check sorted data execution time");

        fprintf(stdout, "Multisort program finished\n");

        /* Fix: the original leaked both buffers. */
        free(data);
        free(tmp);
        return 0;
}
mpi_omp.c
#include <stdio.h> #include "mpi.h" #include <omp.h> int main(int argc, char *argv[]) { int numprocs, rank, namelen; char processor_name[MPI_MAX_PROCESSOR_NAME]; int iam = 0, np = 1; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &numprocs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Get_processor_name(processor_name, &namelen); #pragma omp parallel default(shared) private(iam, np) { np = omp_get_num_threads(); iam = omp_get_thread_num(); printf("Hello from thread %d out of %d from process %d out of %d on %s\n", iam, np, rank, numprocs, processor_name); } MPI_Finalize(); }
timers.h
// Per-slot wall-clock timer state: start[i] holds the most recent start
// stamp and elapsed[i] the accumulated time for timer slot i (64 slots).
// NOTE(review): defining these globals in a header relies on being
// included from a single translation unit (or on common-symbol linkage);
// confirm before adding includers.
double start[64], elapsed[64];
//#pragma omp threadprivate (start, elapsed)
denoise.h
/// @file denoise.h /// @brief denoise module /// @author Jeff Perry <jeffsp@gmail.com> /// @version 1.0 /// @date 2011-10-31 #ifndef DENOISE_H #define DENOISE_H #include "horny_toad.h" #include "jack_rabbit.h" #include <fstream> namespace opp { /// @brief gaussian blur and rescale an image /// /// @tparam T image type /// @param p image /// @param kernel size of kernel in pixels /// @param stddev standard deviation of gaussian kernel /// @param scale downsampling scale /// /// @return the blurred and rescaled image template<typename T> T gaussian_blur (const T &p, size_t kernel, double stddev, size_t scale) { const size_t ROWS = p.rows () / scale; const size_t COLS = p.cols () / scale; T q (ROWS, COLS); // create gaussian kernel jack_rabbit::raster<double> g (kernel, kernel, 1.0); jack_rabbit::subscript_unary_function<double,horny_toad::gaussian_window> f (g.rows (), g.cols ()); f.stddev (stddev); std::transform (g.begin (), g.end (), g.begin (), f); //horny_toad::print2d (std::clog, g); // note that you can't divide by 1/(2*pi*stddev^2) because the tails are // clipped, and the kernel therefore won't sum to 1.0 double sum = accumulate (g.begin (), g.end (), 0.0); for (size_t i = 0; i < g.size (); ++i) g[i] /= sum; // blur and downsample at each point for (size_t i = 0; i < ROWS; ++i) { unsigned i2 = i * scale; for (size_t j = 0; j < COLS; ++j) { unsigned j2 = j * scale; assert (i < q.rows ()); assert (j < q.cols ()); assert (i2 < p.rows ()); assert (j2 < p.cols ()); q (i, j) = horny_toad::mirrored_dot_product (g, p, i2 - (kernel - 1) / 2, j2 - (kernel - 1) / 2); } } return q; } size_t index888 (unsigned a, unsigned b, unsigned c) { return (a << 16) + (b << 8) + c; } /// @brief single moment lookup table /// /// @tparam T value type of whatever lut is summing template<typename T=size_t> class lut1 { public: lut1 (size_t SZ = 0) : totals (SZ) , sums (SZ) { } size_t size () const { return totals.size (); } void resize (size_t n) { totals.resize (n); sums.resize (n); 
} void update (size_t i, T x) { assert (i < totals.size ()); assert (i < sums.size ()); #pragma omp atomic ++totals[i]; #pragma omp atomic sums[i] += x; } void update (size_t i, T x, T count) { assert (i < totals.size ()); assert (i < sums.size ()); #pragma omp atomic totals[i] += count; #pragma omp atomic sums[i] += x * count; } T sum (size_t i) const { assert (i < sums.size ()); return sums[i]; } size_t total (size_t i) const { assert (i < totals.size ()); return totals[i]; } std::ostream& write (std::ostream &s) const { // not portable s.write (reinterpret_cast<const char *> (&totals[0]), totals.size () * sizeof (size_t)); s.write (reinterpret_cast<const char *> (&sums[0]), sums.size () * sizeof (T)); return s; } std::istream& read (std::istream &s) { // not portable s.read (reinterpret_cast<char *> (&totals[0]), totals.size () * sizeof (size_t)); s.read (reinterpret_cast<char *> (&sums[0]), sums.size () * sizeof (T)); return s; } private: std::vector<size_t> totals; std::vector<T> sums; }; /// @brief helper I/O function for lut1<T> /// /// @tparam T /// @param s stream /// @param l lut /// /// @return istream template<typename T> std::istream& operator>> (std::istream &s, lut1<T> &l) { return l.read (s); } /// @brief helper I/O function for lut1<T> /// /// @tparam T /// @param s stream /// @param l lut /// /// @return ostream template<typename T> std::ostream& operator<< (std::ostream &s, const lut1<T> &l) { return l.write (s); } }; namespace denoise { const size_t PASSES = 3; typedef jack_rabbit::raster<unsigned char> image_t; typedef std::vector<image_t> images_t; /// @brief average two images together /// /// @tparam T image type /// @param a first image /// @param b second image /// /// @return average of two images template<typename T> T avg (const T &a, const T &b) { assert (a.rows () == b.rows ()); assert (a.cols () == b.cols ()); T c (a.rows (), a.cols ()); for (size_t i = 0; i < a.size (); ++i) c[i] = round ((a[i] + b[i]) / 2.0); return c; } class 
context { public: static size_t indexh (const image_t &p, size_t i, size_t j) { assert (i >= 0); assert (j >= 1); assert (i < p.rows ()); assert (j + 1 < p.cols ()); return opp::index888 (p (i, j - 1), p (i, j), p (i, j + 1)); } static size_t indexv (const image_t &p, size_t i, size_t j) { assert (i >= 1); assert (j >= 0); assert (i + 1 < p.rows ()); assert (j < p.cols ()); return opp::index888 (p (i - 1, j), p (i, j), p (i + 1, j)); } static size_t kernel_size () { return 3; } static opp::lut1<size_t> default_lut (size_t count) { opp::lut1<size_t> l (1 << 24); for (size_t a = 0; a < 256; ++a) for (size_t b = 0; b < 256; ++b) for (size_t c = 0; c < 256; ++c) l.update (opp::index888 (a, b, c), b, count); return l; } }; template<typename T> const T rescale (const T &p, const double scale) { T q (p); for (size_t i = 0; i < p.rows (); ++i) { for (size_t j = 0; j < p.cols (); ++j) { const int p0 = p (i, j); int scaled = round (p0 * scale); if (scaled > 255) scaled = 255; q (i, j) = scaled; } } return q; } template<typename C> class codec { private: opp::lut1<size_t> l; public: codec () : l (C::default_lut (1)) { } void update (const image_t &p, const image_t &q, const bool h) { update2 (p, q, h); if (h) update2 (horny_toad::fliplr (p), horny_toad::fliplr (q), h); else update2 (horny_toad::flipud (p), horny_toad::flipud (q), h); } void update2 (const image_t &p, const image_t &q, const bool h) { const size_t K = C::kernel_size (); if (h) { for (size_t i = K; i + K < p.rows (); ++i) for (size_t j = K; j + K < p.cols (); ++j) l.update (C::indexh (q, i, j), p (i, j)); } else { for (size_t i = K; i + K < p.rows (); ++i) for (size_t j = K; j + K < p.cols (); ++j) l.update (C::indexv (q, i, j), p (i, j)); } } image_t denoise (const image_t &p, const bool h) const { image_t q (p.rows (), p.cols ()); const size_t K = C::kernel_size() / 2; // offset to center for (size_t i = K; i + K < p.rows (); ++i) { for (size_t j = K; j + K < p.cols (); ++j) { const size_t n = h ? 
C::indexh (p, i, j) : C::indexv (p, i, j); // luts should have been preloaded assert (l.total (n) != 0); const double x = static_cast<double> (l.sum (n)) / l.total (n); assert (x >= 0.0); assert (x <= 255.0); q (i, j) = round (x); } } return q; } private: friend std::ostream& operator<< (std::ostream &s, const codec &c) { s << c.l; return s; } friend std::istream& operator>> (std::istream &s, codec &c) { s >> c.l; return s; } }; template<size_t N> class multi_codec { private: codec<context> c[N]; public: size_t lut_passes () const { return N; } void update (const image_t &p, const image_t &q, size_t pass) { image_t t (q); // restore q up to this pass for (size_t n = 0; n < pass; ++n) t = c[n].denoise (t, !(n & 1)); // update using restored image c[pass].update (p, t, !(pass & 1)); } image_t denoise (const image_t &q) const { image_t p (q); for (size_t n = 0; n < N; ++n) p = c[n].denoise (p, !(n & 1)); return p; } private: friend std::ostream& operator<< (std::ostream &s, const multi_codec &c) { for (auto i : c.c) s << i; return s; } friend std::istream& operator>> (std::istream &s, multi_codec &c) { for (auto &i : c.c) s >> i; return s; } }; } #endif
EmbeddingBag.h
/******************************************************************************
 * Copyright (c) Intel Corporation - All rights reserved.                     *
 * This file is part of the LIBXSMM library.                                  *
 *                                                                            *
 * For information on the license, see the LICENSE file.                      *
 * Further information: https://github.com/hfp/libxsmm/                       *
 * SPDX-License-Identifier: BSD-3-Clause                                      *
 ******************************************************************************/
/* Dhiraj Kalamkar, Evangelos Georganas (Intel Corp.)
 ******************************************************************************/

//#define JIT_REDUCE_COLS_IDX

#ifdef JIT_REDUCE_COLS_IDX
#include <libxsmm.h>
#endif
#include "utils.h"
#include "rtm.h"

// EmbeddingBag with "sum" pooling over an M x E weight table.
// Bags are described CSR-style: offsets[n] is the start of bag n inside
// indices[], and NS is the total number of indices (the end of the last bag).
// NOTE: the (T(*)[*])/__restrict VLA-pointer casts below are a GNU C
// extension used to index the flat buffers as 2-D [rows][E] arrays.
template <typename T>
class EmbeddingBagImpl
{
public:
  // Allocate the M x E weight table.
  // M: number of embedding rows; E: embedding dimension.
  // my_malloc/alignment come from utils.h — presumably an aligned
  // allocator; alignment value not visible here (TODO confirm).
  EmbeddingBagImpl(int M, int E) : M(M), E(E)
  {
    weight_ = (T*)my_malloc((size_t)M * E * sizeof(T), alignment);
  }

  ~EmbeddingBagImpl()
  {
    my_free(weight_);
    weight_ = 0;
  }

  // Fill the weight table with uniform random values in [low, high).
  void init(T low = -0.1, T high = 0.1)
  {
    init_random(M * E, weight_, low, high);
  }

#ifdef JIT_REDUCE_COLS_IDX
  // Forward pass via a JIT-dispatched libxsmm "reduce columns by index"
  // kernel: output[n][:] = sum of weight[indices[s]][:] over bag n.
  // N: number of bags; NS: total number of indices.
  void forward(int N, int NS, const long *offsets, const long *indices, T *output_)
  {
    T(*__restrict weight)[E] = (T(*)[*])weight_;
    T(*__restrict output)[E] = (T(*)[*])output_;
    libxsmm_meltwfunction_reduce_cols_idx kernel;
    int _ld = E;
    // Index datatype is chosen to match sizeof(long) on this platform.
    kernel = libxsmm_dispatch_meltw_reduce_cols_idx(E, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, (sizeof(long) == 8) ? LIBXSMM_DATATYPE_I64 : LIBXSMM_DATATYPE_I32) ;
#pragma omp parallel for
    for (int n = 0; n < N; n++) {
      libxsmm_meltw_reduce_cols_idx_param params;
      auto start = offsets[n];
      // The last bag ends at NS; every other bag ends at the next offset.
      auto end = (n < N - 1 ? offsets[n + 1] : NS);
      params.n = end - start;
      params.ind_ptr = &indices[start];
      params.inp_ptr = weight;
      params.out_ptr = &output[n][0];
      kernel( &params );
    }
  }
#else
  // Reference forward pass: output[n][:] = sum of weight[indices[s]][:]
  // for s in [offsets[n], end of bag n).
  void forward(int N, int NS, const long *offsets, const long *indices, T *output_)
  {
    T(*__restrict weight)[E] = (T(*)[*])weight_;
    T(*__restrict output)[E] = (T(*)[*])output_;
#pragma omp parallel for
    for (int n = 0; n < N; n++) {
      auto start = offsets[n];
      auto end = (n < N - 1 ? offsets[n + 1] : NS);
#pragma omp simd
      for (long v = 0; v < E; v++)
        output[n][v] = 0;
      for (long s = start; s < end; s++) {
        auto ind = indices[s];
#pragma omp simd
        for (long v = 0; v < E; v++) {
          output[n][v] += weight[ind][v];
        }
      }
    }
  }
#endif

  // Backward of sum-pooling: every index in bag n receives bag n's output
  // gradient, i.e. values[s][:] = gradout[n][:] for s in bag n.
  void backward(int N, int NS, const T *gradout_, const long *offsets, const long *indices,
                T *values_)
  {
    T(*__restrict gradout)[E] = (T(*)[*])gradout_;
    T(*__restrict values)[E] = (T(*)[*])values_;
#pragma omp parallel for
    for (int n = 0; n < N; n++) {
      auto start = offsets[n];
      auto end = (n < N - 1 ? offsets[n + 1] : NS);
      for (long s = start; s < end; s++) {
#pragma omp simd
#ifdef STREAMING_WRITES
#pragma vector nontemporal(values)
#endif
        for (long v = 0; v < E; v++)
          values[s][v] = gradout[n][v];
      }
    }
  }

  // Scaled-gradient update: weight[indices[i]][:] += lr * grads[i][:].
  // Duplicate indices may be updated concurrently by different threads, so
  // each row update runs inside a TransactionScope (rtm.h) guarded by a
  // spin lock fallback — presumably Intel RTM; verify against rtm.h.
  // NOTE(review): the sign convention (+= lr * grad) means callers are
  // expected to pass a negated lr or gradient for descent — confirm.
  void update(int NS, const T *grads_, const long *indices, float lr)
  {
    T(*__restrict weight)[E] = (T(*)[*])weight_;
    T(*__restrict grads)[E] = (T(*)[*])grads_;
    SimpleSpinLock fallBackLock;
#pragma omp parallel for
    for (long i = 0; i < NS; i++) {
      long ind = indices[i];
      {
        TransactionScope guard(fallBackLock, 100, 0);
#pragma omp simd
        for (long v = 0; v < E; v++)
          weight[ind][v] += lr * grads[i][v];
      }
    }
  }

  T *weight_;  // M x E row-major weight table (owned, aligned)
  int M;       // number of embedding rows
  int E;       // embedding dimension
};

// FTyp is defined in utils.h (project-wide element type).
typedef EmbeddingBagImpl<FTyp> EmbeddingBag;
packet-inl.h
/*! * Copyright (c) 2014 by Contributors * \file packet-inl.h * \brief Generic packet vectorization code */ #ifndef MSHADOW_PACKET_INL_H_ #define MSHADOW_PACKET_INL_H_ #ifdef __APPLE__ #include <stdlib.h> #else #include <malloc.h> #endif #include "./base.h" #include "./tensor.h" #include "./expression.h" namespace mshadow { /*! \brief namespace of packet math*/ namespace packet { enum PacketArch { kPlain, kSSE2, }; #if MSHADOW_USE_SSE #define MSHADOW_DEFAULT_PACKET ::mshadow::packet::kSSE2 #else #define MSHADOW_DEFAULT_PACKET ::mshadow::packet::kPlain #endif // whether packet operator is enabled. /*! * \brief Generic packet type * \tparam DType The data type of the packet. * \tparam Arch the Arch of the packet. */ template<typename DType, PacketArch Arch = MSHADOW_DEFAULT_PACKET> struct Packet; template<PacketArch Arch> struct AlignBytes { static const index_t value = 4; }; } // namespace packet } // namespace mshadow namespace mshadow { namespace packet { /*! * \brief analog to cudaMallocPitch, allocate a aligned space with num_line * lspace cells * \param out_pitch output parameter, the actuall space allocated for each line * \param lspace number of cells required for each line * \param num_line number of lines to be allocated */ inline void* AlignedMallocPitch(size_t *out_pitch, size_t lspace, size_t num_line) { const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value; const index_t mask = (1 << bits) - 1; size_t pitch = ((lspace + mask) >> bits) << bits; *out_pitch = pitch; #ifdef _MSC_VER void *res = _aligned_malloc(pitch * num_line, 1 << bits); #else void *res; int ret = posix_memalign(&res, 1 << bits, pitch * num_line); CHECK_EQ(ret, 0) << "AlignedMallocPitch failed"; #endif if (res == NULL) { LOG(FATAL) << "AlignedMallocPitch failed"; } return res; } /*! * \brief free aligned space * \param ptr pointer to space to be freed */ inline void AlignedFree(void *ptr) { #ifdef _MSC_VER _aligned_free(ptr); #else free(ptr); #endif } /*! 
\brief check if a pointer is aligned */ template<PacketArch Arch> inline bool CheckAlign(size_t pitch) { const index_t bits = AlignBytes<Arch>::value; return !(pitch & ((1 << bits) - 1)); } /*! \brief check if a pointer is aligned */ template<PacketArch Arch> inline bool CheckAlign(void *ptr) { return CheckAlign<Arch>(reinterpret_cast<size_t>(ptr)); } /*! * \brief get upper bound of aligned index of size * \param size size of the array * \param fsize size of float */ template<typename DType, PacketArch Arch> inline index_t UpperAlign(index_t size) { const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value; const index_t mask = (1 << bits) - 1; const index_t fsize = sizeof(DType); return (((size * fsize + mask) >> bits) << bits) / fsize; } /*! * \brief get lower bound of aligned index of size * \param size size of the array * \param fsize size of float */ template<typename DType, PacketArch Arch> inline index_t LowerAlign(index_t size) { const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value; const index_t fsize = sizeof(DType); return (((size * fsize) >> bits) << bits) / fsize; } /*! * \brief generic Packet operator * \tparam OP The operator * \tparam DType The data type * \tparam Arch The architecture. 
*/ template<typename OP, typename DType, PacketArch Arch> struct PacketOp { static const bool kEnabled = false; }; // specialization of operators template<typename DType, PacketArch Arch> struct PacketOp<op::plus, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs + rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::minus, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs - rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::mul, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs * rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::div, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs / rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::identity, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& src) { return src; } }; // savers to do storage template<typename SV, typename TFloat, PacketArch Arch> struct Saver{ MSHADOW_CINLINE static void Save(TFloat *dst, const Packet<TFloat, Arch>& src) { Packet<TFloat, Arch> lhs = Packet<TFloat, Arch>::Load(dst); Packet<TFloat, Arch> ans = PacketOp<typename SV::OPType, TFloat, Arch>::Map(lhs, src); ans.Store(dst); } }; template<typename TFloat, PacketArch Arch> struct Saver<sv::saveto, TFloat, Arch> { MSHADOW_CINLINE static void Save(TFloat *dst, const Packet<TFloat, Arch>& src) { src.Store(dst); } }; } // namespace packet } // namespace mshadow #include "packet/plain-inl.h" #if 
MSHADOW_USE_SSE && !defined(__CUDACC__) #include "packet/sse-inl.h" #endif namespace mshadow { namespace expr { typedef packet::PacketArch PacketArch; // same as plan, but use packet template<typename ExpType, typename DType, PacketArch Arch> class PacketPlan { public: /*! * \brief evaluate the expression at index [y][x], * x will be aligned to Packet<DType, Arch>::kSize */ MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const; MSHADOW_CINLINE DType Eval(index_t y, index_t x) const; }; template <typename Device, int dim, typename DType, PacketArch Arch> class PacketPlan<Tensor<Device, dim, DType>, DType, Arch> { public: explicit PacketPlan(const Tensor<Device, dim, DType> &t) :dptr_(t.dptr_), stride_(t.stride_) {} MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const { return packet::Packet<DType, Arch>::Load(&dptr_[y * stride_ + x]); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return dptr_[y * stride_ + x]; } private: const DType *dptr_; index_t stride_; }; template<typename DType, PacketArch Arch> class PacketPlan<ScalarExp<DType>, DType, Arch> { public: explicit PacketPlan(DType scalar) : scalar_(scalar) {} MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const { return packet::Packet<DType, Arch>::Fill(scalar_); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return scalar_; } private: DType scalar_; }; template<typename OP, typename TA, typename TB, int etype, typename DType, PacketArch Arch> class PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> { public: PacketPlan(const PacketPlan<TA, DType, Arch> &lhs, const PacketPlan<TB, DType, Arch> &rhs) : lhs_(lhs), rhs_(rhs) {} MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const { return packet::PacketOp<OP, DType, Arch>::Map(lhs_.EvalPacket(y, x), rhs_.EvalPacket(y, x)); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return OP::Map(lhs_.Eval(y, x), 
rhs_.Eval(y, x)); } private: PacketPlan<TA, DType, Arch> lhs_; PacketPlan<TB, DType, Arch> rhs_; }; template<typename OP, typename TA, int etype, typename DType, PacketArch Arch> class PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch> { public: PacketPlan(const PacketPlan<TA, DType, Arch> &src) : src_(src) {} MSHADOW_CINLINE packet::Packet<DType> EvalPacket(index_t y, index_t x) const { return packet::PacketOp<OP, DType, Arch>::Map(src_.EvalPacket(y, x)); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return OP::Map(src_.Eval(y, x)); } private: PacketPlan<TA, DType, Arch> src_; }; template<PacketArch Arch, typename OP, typename TA, typename TB, typename DType, int etype> inline PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> MakePacketPlan(const BinaryMapExp<OP, TA, TB, DType, etype> &e); template<PacketArch Arch, typename DType> inline PacketPlan<ScalarExp<DType>, DType, Arch> MakePacketPlan(const ScalarExp<DType> &e) { return PacketPlan<ScalarExp<DType>, DType, Arch>(e.scalar_); } template<PacketArch Arch, typename T, typename DType> inline PacketPlan<T, DType, Arch> MakePacketPlan(const RValueExp<T, DType> &e) { return PacketPlan<T, DType, Arch>(e.self()); } template<PacketArch Arch, typename T, int dim, typename DType> inline PacketPlan<T, DType, Arch> MakePacketPlan(const MakeTensorExp<T, cpu, dim, DType> &e) { return PacketPlan<T, DType, Arch>(e.real_self()); } template<PacketArch Arch, typename OP, typename TA, typename DType, int etype> inline PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch> MakePacketPlan(const UnaryMapExp<OP, TA, DType, etype> &e) { return PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch>(MakePacketPlan<Arch>(e.src_)); } template<PacketArch Arch, typename OP, typename TA, typename TB, typename DType, int etype> inline PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> MakePacketPlan(const BinaryMapExp<OP, TA, TB, DType, etype> &e) { return PacketPlan<BinaryMapExp<OP, TA, 
TB, DType, etype>, DType, Arch>(MakePacketPlan<Arch>(e.lhs_), MakePacketPlan<Arch>(e.rhs_)); } /*! * \brief static check packet enable * * \tparam Device the type of Device * \tparam dim dimension of the tensor * \tparam E expression */ template<typename E, PacketArch Arch> struct PacketCheck{ static const bool kPass = false; }; template<PacketArch Arch> struct PacketCheck<float, Arch> { static const bool kPass = true; }; template<PacketArch Arch> struct PacketCheck<double, Arch> { static const bool kPass = true; }; template<typename DType, PacketArch Arch> struct PacketCheck<ScalarExp<DType>, Arch> { static const bool kPass = PacketCheck<DType, Arch>::kPass; }; template<int dim, typename DType, PacketArch Arch> struct PacketCheck<Tensor<cpu, dim, DType>, Arch> { static const bool kPass = PacketCheck<DType, Arch>::kPass; }; template<typename OP, typename TA, typename DType, int etype, PacketArch Arch> struct PacketCheck<UnaryMapExp<OP, TA, DType, etype>, Arch> { static const bool kPass = PacketCheck<TA, Arch>::kPass && packet::PacketOp<OP, DType, Arch>::kEnabled; }; template<typename OP, typename TA, typename TB, typename DType, int etype, PacketArch Arch> struct PacketCheck< BinaryMapExp<OP, TA, TB, DType, etype>, Arch> { static const bool kPass = packet::PacketOp<OP, DType, Arch>::kEnabled && PacketCheck<TA, Arch>::kPass && PacketCheck<TB, Arch>::kPass; }; //---------------------------------------------------- // Check if data is aligned and allow packet operation //---------------------------------------------------- template<int dim, typename E, PacketArch Arch> struct PacketAlignCheck { inline static bool Check(const E &exp) { return false; } }; template<int dim, typename DType, PacketArch Arch> struct PacketAlignCheck<dim, ScalarExp<DType>, Arch> { inline static bool Check(const ScalarExp<DType> &exp) { return true; } }; template<int dim, typename DType, PacketArch Arch> struct PacketAlignCheck<dim, Tensor<cpu, dim, DType>, Arch> { inline static bool 
Check(const Tensor<cpu, dim, DType> &t) { return packet::CheckAlign<Arch>(t.dptr_) && packet::CheckAlign<Arch>(t.stride_ * sizeof(DType)); } }; template<int dim, typename OP, typename TA, typename DType, int etype, PacketArch Arch> struct PacketAlignCheck<dim, UnaryMapExp<OP, TA, DType, etype>, Arch> { inline static bool Check(const UnaryMapExp<OP, TA, DType, etype> &t) { return PacketAlignCheck<dim, TA, Arch>::Check(t.src_); } }; template<int dim, typename OP, typename TA, typename TB, typename DType, int etype, PacketArch Arch> struct PacketAlignCheck<dim, BinaryMapExp<OP, TA, TB, DType, etype>, Arch> { inline static bool Check(const BinaryMapExp<OP, TA, TB, DType, etype> &t) { return PacketAlignCheck<dim, TA, Arch>::Check(t.lhs_) && PacketAlignCheck<dim, TB, Arch>::Check(t.rhs_); } }; /*! * \brief use PacketPlan to compute result */ template<typename SV, typename E, int dim, typename DType, PacketArch Arch> inline void MapPacketPlan(Tensor<cpu, dim, DType> _dst, const expr::PacketPlan<E, DType, Arch>& plan) { Tensor<cpu, 2, DType> dst = _dst.FlatTo2D(); const index_t xlen = packet::LowerAlign<DType, Arch>(dst.size(1)); #if (MSHADOW_USE_CUDA == 0) #pragma omp parallel for #endif for (openmp_index_t y = 0; y < dst.size(0); ++y) { for (index_t x = 0; x < xlen; x += packet::Packet<DType, Arch>::kSize) { packet::Saver<SV, DType, Arch>::Save(&dst[y][x], plan.EvalPacket(y, x)); } for (index_t x = xlen; x < dst.size(1); ++x) { SV::Save(dst[y][x], plan.Eval(y, x)); } } } } // namespace expr } // namespace mshadow #endif // MSHADOW_PACKET_INL_H_
symm_c_coo_u_lo_row_conj.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"

#define CACHELINE 64

/*
 * Symmetric COO * dense-matrix multiply (multi-RHS): y = alpha*A*x + beta*y,
 * where A is stored as the strictly-lower triangle of an m x m COO matrix
 * with an implicit unit diagonal (the "_u_lo" variant; the unit diagonal is
 * what makes the first pass compute beta*y + alpha*x rather than just
 * beta*y).  "conj" in the kernel name: alpha_mul_3c presumably conjugates
 * mat->values[ai] while scaling by alpha — verify against the macro
 * definition in alphasparse.
 *
 * x is n (=columns) dense columns with leading dimension ldx; y likewise
 * with ldy.  index2(r, c, ld) is the project's 2-D-to-flat index helper.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_COO *mat,
                           const ALPHA_Number *x,
                           const ALPHA_INT columns,
                           const ALPHA_INT ldx,
                           const ALPHA_Number beta,
                           ALPHA_Number *y,
                           const ALPHA_INT ldy)
{
    ALPHA_INT m = mat->rows;
    ALPHA_INT n = columns;
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* Pass 1: y = beta*y + alpha*x.  The alpha*x term accounts for the
     * implicit unit diagonal of A.  Rows are independent, so the loop is
     * parallelized over r. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < m; ++r)
        for (ALPHA_INT c = 0; c < n; c++)
        {
            ALPHA_Number tmp1, tmp2;
            alpha_mul(tmp1, y[index2(r, c, ldy)], beta);
            alpha_mul(tmp2, x[index2(r, c, ldx)], alpha);
            alpha_add(y[index2(r, c, ldy)], tmp1, tmp2);
        }

    /* Pass 2: scatter the off-diagonal contributions.  Every thread scans
     * the whole nnz list but only writes its own cache-line-aligned band of
     * columns [bcl, bch), so the symmetric double-update (row r and row ac)
     * never races between threads and false sharing is avoided. */
    ALPHA_INT block_size = CACHELINE / sizeof(ALPHA_Number);
    ALPHA_INT block_num = (columns + block_size - 1) / block_size;
    if (num_threads > block_num)
        num_threads = block_num;
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT bcl = cross_block_low(tid, num_threads, block_num) * block_size;
        ALPHA_INT bch = cross_block_high(tid, num_threads, block_num) * block_size;
        if (bch > columns)
            bch = columns;  /* last band may be shorter than a full block */
        for (ALPHA_INT ai = 0; ai < mat->nnz; ai++)
        {
            ALPHA_INT ac = mat->col_indx[ai];
            ALPHA_INT r = mat->row_indx[ai];
            /* Only strictly-lower entries (ac < r) participate; diagonal
             * entries were already handled implicitly in pass 1 and
             * upper-triangle entries are not stored. */
            if (ac < r)
            {
                ALPHA_Number val;
                alpha_mul_3c(val, alpha, mat->values[ai]);
                /* A(r,ac) contributes to row r ... */
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(r, c, ldy)], val, x[index2(ac, c, ldx)]);
                /* ... and, by symmetry, to row ac. */
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(ac, c, ldy)], val, x[index2(r, c, ldx)]);
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
omp_ex1.c
#include <stdio.h>
#include<stdlib.h>
#include <omp.h>

/* Increment a counter 1000 times across the requested number of OpenMP
 * threads and print the final value (always 1000). */
int main (int argc, char *argv[]){
    if(argc != 2){
        printf("usage: %s [num threads] \n",argv[0]);
        return 1;  /* non-zero: this is an error, not a successful run */
    }

    int i,a = 0;
    int num_threads = atoi(argv[1]);
    printf("num threads: %d\n",num_threads);
    omp_set_num_threads(num_threads);

    /* reduction(+:a) gives each thread a private copy of 'a' and sums them
     * at the end of the loop; the previous shared a++ was a data race and
     * could lose updates. */
    #pragma omp parallel for reduction(+:a)
    for(i = 0; i < 1000; ++i){
        a++;
    }

    printf("a is %d\n",a);
    return 0;
}
omp_kmeans.c
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* File: kmeans_clustering.c (OpenMP version) */ /* Description: Implementation of simple k-means clustering algorithm */ /* This program takes an array of N data objects, each with */ /* M coordinates and performs a k-means clustering given a */ /* user-provided value of the number of clusters (K). The */ /* clustering results are saved in 2 arrays: */ /* 1. a returned array of size [K][N] indicating the center */ /* coordinates of K clusters */ /* 2. membership[N] stores the cluster center ids, each */ /* corresponding to the cluster a data object is assigned */ /* */ /* Author: Wei-keng Liao */ /* ECE Department, Northwestern University */ /* email: wkliao@ece.northwestern.edu */ /* Copyright, 2005, Wei-keng Liao */ /* */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include <stdio.h> #include <stdlib.h> #include <omp.h> #include "kmeans.h" /*----< euclid_dist_2() >----------------------------------------------------*/ /* square of Euclid distance between two multi-dimensional points */ __inline static float euclid_dist_2(int numdims, /* no. dimensions */ float *coord1, /* [numdims] */ float *coord2) /* [numdims] */ { int i; float ans=0.0; for (i=0; i<numdims; i++) ans += (coord1[i]-coord2[i]) * (coord1[i]-coord2[i]); return(ans); } /*----< find_nearest_cluster() >---------------------------------------------*/ __inline static int find_nearest_cluster(int numClusters, /* no. clusters */ int numCoords, /* no. 
coordinates */ float *distance, float *object, /* [numCoords] */ float **clusters) /* [numClusters][numCoords] */ { int index, i; float dist, min_dist; /* find the cluster id that has min distance to object */ index = 0; min_dist = euclid_dist_2(numCoords, object, clusters[0]); for (i=1; i<numClusters; i++) { dist = euclid_dist_2(numCoords, object, clusters[i]); /* no need square root */ if (dist < min_dist) { /* find the min and its array index */ min_dist = dist; index = i; } } *distance = min_dist; return(index); } /*----< kmeans_clustering() >------------------------------------------------*/ /* return an array of cluster centers of size [numClusters][numCoords] */ float** omp_kmeans(int is_perform_atomic, /* in: */ float **objects, /* in: [numObjs][numCoords] */ int numCoords, /* no. coordinates */ int numObjs, /* no. objects */ int numClusters, /* no. clusters */ float **clustersInit, float threshold, /* % objects change membership */ int *membership, /* out: [numObjs] */ int *loop_iterations) { int i, j, k, index, loop=0; int *newClusterSize; /* [numClusters]: no. objects assigned in each new cluster */ float delta; /* % of objects change their clusters */ float **clusters; /* out: [numClusters][numCoords] */ float **newClusters; /* [numClusters][numCoords] */ double timing; int nthreads; /* no. 
threads */ int **local_newClusterSize; /* [nthreads][numClusters] */ float ***local_newClusters; /* [nthreads][numClusters][numCoords] */ nthreads = omp_get_max_threads(); /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ clusters = (float**) malloc(numClusters * sizeof(float*)); assert(clusters != NULL); clusters[0] = (float*) malloc(numClusters * numCoords * sizeof(float)); assert(clusters[0] != NULL); for (i=1; i<numClusters; i++) clusters[i] = clusters[i-1] + numCoords; /* pick clusterInit as initial objects of cluster elements*/ for (i=0; i<numClusters; i++) for (j=0; j<numCoords; j++) clusters[i][j] = clustersInit[i][j]; /* initialize membership[] */ for (i=0; i<numObjs; i++) membership[i] = -1; /* need to initialize newClusterSize and newClusters[0] to all 0 */ newClusterSize = (int*) calloc(numClusters, sizeof(int)); assert(newClusterSize != NULL); newClusters = (float**) malloc(numClusters * sizeof(float*)); assert(newClusters != NULL); newClusters[0] = (float*) calloc(numClusters * numCoords, sizeof(float)); assert(newClusters[0] != NULL); for (i=1; i<numClusters; i++) newClusters[i] = newClusters[i-1] + numCoords; /* initialize dist */ float* dist = (float*) malloc(numObjs * sizeof(float)); float totalDistance = 0.0; if (!is_perform_atomic) { /* each thread calculates new centers using a private space, then thread 0 does an array reduction on them. 
This approach should be faster */ local_newClusterSize = (int**) malloc(nthreads * sizeof(int*)); assert(local_newClusterSize != NULL); local_newClusterSize[0] = (int*) calloc(nthreads*numClusters, sizeof(int)); assert(local_newClusterSize[0] != NULL); for (i=1; i<nthreads; i++) local_newClusterSize[i] = local_newClusterSize[i-1]+numClusters; /* local_newClusters is a 3D array */ local_newClusters =(float***)malloc(nthreads * sizeof(float**)); assert(local_newClusters != NULL); local_newClusters[0] =(float**) malloc(nthreads * numClusters * sizeof(float*)); assert(local_newClusters[0] != NULL); for (i=1; i<nthreads; i++) local_newClusters[i] = local_newClusters[i-1] + numClusters; for (i=0; i<nthreads; i++) { for (j=0; j<numClusters; j++) { local_newClusters[i][j] = (float*)calloc(numCoords, sizeof(float)); assert(local_newClusters[i][j] != NULL); } } } if (_debug) timing = omp_get_wtime(); do { delta = 0.0; if (is_perform_atomic) { #pragma omp parallel for \ private(i,j,index) \ firstprivate(numObjs,numClusters,numCoords) \ shared(objects,clusters,membership,newClusters,newClusterSize) \ schedule(static) \ reduction(+:delta) for (i=0; i<numObjs; i++) { /* find the array index of nestest cluster center */ index = find_nearest_cluster(numClusters, numCoords, &dist[i], objects[i], clusters); /* if membership changes, increase delta by 1 */ if (membership[i] != index) delta += 1.0; /* assign the membership to object i */ membership[i] = index; /* update new cluster centers : sum of objects located within */ #pragma omp atomic newClusterSize[index]++; for (j=0; j<numCoords; j++) #pragma omp atomic newClusters[index][j] += objects[i][j]; } } else { #pragma omp parallel \ shared(objects,clusters,membership,local_newClusters,local_newClusterSize) { int tid = omp_get_thread_num(); #pragma omp for \ private(i,j,index) \ firstprivate(numObjs,numClusters,numCoords) \ schedule(static) \ reduction(+:delta) for (i=0; i<numObjs; i++) { /* find the array index of nestest cluster 
center */ index = find_nearest_cluster(numClusters, numCoords, &dist[i], objects[i], clusters); /* if membership changes, increase delta by 1 */ if (membership[i] != index) delta += 1.0; /* assign the membership to object i */ membership[i] = index; /* update new cluster centers : sum of all objects located within (average will be performed later) */ local_newClusterSize[tid][index]++; for (j=0; j<numCoords; j++) local_newClusters[tid][index][j] += objects[i][j]; } } /* end of #pragma omp parallel */ /* let the main thread perform the array reduction */ for (i=0; i<numClusters; i++) { for (j=0; j<nthreads; j++) { newClusterSize[i] += local_newClusterSize[j][i]; local_newClusterSize[j][i] = 0.0; for (k=0; k<numCoords; k++) { newClusters[i][k] += local_newClusters[j][i][k]; local_newClusters[j][i][k] = 0.0; } } } } /* average the sum and replace old cluster centers with newClusters */ for (i=0; i<numClusters; i++) { for (j=0; j<numCoords; j++) { if (newClusterSize[i] > 1) clusters[i][j] = newClusters[i][j] / newClusterSize[i]; newClusters[i][j] = 0.0; /* set back to 0 */ } newClusterSize[i] = 0; /* set back to 0 */ } /* compute total distance and display results*/ totalDistance = 0.0; for (i=0; i<numObjs; i++) totalDistance += dist[i]; delta /= numObjs; if (_debug) printf("Total distance = %f delta = %.3f\n", totalDistance, delta); } while (delta > threshold && loop++ < 500); *loop_iterations = loop + 1; if (_debug) { timing = omp_get_wtime() - timing; printf("nloops = %2d (T = %7.4f)",loop,timing); } if (!is_perform_atomic) { free(local_newClusterSize[0]); free(local_newClusterSize); for (i=0; i<nthreads; i++) for (j=0; j<numClusters; j++) free(local_newClusters[i][j]); free(local_newClusters[0]); free(local_newClusters); } free(newClusters[0]); free(newClusters); free(newClusterSize); return clusters; }
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"

/*
  Typedef declarations.

  TransformPacket holds one row of the 3x3 channel-mixing lookup tables
  (x_map/y_map/z_map) built by sRGBTransformImage(): x, y and z are the
  contributions of one source channel to the three destination channels.
*/
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;

/*
  Forward declarations.
*/
static MagickBooleanType
  TransformsRGBImage(Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C o l o r s p a c e T y p e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageColorspaceType() returns the potential type of image:
%  sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
%  To ensure the image type matches its potential, use SetImageColorspaceType():
%
%      (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
%        exception);
%
%  The format of the GetImageColorspaceType method is:
%
%      ColorspaceType GetImageColorspaceType(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ColorspaceType
    colorspace;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colorspace=image->colorspace;
  /*
    Inspect the actual pixel content: if every pixel is bi-level or gray,
    report GRAY regardless of the colorspace currently recorded on the image.
  */
  type=IdentifyImageType(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleAlphaType))
    colorspace=GRAYColorspace;
  return(colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   +   s R G B T r a n s f o r m I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  sRGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the sRGBTransformImage method is:
%
%      MagickBooleanType sRGBTransformImage(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Forward colorspace converters: each maps one quantum-range RGB triplet to
  the named colorspace, writing the three output components through the
  pointer arguments.  All are pure functions of their inputs.
*/
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  /* CMY is simply the complement of RGB, scaled to [0..1]. */
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}

static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  /* Fixed 3x3 XYZ-to-LMS matrix. */
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}

static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  double
    X,
    Y,
    Z;

  /* Two-step conversion: RGB -> XYZ -> LMS. */
  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}

static void ConvertRGBToLab(const double red,const double green,
  const double blue,double *L,double *a,double *b)
{
  double
    X,
    Y,
    Z;

  /* Two-step conversion: RGB -> XYZ -> Lab. */
  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLab(X,Y,Z,L,a,b);
}

static void ConvertRGBToLuv(const double red,const double green,
  const double blue,double *L,double *u,double *v)
{
  double
    X,
    Y,
    Z;

  /* Two-step conversion: RGB -> XYZ -> Luv. */
  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,L,u,v);
}

static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    gamma,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  /* PerceptibleReciprocal() guards against division by zero when X+Y+Z=0. */
  gamma=PerceptibleReciprocal(X+Y+Z);
  *low_x=gamma*X;
  *low_y=gamma*Y;
  *cap_Y=Y;
}

static void ConvertRGBToYDbDr(const double red,const double green,
  const double blue,double *Y,double *Db,double *Dr)
{
  /* Db/Dr are offset by 0.5 so the nominal [-0.5..0.5] range maps to [0..1]. */
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}

static void ConvertRGBToYIQ(const double red,const double green,
  const double blue,double *Y,double *I,double *Q)
{
  /* I/Q are offset by 0.5 so the nominal [-0.5..0.5] range maps to [0..1]. */
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}

static void
ConvertRGBToYPbPr(const double red,const double green,
  const double blue,double *Y,double *Pb,double *Pr)
{
  /* Pb/Pr are offset by 0.5 so the nominal [-0.5..0.5] range maps to [0..1]. */
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}

static void ConvertRGBToYCbCr(const double red,const double green,
  const double blue,double *Y,double *Cb,double *Cr)
{
  /* YCbCr here is identical to YPbPr (same matrix, same offsets). */
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}

static void ConvertRGBToYUV(const double red,const double green,
  const double blue,double *Y,double *U,double *V)
{
  /* U/V are offset by 0.5 so the nominal [-0.5..0.5] range maps to [0..1]. */
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}

static MagickBooleanType sRGBTransformImage(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag  "RGBTransform/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    primary_info;

  register ssize_t
    i;

  ssize_t
    y;

  TransformPacket
    *x_map,
    *y_map,
    *z_map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Callers must route these colorspaces elsewhere; see the asserts. */
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
  status=MagickTrue;
  progress=0;
  /*
    Colorspaces handled by direct per-pixel math return from inside this
    switch; everything else falls through to the map-based (lookup-table)
    conversion below.
  */
  switch (colorspace)
  {
    case CMYKColorspace:
    {
      PixelInfo
        zero;

      /*
        Convert RGB to CMYK colorspace.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      GetPixelInfo(image,&zero);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=zero;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          GetPixelInfoPixel(image,q,&pixel);
          ConvertRGBToCMYK(&pixel);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->type=image->alpha_trait == UndefinedPixelTrait ?
        ColorSeparationType : ColorSeparationAlphaType;
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LinearGRAYColorspace:
    case GRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelGray(image,ClampToQuantum(GetPixelIntensity(image,q)),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case CMYColorspace:
    case HCLColorspace:
    case HCLpColorspace:
    case HSBColorspace:
    case HSIColorspace:
    case HSLColorspace:
    case HSVColorspace:
    case HWBColorspace:
    case LabColorspace:
    case LCHColorspace:
    case LCHabColorspace:
    case LCHuvColorspace:
    case LMSColorspace:
    case LuvColorspace:
    case xyYColorspace:
    case XYZColorspace:
    case YCbCrColorspace:
    case YDbDrColorspace:
    case YIQColorspace:
    case YPbPrColorspace:
    case YUVColorspace:
    {
      /*
        Transform image from sRGB to target colorspace.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red,
            X,
            Y,
            Z;

          red=(double) GetPixelRed(image,q);
          green=(double) GetPixelGreen(image,q);
          blue=(double) GetPixelBlue(image,q);
          /* Dispatch to the matching forward converter. */
          switch (colorspace)
          {
            case CMYColorspace:
            {
              ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLColorspace:
            {
              ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLpColorspace:
            {
              ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSBColorspace:
            {
              ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSIColorspace:
            {
              ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSLColorspace:
            {
              ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSVColorspace:
            {
              ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HWBColorspace:
            {
              ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LabColorspace:
            {
              ConvertRGBToLab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHColorspace:
            case LCHabColorspace:
            {
              ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHuvColorspace:
            {
              ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LMSColorspace:
            {
              ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LuvColorspace:
            {
              ConvertRGBToLuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case xyYColorspace:
            {
              ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case XYZColorspace:
            {
              ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YCbCrColorspace:
            {
              ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YDbDrColorspace:
            {
              ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YIQColorspace:
            {
              ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YPbPrColorspace:
            {
              ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YUVColorspace:
            {
              ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
              break;
            }
            default:
            {
              X=QuantumScale*red;
              Y=QuantumScale*green;
              Z=QuantumScale*blue;
              break;
            }
          }
          SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
          SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
          SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LogColorspace:
    {
#define DisplayGamma  (1.0/1.7)
#define FilmGamma  0.6
#define ReferenceBlack  95.0
#define ReferenceWhite  685.0

      const char
        *value;

      double
        black,
        density,
        film_gamma,
        gamma,
        reference_black,
        reference_white;

      Quantum
        *logmap;

      /*
        Transform RGB to Log colorspace.  The defaults above can be
        overridden by the "gamma", "film-gamma", "reference-black" and
        "reference-white" image properties.
      */
      density=DisplayGamma;
      gamma=DisplayGamma;
      value=GetImageProperty(image,"gamma",exception);
      if (value != (const char *) NULL)
        gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
      film_gamma=FilmGamma;
      value=GetImageProperty(image,"film-gamma",exception);
      if (value != (const char *) NULL)
        film_gamma=StringToDouble(value,(char **) NULL);
      reference_black=ReferenceBlack;
      value=GetImageProperty(image,"reference-black",exception);
      if (value != (const char *) NULL)
        reference_black=StringToDouble(value,(char **) NULL);
      reference_white=ReferenceWhite;
      value=GetImageProperty(image,"reference-white",exception);
      if (value != (const char *) NULL)
        reference_white=StringToDouble(value,(char **) NULL);
      logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
        sizeof(*logmap));
      if (logmap == (Quantum *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
        film_gamma);
      /* Precompute the quantum -> log-encoded quantum lookup table. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
        logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
          log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/
          film_gamma))/1024.0));
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=(ssize_t) image->columns; x != 0; x--)
        {
          double
            blue,
            green,
            red;

          /* Linearize the sRGB-encoded channels before the log mapping. */
          red=(double) DecodePixelGamma((MagickRealType)
            GetPixelRed(image,q));
          green=(double) DecodePixelGamma((MagickRealType)
            GetPixelGreen(image,q));
          blue=(double) DecodePixelGamma((MagickRealType)
            GetPixelBlue(image,q));
          SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
          SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
            q);
          SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      logmap=(Quantum *) RelinquishMagickMemory(logmap);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case RGBColorspace:
    case scRGBColorspace:
    {
      /*
        Transform image from sRGB to linear RGB.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red;

          red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
          green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
          blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
          SetPixelRed(image,ClampToQuantum(red),q);
          SetPixelGreen(image,ClampToQuantum(green),q);
          SetPixelBlue(image,ClampToQuantum(blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    default:
      break;
  }
  /*
    Allocate the tables.
  */
  x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*x_map));
  y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*y_map));
  z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*z_map));
  if ((x_map == (TransformPacket *) NULL) ||
      (y_map == (TransformPacket *) NULL) ||
      (z_map == (TransformPacket *) NULL))
    {
      if (x_map != (TransformPacket *) NULL)
        x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
      if (y_map != (TransformPacket *) NULL)
        y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
      if (z_map != (TransformPacket *) NULL)
        z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(&primary_info,0,sizeof(primary_info));
  switch (colorspace)
  {
    case OHTAColorspace:
    {
      /*
        Initialize OHTA tables:

          I1 = 0.33333*R+0.33334*G+0.33333*B
          I2 = 0.50000*R+0.00000*G-0.50000*B
          I3 =-0.25000*R+0.50000*G-0.25000*B

        I and Q, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.33333*(double) i);
        y_map[i].x=(MagickRealType) (0.33334*(double) i);
        z_map[i].x=(MagickRealType) (0.33333*(double) i);
        x_map[i].y=(MagickRealType) (0.50000*(double) i);
        y_map[i].y=(MagickRealType) (0.00000*(double) i);
        z_map[i].y=(MagickRealType) (-0.50000*(double) i);
        x_map[i].z=(MagickRealType) (-0.25000*(double) i);
        y_map[i].z=(MagickRealType) (0.50000*(double) i);
        z_map[i].z=(MagickRealType) (-0.25000*(double) i);
      }
      break;
    }
    case Rec601YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.601):

          Y =  0.2988390*R+0.5868110*G+0.1143500*B
          Cb= -0.1687367*R-0.3312640*G+0.5000000*B
          Cr=  0.5000000*R-0.4186880*G-0.0813120*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.298839*(double) i);
        y_map[i].x=(MagickRealType) (0.586811*(double) i);
        z_map[i].x=(MagickRealType) (0.114350*(double) i);
        x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
        y_map[i].y=(MagickRealType) (-0.331264*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].z=(MagickRealType) (-0.418688*(double) i);
        z_map[i].z=(MagickRealType) (-0.081312*(double) i);
      }
      break;
    }
    case Rec709YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.709):

          Y =  0.212656*R+0.715158*G+0.072186*B
          Cb= -0.114572*R-0.385428*G+0.500000*B
          Cr=  0.500000*R-0.454153*G-0.045847*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.212656*(double) i);
        y_map[i].x=(MagickRealType) (0.715158*(double) i);
        z_map[i].x=(MagickRealType) (0.072186*(double) i);
        x_map[i].y=(MagickRealType) (-0.114572*(double) i);
        y_map[i].y=(MagickRealType) (-0.385428*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].z=(MagickRealType) (-0.454153*(double) i);
        z_map[i].z=(MagickRealType) (-0.045847*(double) i);
      }
      break;
    }
    case YCCColorspace:
    {
      /*
        Initialize YCC tables:

          Y =  0.298839*R+0.586811*G+0.114350*B
          C1= -0.298839*R-0.586811*G+0.88600*B
          C2=  0.70100*R-0.586811*G-0.114350*B

        YCC is scaled by 1.3584.  C1 zero is 156 and C2 is at 137.
      */
      primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
      primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
      /* Linear segment near black, then the 1.099*v-0.099 transfer curve. */
      for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
      {
        x_map[i].x=0.005382*i;
        y_map[i].x=0.010566*i;
        z_map[i].x=0.002052*i;
        x_map[i].y=(-0.003296)*i;
        y_map[i].y=(-0.006471)*i;
        z_map[i].y=0.009768*i;
        x_map[i].z=0.009410*i;
        y_map[i].z=(-0.007880)*i;
        z_map[i].z=(-0.001530)*i;
      }
      for ( ; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=0.298839*(1.099*i-0.099);
        y_map[i].x=0.586811*(1.099*i-0.099);
        z_map[i].x=0.114350*(1.099*i-0.099);
        x_map[i].y=(-0.298839)*(1.099*i-0.099);
        y_map[i].y=(-0.586811)*(1.099*i-0.099);
        z_map[i].y=0.88600*(1.099*i-0.099);
        x_map[i].z=0.70100*(1.099*i-0.099);
        y_map[i].z=(-0.586811)*(1.099*i-0.099);
        z_map[i].z=(-0.114350)*(1.099*i-0.099);
      }
      break;
    }
    default:
    {
      /*
        Linear conversion tables.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (1.0*(double) i);
        y_map[i].x=(MagickRealType) 0.0;
        z_map[i].x=(MagickRealType) 0.0;
        x_map[i].y=(MagickRealType) 0.0;
        y_map[i].y=(MagickRealType) (1.0*(double) i);
        z_map[i].y=(MagickRealType) 0.0;
        x_map[i].z=(MagickRealType) 0.0;
        y_map[i].z=(MagickRealType) 0.0;
        z_map[i].z=(MagickRealType) (1.0*(double) i);
      }
      break;
    }
  }
  /*
    Convert from sRGB.
  */
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Convert DirectClass image.
      */
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        register unsigned int
          blue,
          green,
          red;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelRed(image,q)));
          green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelGreen(image,q)));
          blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelBlue(image,q)));
          pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
            primary_info.x;
          pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
            primary_info.y;
          pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
            primary_info.z;
          SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
          SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
          SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,sRGBTransformImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case PseudoClass:
    {
      register unsigned int
        blue,
        green,
        red;

      /*
        Convert PseudoClass image: only the colormap entries need mapping.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        PixelInfo
          pixel;

        red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
        green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
        blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
        pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
        pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
        pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
        image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
        image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
        image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
      }
      (void) SyncImage(image,exception);
      break;
    }
  }
  /*
    Relinquish resources.
  */
  z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
  y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
  x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
  if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
    return(MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r s p a c e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColorspace() sets the colorspace member of the Image structure.
%
%  The format of the SetImageColorspace method is:
%
%      MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    type;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->colorspace == colorspace)
    return(MagickTrue);
  image->colorspace=colorspace;
  /* Reset colorimetry metadata; it is re-derived per colorspace below. */
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /* LinearGRAY carries no gamma encoding. */
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.000;
      type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;  /* linear spaces: no transfer-curve gamma */
    else
      {
        /*
          NOTE(review): these constants look like the sRGB/Rec.709 primaries
          and D65 white point -- confirm against the colorimetry reference.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  status=SyncImagePixelCache(image,exception);
  image->type=type;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e G r a y                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image have the
%  same red, green, and blue intensities and changes the type of the image to
%  bi-level or
grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *value;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsImageGray(image))
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /* Honor an explicit opt-out via the colorspace:auto-grayscale property. */
  value=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(value) != MagickFalse)
    return(MagickFalse);
  type=IdentifyImageGray(image,exception);
  if (type == UndefinedType)
    return(MagickFalse);
  /* Pixels are provably gray: switch the colorspace and resync the cache. */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M o n o c h r o m e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  const char
    *value;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /* Honor an explicit opt-out via the colorspace:auto-grayscale property. */
  value=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(value) != MagickFalse)
    return(MagickFalse);
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  /* Pixels are provably bi-level: switch colorspace and resync the cache. */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace, changing the
%  image data to reflect the new colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Same colorspace requested: delegate to SetImageColorspace() and return.
  */
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    NOTE(review): the embedded ICC/ICM profiles are unconditionally dropped
    here — presumably they would be stale after the transform; confirm this
    is intended for every target colorspace.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  /*
    Gray targets short-circuit through GrayscaleImage(); Undefined just
    records the colorspace.  Order of these guards is significant.
  */
  if (colorspace == LinearGRAYColorspace)
    return(GrayscaleImage(image,Rec709LuminancePixelIntensityMethod,exception));
  if (colorspace == GRAYColorspace)
    return(GrayscaleImage(image,Rec709LumaPixelIntensityMethod,exception));
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  status=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformsRGBImage(image,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Convert the reference image from sRGB to an alternate colorspace.
  */
  if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m s R G B I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformsRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values
%  to be [0..QuantumRange].
%
%  The format of the TransformsRGBImage method is:
%
%      MagickBooleanType TransformsRGBImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convert subtractive CMY (each channel in [0..1]) to additive RGB quanta. */
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,double *red,double *green,double *blue)
{
  *red=QuantumRange*(1.0-cyan);
  *green=QuantumRange*(1.0-magenta);
  *blue=QuantumRange*(1.0-yellow);
}

/*
  LMS cone response to CIE XYZ via a fixed 3x3 matrix (inverse of the
  XYZ->LMS matrix used on the forward path — TODO confirm against
  sRGBTransformImage()).
*/
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

/* LMS -> RGB, composed as LMS -> XYZ -> RGB. */
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Luv -> RGB.  Inputs arrive packed in [0..1]; the scaling below unpacks
  L to [0..100], u to [-134..220] and v to [-140..122] before the XYZ hop.
*/
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* Round to the nearest integer, clamped to the YCC table domain [0..1388]. */
static inline ssize_t RoundToYCC(const double value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

/*
  Lab -> RGB.  Inputs arrive packed in [0..1]; L is unpacked to [0..100]
  and a/b are re-centered around zero before the XYZ hop.
*/
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* xyY chromaticity + luminance to RGB. */
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,double *red,double *green,double *blue)
{
  double
    gamma,
    X,
    Y,
    Z;

  /*
    PerceptibleReciprocal() guards the 1/y division against y == 0.
  */
  gamma=PerceptibleReciprocal(low_y);
  X=gamma*cap_Y*low_x;
  Y=cap_Y;
  Z=gamma*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static void
ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  double *red,double *green,double *blue)
{
  /*
    Y'PbPr -> RGB.  Pb/Pr arrive offset by 0.5 (packed into [0..1]); the
    coefficients match the Rec. 601 inverse used by the
    Rec601YCbCrColorspace tables later in this file.
  */
  *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+
    1.4019995886561440468*(Pr-0.5));
  *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)-
    0.71413649331646789076*(Pr-0.5));
  *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+
    2.1453384174593273e-06*(Pr-0.5));
}

/* Y'CbCr uses the identical inverse matrix as Y'PbPr here. */
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,double *red,double *green,double *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}

/* YIQ -> RGB; I/Q arrive offset by 0.5 (packed into [0..1]). */
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  double *red,double *green,double *blue)
{
  *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754*
    (Q-0.5));
  *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427*
    (Q-0.5));
  *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374*
    (Q-0.5));
}

/* YDbDr -> RGB; Db/Dr arrive offset by 0.5 (packed into [0..1]). */
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  double *red,double *green,double *blue)
{
  *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
    0.52591263066186533*(Dr-0.5));
  *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
    0.26789932820759876*(Dr-0.5));
  *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
    7.9202543533108e-05*(Dr-0.5));
}

/* YUV -> RGB; U/V arrive offset by 0.5 (packed into [0..1]). */
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  double *red,double *green,double *blue)
{
  *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825*
    (V-0.5));
  *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797*
    (V-0.5));
  *blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04*
    (V-0.5));
}

static MagickBooleanType TransformsRGBImage(Image *image,
  ExceptionInfo *exception)
{
#define TransformsRGBImageTag  "Transform/Image"

  /*
    Lookup table used by the YCC decode path (see the YCCColorspace case
    below); indexed via RoundToYCC() over [0..1388].
  */
  static const float
    YCCMap[1389] =
    {
      0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
      0.004323f, 0.005043f, 0.005764f,
0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 
0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 
0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 
0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 
0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 
0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 
0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: case GRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(image,q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray=EncodePixelGamma(gray); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case 
XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == 
(Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); 
SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) 
ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,TransformsRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; register size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. 
*/ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
conjugate_gradient.c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>

#include "nb/memory_bot.h"
#include "nb/solver_bot/sparse/sparse.h"
#include "nb/solver_bot/sparse/solvers/conjugate_gradient.h"

#include "../sparse_struct.h"

/*
 * Solve the sparse linear system A x = b with the (unpreconditioned)
 * conjugate gradient method, parallelized with OpenMP.
 *
 * A                    : sparse matrix in per-row compressed form
 *                        (rows_values / rows_index / rows_size).
 *                        NOTE(review): CG only converges for symmetric
 *                        positive-definite matrices -- assumed, not checked.
 * b                    : right-hand side, length A->N.
 * _x                   : in/out -- initial guess on entry, solution on exit.
 * max_iter             : iteration cap.
 * tolerance            : convergence threshold on the residual norm ||g||.
 * niter_performed      : out, iterations performed (NULL if not required).
 * tolerance_reached    : out, final residual norm (NULL if not required).
 * omp_parallel_threads : thread count for every OpenMP loop below.
 *
 * Returns 0 on convergence, 1 if max_iter was reached first.
 */
int nb_sparse_solve_conjugate_gradient
		(const nb_sparse_t *const A,
		 const double *const b,
		 double *_x,                     /* Out */
		 uint32_t max_iter, double tolerance,
		 uint32_t* niter_performed,      /* Out (NULL if not required) */
		 double* tolerance_reached,      /* Out (NULL if not required) */
		 uint32_t omp_parallel_threads)
/* Return the num of iterations */
{
	/* Solve Ax = b with Conjugate Gradient method */
	/* g: residual (gradient), p: search direction, w: A*p scratch. */
	double *g = nb_allocate_zero_mem(A->N * sizeof(double));
	double *p = nb_allocate_zero_mem(A->N * sizeof(double));
	double *w = nb_allocate_zero_mem(A->N * sizeof(double));
	double dot_gg = 0;
	/* Initial residual g = A*x - b, initial direction p = -g,
	 * and the squared residual norm dot_gg = g.g. */
	#pragma omp parallel for reduction(+:dot_gg) num_threads(omp_parallel_threads) schedule(guided)
	for (uint32_t i = 0; i < A->N; i++) {
		double sum = 0;
		for (uint32_t j = 0; j < A->rows_size[i]; j++)
			sum += A->rows_values[i][j] * _x[A->rows_index[i][j]];
		g[i] = sum - b[i];
		p[i] = -g[i];
		dot_gg += g[i]*g[i];
	}
	uint32_t k = 0;
	/* Iterate while ||g||^2 > tolerance^2 and under the iteration cap. */
	while (dot_gg > tolerance*tolerance && k < max_iter) {
		double dot_pw = 0;
		/* NOTE(review): dot_gg is recomputed below although it equals
		 * dot_gkgk of the previous iteration; kept as-is to preserve
		 * the exact floating-point reduction behaviour. */
		dot_gg = 0;
		#pragma omp parallel for reduction(+:dot_pw, dot_gg) num_threads(omp_parallel_threads) schedule(guided)
		for (uint32_t i = 0; i < A->N; i++) {
			/* w = A*p, plus the dot products p.w and g.g. */
			w[i] = 0;
			for (uint32_t j = 0; j < A->rows_size[i]; j++)
				w[i] += A->rows_values[i][j] * p[A->rows_index[i][j]];
			dot_pw += p[i]*w[i];
			dot_gg += g[i]*g[i];
		}
		/* Step length along the search direction p. */
		double alphak = dot_gg/dot_pw;
		double dot_gkgk = 0;
		#pragma omp parallel for reduction(+:dot_gkgk) num_threads(omp_parallel_threads) schedule(guided)
		for (uint32_t i = 0; i < A->N; i++) {
			/* Update iterate and residual; accumulate the new g.g. */
			_x[i] += alphak*p[i];
			g[i] += alphak*w[i];
			dot_gkgk += g[i]*g[i];
		}
		/* Fletcher-Reeves coefficient for the next direction. */
		double betak = dot_gkgk/dot_gg;
		#pragma omp parallel for num_threads(omp_parallel_threads)
		for (uint32_t i = 0; i < A->N; i++)
			p[i] = -g[i] + betak * p[i];
		k++;
	}
	/* Free memory */
	nb_free_mem(g);
	nb_free_mem(p);
	nb_free_mem(w);

	if (niter_performed != NULL)
		niter_performed[0] = k;

	/* NOTE(review): dot_gg here is the residual from the start of the last
	 * loop pass, one update behind dot_gkgk -- confirm if exactness matters. */
	if (tolerance_reached != NULL)
		*tolerance_reached = sqrt(dot_gg);

	if (dot_gg > tolerance*tolerance)
		return 1;
	return 0;
}
acc_JKmat.h
#ifndef __ACC_JKMAT_H__ #define __ACC_JKMAT_H__ #include "TinyDFT_typedef.h" #define ACC_JKMAT_IN_PARAM TinyDFT_p TinyDFT, int tid, int M, int N, int P, int Q, \ double *ERI, int load_P, int write_P, \ double *FM_strip_buf, int FM_strip_offset, \ double *FN_strip_buf, int FN_strip_offset #ifdef __cplusplus extern "C" { #endif // Accumulate an shell quartet ERI tensor to local J and K matrix buffers void acc_JKmat(ACC_JKMAT_IN_PARAM); // Accumulate a list of shell quartet ERI tensors to local J and K matrix buffers void acc_JKmat_with_ket_sp_list( TinyDFT_p TinyDFT, int tid, int M, int N, int npair, int *P_list, int *Q_list, double *ERIs, int nint, double *FM_strip_buf, double *FN_strip_buf, int *Mpair_flag, int *Npair_flag, int build_J, int build_K ); static inline void atomic_add_f64(volatile double *global_value, double addend) { uint64_t expected_value, new_value; do { double old_value = *global_value; double tmp_value; #ifdef __INTEL_COMPILER expected_value = _castf64_u64(old_value); new_value = _castf64_u64(old_value + addend); #else expected_value = *(uint64_t*) &old_value; tmp_value = old_value + addend; new_value = *(uint64_t*) &tmp_value; #endif } while (!__sync_bool_compare_and_swap((volatile uint64_t*)global_value, expected_value, new_value)); } static inline void atomic_add_vector(double *dst, double *src, int length) { for (int i = 0; i < length; i++) atomic_add_f64(&dst[i], src[i]); } static inline void direct_add_vector(double *dst, double *src, int length) { #pragma omp simd for (int i = 0; i < length; i++) dst[i] += src[i]; } #ifdef __cplusplus } #endif #endif
flexDiagonalOperator.h
#ifndef flexDiagonalOperator_H
#define flexDiagonalOperator_H

#include <vector>
#include "flexLinearOperator.h"

//! represents a diagonal operator
//! Applies element-wise multiplication with a fixed diagonal:
//! (D x)_i = d_i * x_i.  Square by construction (rows == cols == #entries).
template <typename T>
class flexDiagonalOperator : public flexLinearOperator<T>
{

#ifdef __CUDACC__
	typedef thrust::device_vector<T> Tdata;
#else
	typedef std::vector<T> Tdata;
#endif

private:
	// The diagonal entries d_i (device-resident under CUDA builds).
	Tdata diagonalElements;

public:
	//! initializes the concatenation operator for non-CUDA versions
	/*!
		\param aDiagonalElements vector of diagonal Elements
		\param aMinus determines if operator is negated \sa isMinus
	*/
	flexDiagonalOperator(std::vector<T> aDiagonalElements, bool aMinus) : flexLinearOperator<T>(static_cast<int>(aDiagonalElements.size()), static_cast<int>(aDiagonalElements.size()), diagonalOp, aMinus)
	{
		this->diagonalElements.resize(aDiagonalElements.size());

#ifdef __CUDACC__
		// Host -> device copy when compiled for CUDA.
		thrust::copy(aDiagonalElements.begin(), aDiagonalElements.end(), this->diagonalElements.begin());
#else
		this->diagonalElements = aDiagonalElements;
#endif
	}

#ifdef __CUDACC__
	//! initializes the concatenation operator for CUDA versions
	/*!
		\param aDiagonalElements vector of diagonal Elements where Tdata is of type thrust::device_vector<T>
		\param aMinus determines if operator is negated \sa isMinus
	*/
	// NOTE(review): the base class is always constructed before members
	// regardless of the written initializer order, so listing
	// diagonalElements first only triggers a -Wreorder-style warning.
	flexDiagonalOperator(Tdata aDiagonalElements, bool aMinus) : diagonalElements(aDiagonalElements), flexLinearOperator<T>(static_cast<int>(aDiagonalElements.size()), static_cast<int>(aDiagonalElements.size()), diagonalOp, aMinus)
	{

	};
#endif

	// Returns a heap-allocated deep copy; caller takes ownership.
	flexDiagonalOperator<T>* copy()
	{
		flexDiagonalOperator<T>* A = new flexDiagonalOperator<T>(this->diagonalElements, this->isMinus);

		return A;
	}

#ifdef __CUDACC__
	// Fused element-wise kernel: out (+=|-=|=) input * diagonal,
	// with the operation selected at runtime by the sign s.
	struct flexDiagonalOperatorFunctor
	{
		__host__ __device__
		flexDiagonalOperatorFunctor(const mySign _s) : s(_s){}

		template <typename Tuple>
		__host__ __device__
		void operator()(Tuple t)
		{
			switch (this->s)
			{
				case PLUS:
				{
					thrust::get<0>(t) += thrust::get<1>(t) * thrust::get<2>(t);
					break;
				}
				case MINUS:
				{
					thrust::get<0>(t) -= thrust::get<1>(t) * thrust::get<2>(t);
					break;
				}
				case EQUALS:
				{
					thrust::get<0>(t) = thrust::get<1>(t) * thrust::get<2>(t);
					break;
				}
			}
		}

		mySign s;
	};
#endif

	//apply linear operator to vector
	// output = D * input.  transposed is ignored: a diagonal operator
	// equals its own transpose.
	void times(bool transposed, const Tdata &input, Tdata &output)
	{
		this->doTimes(input,output,EQUALS);
	}

	// output += D * input (or -= when the operator carries the minus flag).
	void timesPlus(bool transposed, const Tdata &input, Tdata &output)
	{
		if (this->isMinus)
		{
			this->doTimes(input,output, MINUS);
		}
		else
		{
			this->doTimes(input,output, PLUS);
		}
	}

	// output -= D * input (or += when the operator carries the minus flag).
	void timesMinus(bool transposed, const Tdata &input, Tdata &output)
	{
		if (this->isMinus)
		{
			this->doTimes(input,output, PLUS);
		}
		else
		{
			this->doTimes(input,output, MINUS);
		}
	}

	// Per-row sum of absolute values; for a diagonal matrix this is |d_i|.
	// NOTE(review): under CUDA this indexes a device_vector element-wise
	// (one transfer per element) -- getAbsRowSumCUDA is the fast path.
	std::vector<T> getAbsRowSum(bool transposed)
	{
		std::vector<T> result(this->getNumRows());

		#pragma omp parallel for
		for (int k = 0; k < this->getNumRows(); ++k)
		{
			result[k] = std::abs(this->diagonalElements[k]);
		}

		return result;
	}

	// Largest absolute diagonal entry, i.e. the operator's infinity norm.
	T getMaxRowSumAbs(bool transposed)
	{
		Tdata diagonalElementsCopy = this->diagonalElements;

		vectorAbs(diagonalElementsCopy);

		return vectorMax(diagonalElementsCopy);
	}

#ifdef __CUDACC__
	// Device-side variant of getAbsRowSum: returns |d_i| per row without
	// leaving GPU memory.
	thrust::device_vector<T> getAbsRowSumCUDA(bool transposed)
	{
		Tdata diagonalElementsCopy = this->diagonalElements;

		vectorAbs(diagonalElementsCopy);

		return diagonalElementsCopy;
	}
#endif

private:
	// CPU fallback: element-wise output (op)= input * d, op chosen by s.
	void doTimesCPU(const Tdata &input, Tdata &output,const mySign s)
	{
		int numElements = (int)output.size();

		#pragma omp parallel for
		for (int i = 0; i < numElements; ++i)
		{
			switch (s)
			{
				case PLUS:
				{
					output[i] += input[i] * this->diagonalElements[i];
					break;
				}
				case MINUS:
				{
					output[i] -= input[i] * this->diagonalElements[i];
					break;
				}
				case EQUALS:
				{
					output[i] = input[i] * this->diagonalElements[i];
					break;
				}
			}
		}
	}

	// Dispatch: thrust zip-iterator kernel under CUDA, OpenMP loop otherwise.
	void doTimes(const Tdata &input, Tdata &output,const mySign s)
	{
#ifdef __CUDACC__
		thrust::for_each(
			thrust::make_zip_iterator(thrust::make_tuple(output.begin(), input.begin(), this->diagonalElements.begin())),
			thrust::make_zip_iterator(thrust::make_tuple(output.end(), input.end(), this->diagonalElements.end())),
			flexDiagonalOperatorFunctor(s));
#else
		this->doTimesCPU(input,output,s);
#endif
	}
};

#endif
1d.np.c
/*
 * 1-D stencil benchmark with trapezoidal time tiling, parallelized over
 * tiles with OpenMP.
 *
 * Usage: prog N T Bx tb
 *   N  - number of interior grid points (XSLOPE halo cells on each side)
 *   T  - number of time steps
 *   Bx - base (widest) spatial block width
 *   tb - temporal half-height of a tile (each wavefront covers 2*tb steps)
 *
 * Compile with -Dpoint=3 (default) or -Dpoint=5 to select the stencil, and
 * -DCHECK to validate the tiled result against a naive sweep.
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>      /* ceil()/floor() used by myceil/myfloor (was missing) */
#include <sys/time.h>
#include <omp.h>

#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))
#define myabs(x,y)  ((x) > (y)? ((x)-(y)) : ((y)-(x)))
#define myceil(x,y)  (int)ceil(((double)x)/((double)y))   // if x and y are integers, myceil(x,y) = (x-1)/y + 1
#define myfloor(x,y) (int)floor(((double)x)/((double)y))  // if x and y are integers, myceil(x,y) = (x-1)/y + 1

#if !defined(point)
#define point 3
#endif

#if point == 3
#define kernel(A) A[(t+1)%2][x] = 0.25 * ((A[t%2][x+1] + 2.0 * A[t%2][x]) + A[t%2][x-1])
#define XSLOPE 1
#elif point == 5
#define kernel(A) A[(t+1)%2][x] = 0.125 * (1.4*A[t%2][x-2] + 1.6*A[t%2][x-1] + 2.0 * A[t%2][x] + 1.9*A[t%2][x+1] + 1.1*A[t%2][x+2]);
#define XSLOPE 2
#endif

#ifdef CHECK
#define TOLERANCE 0
#endif

int main(int argc, char * argv[])
{
    struct timeval start, end;
    long int i;

    /* Reject malformed invocations instead of dereferencing missing argv. */
    if (argc < 5) {
        fprintf(stderr, "usage: %s N T Bx tb\n", argv[0]);
        return 1;
    }

    int N  = atoi(argv[1]);
    int T  = atoi(argv[2]);
    int Bx = atoi(argv[3]);
    int tb = atoi(argv[4]);

    /* The tiling is only valid when the block is wide enough for the
     * dependence slope and tall enough blocks still fit inside it. */
    if (Bx < (2*XSLOPE+1) || Bx > N || tb > (((Bx-1)/2)/XSLOPE)) {
        return 0;
    }

    /* Two time planes of N interior points plus XSLOPE halo per side. */
    double (*A)[N+2*XSLOPE] = (double (*)[N+2*XSLOPE])malloc(sizeof(double)*(N+2*XSLOPE)*2);
#ifdef CHECK
    double (*B)[N+2*XSLOPE] = (double (*)[N+2*XSLOPE])malloc(sizeof(double)*(N+2*XSLOPE)*2);
#endif

    srand(100);
    for (i = 0; i < N+2*XSLOPE; i++) {
        A[0][i] = 1.0 * (rand() % 1024);
        A[1][i] = 0;
#ifdef CHECK
        B[0][i] = A[0][i];
        B[1][i] = 0;
#endif
    }

    int bx = Bx - 2 * tb * XSLOPE;   /* narrow (top) block width */
    int ix = Bx + bx;                /* ix is even */
    int nb0[2] = { myfloor(N-Bx,ix), myfloor(N-Bx,ix) + 1 };
    int nrestpoints = N % ix;
    int bx_first_B1 = (Bx + nrestpoints)/2;
    int xright[2] = {bx_first_B1 + Bx + XSLOPE, bx_first_B1 + (Bx - bx)/2 + XSLOPE};
    int level = 0;
    int x, xx, t, tt;
    register int xmin, xmax;

    gettimeofday(&start, 0);
    /* March wavefronts of height 2*tb, alternating the two tile levels;
     * tiles within a wavefront are independent and run in parallel. */
    for (tt = -tb; tt < T; tt += tb) {
#pragma omp parallel for private(xmin,xmax,t,x)
        for (xx = 0; xx < nb0[level]; xx++) {
            for (t = max(tt, 0); t < min(tt + 2*tb, T); t++) {
                /* Trapezoid bounds narrow with distance from tt+tb;
                 * the edge tiles of level 1 extend to the domain border. */
                xmin = (level == 1 && xx == 0) ? XSLOPE : (xright[level] - Bx + xx*ix + myabs((tt+tb),(t+1))*XSLOPE);
                xmax = (level == 1 && xx == nb0[1] -1) ? N + XSLOPE : (xright[level] + xx*ix - myabs((tt+tb),(t+1))*XSLOPE);
#pragma ivdep
#pragma vector always
                for (x = xmin; x < xmax; x++) {
                    kernel(A);
                }
            }
        }
        level = 1 - level;
    }
    gettimeofday(&end, 0);

    printf("GStencil/s = %f\n", ((double)N * T) / (double)(end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) * 1.0e-6) / 1000000000L);

#ifdef CHECK
    /* Reference: untiled sweep over the full domain. */
    for (t = 0; t < T; t++) {
        for (x = XSLOPE; x < N + XSLOPE; x++) {
            kernel(B);
        }
    }
    for (i = XSLOPE; i < N + XSLOPE; i++) {
        /* %ld matches the long int index (was %d). */
        if (myabs(A[T%2][i], B[T%2][i]) > TOLERANCE)
            printf("Naive[%ld] = %f, Check = %f: FAILED!\n", i, B[T%2][i], A[T%2][i]);
    }
    free(B);
#endif
    free(A);
    return 0;
}
flags.c
#include <stdio.h>

int arr[100];
int nt = 12;

int main()
{
    /* Fill arr[i] = i on the target device, nt threads per team. */
#pragma omp target teams distribute parallel for num_threads(nt)
    for (int i = 0; i < 100; i++)
        arr[i] = i;

    /* Verify on the host: count every slot that does not hold its index. */
    int errors = 0;
    for (int i = 0; i < 100; i++)
        errors += (arr[i] != i);

    if (errors == 0) {
        fprintf(stderr, "Success\n");
        return 0;
    }
    fprintf(stderr, "Failed\nErrors: %d\n", errors);
    return 1;
}
GB_unaryop__lnot_int64_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int64_uint16
// op(A') function:  GB_tran__lnot_int64_uint16

// C type:   int64_t
// A type:   uint16_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT of (aij != 0)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out flags set in GB_control.h)
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_int64_uint16
(
    int64_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // entries are independent, so a static schedule splits them evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work using the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dsyrk.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zsyrk.c, normal z -> d, Fri Sep 28 17:38:03 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_syrk
 *
 *  Performs one of the symmetric rank k operations
 *
 *    \f[ C = \alpha A \times A^T + \beta C, \f]
 *    or
 *    \f[ C = \alpha A^T \times A + \beta C, \f]
 *
 *  where alpha and beta are scalars, C is an n-by-n symmetric
 *  matrix, and A is an n-by-k matrix in the first case and a k-by-n
 *  matrix in the second case.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of C is stored;
 *          - PlasmaLower: Lower triangle of C is stored.
 *
 * @param[in] trans
 *          - PlasmaNoTrans: \f[ C = \alpha A \times A^T + \beta C; \f]
 *          - PlasmaTrans:   \f[ C = \alpha A^T \times A + \beta C. \f]
 *
 * @param[in] n
 *          The order of the matrix C. n >= 0.
 *
 * @param[in] k
 *          If trans = PlasmaNoTrans, number of columns of the A matrix;
 *          if trans = PlasmaTrans, number of rows of the A matrix.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] pA
 *          A is an lda-by-ka matrix.
 *          If trans = PlasmaNoTrans, ka = k;
 *          if trans = PlasmaTrans,   ka = n.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          If trans = PlasmaNoTrans, lda >= max(1, n);
 *          if trans = PlasmaTrans,   lda >= max(1, k).
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] pC
 *          C is an ldc-by-n matrix.
 *          On exit, the uplo part of the matrix is overwritten
 *          by the uplo part of the updated matrix.
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1, n).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 *
 *******************************************************************************
 *
 * @sa plasma_omp_dsyrk
 * @sa plasma_csyrk
 * @sa plasma_dsyrk
 * @sa plasma_ssyrk
 *
 ******************************************************************************/
int plasma_dsyrk(plasma_enum_t uplo, plasma_enum_t trans,
                 int n, int k,
                 double alpha, double *pA, int lda,
                 double beta,  double *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaTrans)) {
        plasma_error("illegal value of trans");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (k < 0) {
        plasma_error("illegal value of k");
        return -4;
    }

    // am x an is the shape of A as actually stored, depending on trans.
    int am, an;
    if (trans == PlasmaNoTrans) {
        am = n;
        an = k;
    }
    else {
        am = k;
        an = n;
    }

    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldc < imax(1, n)) {
        plasma_error("illegal value of ldc");
        return -10;
    }

    // quick return
    if (n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_syrk(plasma, PlasmaRealDouble, n, k);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        n, n, 0, 0, n, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    // Check the result so the descriptors are not leaked on failure
    // (the original code ignored these two return values).
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_dge2desc(pC, ldc, C, &sequence, &request);

        // Call the tile async function.
        plasma_omp_dsyrk(uplo, trans,
                         alpha, A,
                         beta,  C,
                         &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&C);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_syrk
 *
 *  Performs rank k update.
 *  Non-blocking tile version of plasma_dsyrk().
 *  May return before the computation is finished.
 *  Operates on matrices stored by tiles.
 *  All matrices are passed through descriptors.
 *  All dimensions are taken from the descriptors.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of C is stored;
 *          - PlasmaLower: Lower triangle of C is stored.
 *
 * @param[in] trans
 *          - PlasmaNoTrans: \f[ C = \alpha A \times A^T + \beta C; \f]
 *          - PlasmaTrans:   \f[ C = \alpha A^T \times A + \beta C. \f]
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] C
 *          Descriptor of matrix C.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).  Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_dsyrk
 * @sa plasma_omp_dsyrk
 * @sa plasma_omp_csyrk
 * @sa plasma_omp_dsyrk
 * @sa plasma_omp_ssyrk
 *
 ******************************************************************************/
void plasma_omp_dsyrk(plasma_enum_t uplo, plasma_enum_t trans,
                      double alpha, plasma_desc_t A,
                      double beta,  plasma_desc_t C,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check sequence and request first: every other failure path below
    // reports through plasma_request_fail(sequence, ...), which must not
    // receive a NULL pointer (the original code checked these last).
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        // No sequence to mark as failed; just report and bail out.
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaTrans)) {
        plasma_error("illegal value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    int k = trans == PlasmaNoTrans ? A.n : A.m;
    if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return;

    // Call the parallel function.
    plasma_pdsyrk(uplo, trans,
                  alpha, A,
                  beta,  C,
                  sequence, request);
}
GB_binop__minus_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fc64) // A*D function (colscale): GB (_AxD__minus_fc64) // D*A function (rowscale): GB (_DxB__minus_fc64) // C+=B function (dense accum): GB (_Cdense_accumB__minus_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__minus_fc64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fc64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fc64) // C=scalar+B GB (_bind1st__minus_fc64) // C=scalar+B' GB (_bind1st_tran__minus_fc64) // C=A+scalar GB (_bind2nd__minus_fc64) // C=A'+scalar GB (_bind2nd_tran__minus_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // A pattern? 0 // B type: GxB_FC64_t // B pattern? 
0 // BinaryOp: cij = GB_FC64_minus (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_minus (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_FC64 || GxB_NO_MINUS_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B where C, A, and B are all dense.  This wrapper only binds the
// FC64 "minus" operator/types; the loop itself comes from the template.
void GB (_Cdense_ewise3_accum__minus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// C = A+B (no accumulator), all three matrices dense.
void GB (_Cdense_ewise3_noaccum__minus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this operator/type pairing is compiled out
// (GB_DISABLE); the caller then falls back to the generic kernel.
GrB_Info GB (_Cdense_accumB__minus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); kept
    // exactly as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// alpha/beta scalars are read only when is_eWiseUnion is true (GxB_eWiseUnion
// semantics); for plain eWiseAdd they are ignored by the template.
GrB_Info GB (_AaddB__minus_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC64_t alpha_scalar ;
    GxB_FC64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((GxB_FC64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__minus_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// flipxy only matters when GB_BINOP_FLIP is 1; for "minus" the flip was
// already resolved by the caller (minus(y,x) == rminus(x,y)).
GrB_Info GB (_AemultB_02__minus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__minus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = minus (x, Bx [p]) for every entry present in the bitmap Bb
// (Bb == NULL means all of Bx is present).
GrB_Info GB (_bind1st__minus_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t   x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        GxB_FC64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC64_minus (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = minus (Ax [p], y) for every entry present in the bitmap Ab.
GrB_Info GB (_bind2nd__minus_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t   y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC64_minus (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_FC64_minus (x, aij) ;              \
}

GrB_Info GB (_bind1st_tran__minus_fc64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_FC64_minus (aij, y) ;              \
}

GrB_Info GB (_bind2nd_tran__minus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__ainv_int16_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int16_uint32
// op(A') function:  GB_tran__ainv_int16_uint32

// C type:   int16_t
// A type:   uint32_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse; applied after the cast to int16_t)
#define GB_OP(z, x) \
    z = -x ;

// casting (narrowing uint32_t -> int16_t happens here, before GB_OP)
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply AINV element-wise over a dense value array; returns GrB_NO_VALUE
// when this operator/type pairing is compiled out (GB_DISABLE).
GrB_Info GB_unop__ainv_int16_uint32
(
    int16_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself lives in GB_unaryop_transpose.c and is
// specialized here via the macros defined above.
GrB_Info GB_tran__ainv_int16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ga.c
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <mpi.h> #include <omp.h> #include "../include/imagen.h" #include "../include/ga.h" #include "../include/derivados_mpi.h" #define PRINT 1 static int aleatorio(int max) { return (rand() % (max + 1)); } void init_imagen_aleatoria(RGB *imagen, int max, int total) { for (int i = 0; i < total; i++) { imagen[i].r = aleatorio(max); imagen[i].g = aleatorio(max); imagen[i].b = aleatorio(max); } } RGB *imagen_aleatoria(int max, int total) { RGB *imagen = (RGB *)malloc(total * sizeof(RGB)); assert(imagen); init_imagen_aleatoria(imagen, max, total); return imagen; } static int comp_fitness(const void *a, const void *b) { /* qsort pasa un puntero al elemento que está ordenando */ return (*(Individuo *)a).fitness - (*(Individuo *)b).fitness; } void crear_imagen(const RGB *imagen_objetivo, int num_pixels, int ancho, int alto, int max, int num_generaciones, int tam_poblacion, RGB *imagen_resultado, const char *output_file) { double initial_time_fitness = 0; double final_time_fitness = 0; double total_time_fitness = 0; double fitness_anterior = 0, fitness_actual, diferencia_fitness; int rank, world_size; Individuo *poblacion = NULL; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &world_size); if (rank == 0) { poblacion = malloc(tam_poblacion * sizeof(Individuo)); assert(poblacion); /* Todos los nodos calculan su parte */ for (int i = 0; i < tam_poblacion; i++) { init_imagen_aleatoria(poblacion[i].imagen, max, num_pixels); } for (int i = 0; i < tam_poblacion; i++) { fitness(imagen_objetivo, &poblacion[i], num_pixels); } qsort(poblacion, tam_poblacion, sizeof(Individuo), comp_fitness); //final_time_fitness = MPI_Wtime(); //total_time_fitness += final_time_fitness - initial_time_fitness; // Ordenar individuos según la función de bondad (menor "fitness" --> más aptos) } MPI_Datatype rgb_type; MPI_Datatype individuo_type; crear_tipo_datos(num_pixels, &rgb_type, 
&individuo_type); int chunkSize = tam_poblacion / world_size; int leftover = tam_poblacion % world_size; Individuo *poblacionLocal = malloc(chunkSize * sizeof(Individuo)); MPI_Scatter(&poblacion[leftover], chunkSize, individuo_type, poblacionLocal, chunkSize, individuo_type, 0, MPI_COMM_WORLD); // B. Evolucionar la Población (durante un número de generaciones) for (int g = 0; g < num_generaciones; g++) { if(rank == 0) fitness_anterior = poblacion[0].fitness; int cruzarChunkSize = chunkSize/2; int cruzarLeftover = leftover/2; // Promocionar a los descendientes de los individuos más aptos if (rank == 0) { for (int i = 0; i < cruzarLeftover; i+=2) { cruzar(&poblacion[i], &poblacion[i+1], &poblacion[cruzarLeftover/2+i], &poblacion[cruzarLeftover/2+i+1], num_pixels); } } for (int i = 0; i < cruzarChunkSize; i+=2) { cruzar(&poblacionLocal[i], &poblacionLocal[i + 1], &poblacionLocal[cruzarChunkSize/2 + i], &poblacionLocal[cruzarChunkSize/2 + i + 1], num_pixels); } // Mutar una parte de la individuos de la población (se decide que muten tam_poblacion/4) int mutation_start = (tam_poblacion / 4) / world_size; if(rank == 0) { for(int i = leftover/4; i< leftover; i++){ mutar(&poblacion[i], max, num_pixels); } } for (int i = mutation_start; i < chunkSize; i++) { mutar(&poblacionLocal[i], max, num_pixels); } if (rank == 0) { initial_time_fitness = MPI_Wtime(); for (int i = 0; i < leftover; i++) { fitness(imagen_objetivo, &poblacion[i], num_pixels); } } /* Todos los nodos calculan su parte */ for (int i = 0; i < chunkSize; i++) { fitness(imagen_objetivo, &poblacionLocal[i], num_pixels); } // Cada 10 iteraciones recuperamos subpoblaciones, ordenamos y volvemos a distribuir if((g % 10 == 0 && g) || g == num_generaciones - 1){ MPI_Gather(poblacionLocal, chunkSize, individuo_type, &poblacion[leftover], chunkSize, individuo_type, 0, MPI_COMM_WORLD); // Cambiar el tipo de MPI por el derivado if (rank == 0) { qsort(poblacion, tam_poblacion, sizeof(Individuo), comp_fitness); 
final_time_fitness = MPI_Wtime(); total_time_fitness += final_time_fitness - initial_time_fitness; fitness_actual = poblacion[0].fitness; diferencia_fitness = -(fitness_actual - fitness_anterior) / fitness_actual * 100; if (PRINT) { printf("Generacion %d - ", g); printf("Fitness = %e - ", fitness_actual); printf("Diferencia con Fitness Anterior = %.2e%c\n", diferencia_fitness, 37); } } MPI_Scatter(&poblacion[leftover], chunkSize, individuo_type, poblacionLocal, chunkSize, individuo_type, 0, MPI_COMM_WORLD); } qsort(poblacionLocal, chunkSize, sizeof(Individuo), comp_fitness); } // Devuelve Imagen Resultante if (rank == 0) { //qsort(poblacion, tam_poblacion, sizeof(Individuo), comp_fitness); printf("Tiempo invertido en cálculo fitness: %f\n", total_time_fitness); memmove(imagen_resultado, poblacion[0].imagen, num_pixels * sizeof(RGB)); printf("Imagen movida\n"); // Release memory } if (rank == 0) free(poblacion); free(poblacionLocal); } void cruzar(Individuo *padre1, Individuo *padre2, Individuo *hijo1, Individuo *hijo2, int num_pixels) { // Elegir un "punto" de corte aleatorio a partir del cual se realiza el intercambio de los genes. 
// * Cruzar los genes de cada padre con su hijo // * Intercambiar los genes de cada hijo con los del otro padre int corte = aleatorio(num_pixels - 1); #pragma omp parallel { #pragma omp for for (int i = 0; i < corte; i++) { hijo1->imagen[i] = padre1->imagen[i]; hijo2->imagen[i] = padre2->imagen[i]; } #pragma omp for for (int i = corte; i < num_pixels; i++) { hijo1->imagen[i] = padre2->imagen[i]; hijo2->imagen[i] = padre1->imagen[i]; } } } void fitness(const RGB *objetivo, Individuo *individuo, int num_pixels) { // Determina la calidad del individuo (similitud con el objetivo) // calculando la suma de la distancia existente entre los pixeles double fitness = 0; #pragma omp parallel for reduction(+:fitness) for (int i = 0; i < num_pixels; i++) { fitness += abs(objetivo[i].r - individuo->imagen[i].r) + abs(objetivo[i].g - individuo->imagen[i].g) + abs(objetivo[i].b - individuo->imagen[i].b); } individuo->fitness = fitness; } void mutar(Individuo *actual, int max, int num_pixels) { // Cambia el valor de algunos puntos de la imagen de forma aleatoria. // Decidir cuantos pixels mutar. Si el valor es demasiado pequeño, // la convergencia es muy pequeña, y si es demasiado alto diverge. double ratioMutacion = 0.002; int numMutar = (int)num_pixels * ratioMutacion; for (int i = 0; i < numMutar; i++) { int index = aleatorio(num_pixels - 1); actual->imagen[index].r = aleatorio(max); actual->imagen[index].g = aleatorio(max); actual->imagen[index].b = aleatorio(max); } }
GB_binop__isge_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_int16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__isge_int16) // A.*B function (eWiseMult): GB (_AemultB_03__isge_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int16) // A*D function (colscale): GB (_AxD__isge_int16) // D*A function (rowscale): GB (_DxB__isge_int16) // C+=B function (dense accum): GB (_Cdense_accumB__isge_int16) // C+=b function (dense accum): GB (_Cdense_accumb__isge_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int16) // C=scalar+B GB (_bind1st__isge_int16) // C=scalar+B' GB (_bind1st_tran__isge_int16) // C=A+scalar GB (_bind2nd__isge_int16) // C=A'+scalar GB (_bind2nd_tran__isge_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = 
Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_INT16 || GxB_NO_ISGE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B dense-accumulate kernel: not generated for ISGE (the op is not in
// the MIN/MAX/PLUS/... set), hence the (none) placeholder inside #if 0.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this operator/type pairing is compiled out
// (GB_DISABLE); the caller then falls back to the generic kernel.
GrB_Info GB (_Cdense_accumB__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isge_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); kept
    // exactly as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isge_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// NOTE(review): the summary comment at the top of this generated file lists
// this entry as GB (_AemultB); the actual generated name is _AemultB_01.
GrB_Info GB (_AemultB_01__isge_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// flipxy only matters when GB_BINOP_FLIP is 1; for ISGE the flip was already
// resolved by the caller (isge(y,x) == isle(x,y)).
GrB_Info GB (_AemultB_02__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isge_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x >= Bx [p]) for every entry present in the bitmap Bb
// (Bb == NULL means all of Bx is present).
GrB_Info GB (_bind1st__isge_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] >= y) for every entry present in the bitmap Ab.
GrB_Info GB (_bind2nd__isge_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int16_t aij = Ax [pA] ;             \
    Cx [pC] = (x >= aij) ;              \
}

GrB_Info GB (_bind1st_tran__isge_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int16_t aij = Ax [pA] ;             \
    Cx [pC] = (aij >= y) ;              \
}

GrB_Info GB (_bind2nd_tran__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_5x5_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv5x5s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator); const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); int q=0; for (; q<inch-1; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); const unsigned short* r3 = img0.row<const unsigned short>(3); const unsigned short* r4 = img0.row<const unsigned short>(4); const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j+3<outw; j+=4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n"// sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"// r00 r01 r02 r03 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v1.s[2] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "fmla v22.4s, v19.4s, v2.s[3] \n" "fmla v23.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, 
v7.4h}, [%1] \n"// r04 r05 r06 r07 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "fmla v23.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v3.s[3] \n" "fmla v23.4s, v27.4s, v4.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v5.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 
\n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"// r10 r11 r12 r13 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "fmla v23.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v1.s[2] \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] 
\n" "fmla v21.4s, v27.4s, v1.s[3] \n" "fmla v22.4s, v27.4s, v2.s[3] \n" "fmla v23.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2] \n"// r14 r15 r16 r17 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "fmla v23.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v3.s[3] \n" "fmla v23.4s, v19.4s, v4.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v5.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, 
v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"// r20 r21 r22 r23 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 
\n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v1.s[2] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "fmla v22.4s, v19.4s, v2.s[3] \n" "fmla v23.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3] \n"// r24 r25 r26 r27 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "fmla v23.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v3.s[3] \n" "fmla v23.4s, v27.4s, v4.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v5.s[3] \n" "fmla v20.4s, 
v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n"// r30 r31 r32 r33 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "fmla v23.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #256] \n" 
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v1.s[2] \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v1.s[3] \n" "fmla v22.4s, v27.4s, v2.s[3] \n" "fmla v23.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4] \n"// r34 r35 r36 r37 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "fmla v23.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v3.s[3] \n" "fmla v23.4s, v19.4s, v4.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, 
v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v5.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"// r40 r41 r42 r43 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] 
\n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v1.s[2] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "fmla v22.4s, v19.4s, v2.s[3] \n" "fmla v23.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5] \n"// r44 r45 r46 r47 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "fmla v23.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v3.s[3] \n" "fmla v23.4s, v27.4s, v4.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll 
v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v5.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" // "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", 
"v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n"// sum0 sum1 sum2 sum3 "pld [%1, #256] \n" "vld1.u16 {d4-d7}, [%1 :64]! \n"// r00 r01 r02 r03 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%1, #256] \n" "vld1.u16 {d12-d15}, [%1 :64] \n"// r04 r05 r06 r07 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d4[0] \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "vmla.f32 q14, q11, d6[1] \n" "vmla.f32 q15, q11, d8[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d5[0] \n" "vmla.f32 q14, q8, d7[0] \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d5[1] \n" "vmla.f32 q14, q9, d7[1] \n" "vmla.f32 q15, q9, d9[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d8[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d12[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d9[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d9[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d13[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! 
\n"// r10 r11 r12 r13 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d0[0] \n" "vmla.f32 q13, q10, d2[0] \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d2[1] \n" "vmla.f32 q14, q11, d4[1] \n" "vmla.f32 q15, q11, d6[1] \n" "vmla.f32 q12, q8, d1[0] \n" "vmla.f32 q13, q8, d3[0] \n" "vmla.f32 q14, q8, d5[0] \n" "vmla.f32 q15, q8, d7[0] \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d3[1] \n" "vmla.f32 q14, q9, d5[1] \n" "vmla.f32 q15, q9, d7[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%2, #256] \n" "vld1.u16 {d12-d15}, [%2 :64] \n"// r14 r15 r16 r17 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d4[0] \n" "vmla.f32 q13, q10, d6[0] \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d6[1] \n" "vmla.f32 q14, q11, d8[1] \n" "vmla.f32 q15, q11, d10[1] \n" "vmla.f32 q12, q8, d5[0] \n" "vmla.f32 q13, q8, d7[0] \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q8, d11[0] \n" "vmla.f32 q12, q9, d5[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vmla.f32 q14, q9, d9[1] \n" "vmla.f32 q15, q9, d11[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! \n"// r20 r21 r22 r23 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q10, d8[0] \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q10, d12[0] \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d10[1] \n" "vmla.f32 q14, q11, d12[1] \n" "vmla.f32 q15, q11, d14[1] \n" "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q8, d11[0] \n" "vmla.f32 q14, q8, d13[0] \n" "vmla.f32 q15, q8, d15[0] \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vmla.f32 q14, q9, d13[1] \n" "vmla.f32 q15, q9, d15[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3 :64] \n"// r24 r25 r26 r27 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d4[0] \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "vmla.f32 q14, q11, d6[1] \n" "vmla.f32 q15, q11, d8[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d5[0] \n" "vmla.f32 q14, q8, d7[0] \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d5[1] \n" "vmla.f32 q14, q9, d7[1] \n" "vmla.f32 q15, q9, d9[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d8[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d12[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d9[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d9[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d13[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! 
\n"// r30 r31 r32 r33 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d0[0] \n" "vmla.f32 q13, q10, d2[0] \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d2[1] \n" "vmla.f32 q14, q11, d4[1] \n" "vmla.f32 q15, q11, d6[1] \n" "vmla.f32 q12, q8, d1[0] \n" "vmla.f32 q13, q8, d3[0] \n" "vmla.f32 q14, q8, d5[0] \n" "vmla.f32 q15, q8, d7[0] \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d3[1] \n" "vmla.f32 q14, q9, d5[1] \n" "vmla.f32 q15, q9, d7[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64] \n"// r34 r35 r36 r37 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d4[0] \n" "vmla.f32 q13, q10, d6[0] \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d6[1] \n" "vmla.f32 q14, q11, d8[1] \n" "vmla.f32 q15, q11, d10[1] \n" "vmla.f32 q12, q8, d5[0] \n" "vmla.f32 q13, q8, d7[0] \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q8, d11[0] \n" "vmla.f32 q12, q9, d5[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vmla.f32 q14, q9, d9[1] \n" "vmla.f32 q15, q9, d11[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n"// r40 r41 r42 r43 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q10, d8[0] \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q10, d12[0] \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d10[1] \n" "vmla.f32 q14, q11, d12[1] \n" "vmla.f32 q15, q11, d14[1] \n" "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q8, d11[0] \n" "vmla.f32 q14, q8, d13[0] \n" "vmla.f32 q15, q8, d15[0] \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vmla.f32 q14, q9, d13[1] \n" "vmla.f32 q15, q9, d15[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d12-d15}, [%5 :64] \n"// r44 r45 r46 r47 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d4[0] \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "vmla.f32 q14, q11, d6[1] \n" "vmla.f32 q15, q11, d8[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d5[0] \n" "vmla.f32 q14, q8, d7[0] \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d5[1] \n" "vmla.f32 q14, q9, d7[1] \n" "vmla.f32 q15, q9, d9[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d8[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d12[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d9[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d9[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d13[1] \n" // "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :64] \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // 
%1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j+1<outw; j+=2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4h, v1.4h}, [%1], #16 \n"// r00 r01 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v20.4s, v21.4s}, [%0] \n"// sum0 sum1 "fmul v22.4s, v16.4s, v0.s[0] \n" "fmul v23.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%1] \n"// r02 r03 r04 r05 "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, 
v27.4h}, [%6], #32 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4h, v1.4h}, [%2], #16 \n"// r10 r11 "fmla v21.4s, v27.4s, v4.s[3] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%2] \n"// r12 r13 r14 r15 "shll v18.4s, v18.4h, #16 
\n" "fmla v21.4s, v27.4s, v1.s[3] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v16.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n"// r20 r21 "fmla v21.4s, v19.4s, v4.s[3] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" 
"fmla v23.4s, v26.4s, v5.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%3] \n"// r22 r23 r24 r25 "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "shll v26.4s, 
v26.4h, #16 \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4h, v1.4h}, [%4], #16 \n"// r30 r31 "fmla v21.4s, v27.4s, v4.s[3] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%4] \n"// r32 r33 r34 r35 "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v27.4s, v1.s[3] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v16.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, 
v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4h, v1.4h}, [%5], #16 \n"// r40 r41 "fmla v21.4s, v19.4s, v4.s[3] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll 
v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%5] \n"// r42 r43 r44 r45 "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" // "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla 
v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "st1 {v20.4s, v21.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%1, #128] \n" "vld1.u16 {d2-d3}, [%1 :64]! \n"// r00 r01 "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "pld [%1, #256] \n" "vld1.u16 {d8-d11}, [%1 :64] \n"// r02 r03 r04 r05 "vshll.u16 q8, d20, #16 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128] \n"// sum0 sum1 "vmul.f32 q14, q8, d0[0] \n" "vshll.u16 q9, d21, #16 \n" "vmul.f32 q15, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! \n"// r10 r11 "vmla.f32 q14, q10, d6[0] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q15, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q9, d10[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d9[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d11[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%2, #256] \n" "vld1.u16 {d8-d11}, [%2 :64] \n"// r12 r13 r14 r15 "vmla.f32 q14, q10, d0[0] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q15, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d1[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d3[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d5[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q9, d7[1] \n" "pld [%3, #128] \n" "vld1.u16 {d2-d3}, [%3 :64]! \n"// r20 r21 "vmla.f32 q14, q8, d6[0] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q15, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d7[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d8[1] \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q11, d10[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d9[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d11[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q9, d11[1] \n" "pld [%3, #256] \n" "vld1.u16 {d8-d11}, [%3 :64] \n"// r22 r23 r24 r25 "vmla.f32 q14, q8, d0[0] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q15, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! \n"// r30 r31 "vmla.f32 q14, q10, d6[0] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q15, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q9, d10[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d9[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d11[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%4, #256] \n" "vld1.u16 {d8-d11}, [%4 :64] \n"// r32 r33 r34 r35 "vmla.f32 q14, q10, d0[0] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q15, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d1[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d3[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d5[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q9, d7[1] \n" "pld [%5, #128] \n" "vld1.u16 {d2-d3}, [%5 :64]! \n"// r40 r41 "vmla.f32 q14, q8, d6[0] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q15, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d7[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d8[1] \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q11, d10[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d9[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d11[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q9, d11[1] \n" "pld [%5, #256] \n" "vld1.u16 {d8-d11}, [%5 :64] \n"// r42 r43 r44 r45 "vmla.f32 q14, q8, d0[0] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q15, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q11, d7[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128] \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "vst1.f32 {d24-d27}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j<outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1], #8 \n"// r00 "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v0.4s, v0.4h, #16 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%1] \n"// r01 r02 r03 r04 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n"// sum0 "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmul v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmul v22.4s, v17.4s, v0.s[1] \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmul v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, 
v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n"// r10 "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%2] \n"// r11 r12 r13 r14 "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v21.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 
{v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n"// r20 "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%3] \n"// r21 r22 r23 r24 "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" 
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v0.4h}, [%4], #8 \n"// r30 "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%4] \n"// r31 r32 r33 r34 "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v21.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] 
\n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n"// r40 "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%5] \n"// r41 r42 r43 r44 "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v3.s[0] \n" // "prfm pldl1keep, [%6, 
#256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fadd v22.4s, v21.4s, v22.4s \n" "fadd v23.4s, v22.4s, v23.4s \n" "fadd v20.4s, v20.4s, v23.4s \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "st1 {v20.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%1, #64] \n" "vld1.u16 {d1}, [%1 :64]! \n"// r00 "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "pld [%0, #128] \n" "vld1.f32 {d24-d25}, [%0 :128] \n"// sum0 "vmul.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmul.f32 q14, q9, d0[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmul.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%1, #256] \n" "vld1.u16 {d6-d9}, [%1 :64] \n"// r01 r02 r03 r04 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q1, d6, #16 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "pld [%2, #64] \n" "vld1.u16 {d1}, [%2 :64]! \n"// r10 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d0[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d0[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d1[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "pld [%2, #256] \n" "vld1.u16 {d6-d9}, [%2 :64] \n"// r11 r12 r13 r14 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q1, d6, #16 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q13, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%3, #64] \n" "vld1.u16 {d1}, [%3 :64]! \n"// r20 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q13, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d0[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%3, #256] \n" "vld1.u16 {d6-d9}, [%3 :64] \n"// r21 r22 r23 r24 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q1, d6, #16 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "pld [%4, #64] \n" "vld1.u16 {d1}, [%4 :64]! \n"// r30 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d0[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d0[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d1[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "pld [%4, #256] \n" "vld1.u16 {d6-d9}, [%4 :64] \n"// r31 r32 r33 r34 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q1, d6, #16 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q13, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5 :64]! 
\n"// r40 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q13, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d0[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%5, #256] \n" "vld1.u16 {d6-d9}, [%5 :64] \n"// r41 r42 r43 r44 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q1, d6, #16 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" // "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d8[1] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vadd.f32 q13, q13, q14 \n" "vadd.f32 q12, q12, q15 \n" "vadd.f32 q12, q12, q13 \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "vst1.f32 {d24-d25}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } r0 += 4*4; r1 += 4*4; r2 += 4*4; r3 += 4*4; r4 += 4*4; } } for (; q<inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); const float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); const unsigned short* r3 = img0.row<const unsigned short>(3); const unsigned short* r4 = img0.row<const unsigned short>(4); const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j+3<outw; j+=4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"// sum0 sum1 sum2 sum3 "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"// r00 
r01 r02 r03 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v1.s[2] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "fmla v22.4s, v19.4s, v2.s[3] \n" "fmla v23.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2] \n"// r04 r05 r06 r07 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "fmla v23.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v3.s[3] \n" 
"fmla v23.4s, v27.4s, v4.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v5.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"// r10 r11 r12 r13 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, 
v17.4s, v7.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "fmla v23.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v1.s[2] \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v1.s[3] \n" "fmla v22.4s, v27.4s, v2.s[3] \n" "fmla v23.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3] \n"// r14 r15 r16 r17 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "fmla v23.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" 
"fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v3.s[3] \n" "fmla v23.4s, v19.4s, v4.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v5.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n"// r20 r21 r22 r23 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, 
v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v1.s[2] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "fmla v22.4s, v19.4s, v2.s[3] \n" "fmla v23.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4] \n"// r24 r25 r26 r27 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "fmla v23.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, 
v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v3.s[3] \n" "fmla v23.4s, v27.4s, v4.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v5.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"// r30 r31 r32 r33 "shll v0.4s, v0.4h, #16 
\n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "fmla v23.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v1.s[2] \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v1.s[3] \n" "fmla v22.4s, v27.4s, v2.s[3] \n" "fmla v23.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5] \n"// r34 r35 r36 r37 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" 
"fmla v22.4s, v17.4s, v3.s[1] \n" "fmla v23.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v3.s[3] \n" "fmla v23.4s, v19.4s, v4.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v5.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, 
v5.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%6], #32 \n"// r40 r41 r42 r43 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v1.s[2] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "fmla v22.4s, v19.4s, v2.s[3] \n" "fmla v23.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6] \n"// r44 r45 r46 r47 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" 
"fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "fmla v23.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v3.s[3] \n" "fmla v23.4s, v27.4s, v4.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v5.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" // "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, 
v4.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%0], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n"// sum0 sum1 sum2 sum3 "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n"// r00 r01 r02 r03 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%2, #256] \n" "vld1.u16 {d12-d15}, [%2 :64] \n"// r04 r05 r06 r07 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d4[0] \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "vmla.f32 q14, q11, d6[1] \n" "vmla.f32 q15, q11, d8[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d5[0] \n" "vmla.f32 q14, q8, d7[0] \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d5[1] \n" "vmla.f32 q14, q9, d7[1] \n" "vmla.f32 q15, q9, d9[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d8[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d12[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d9[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d9[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d13[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! 
\n"// r10 r11 r12 r13 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d0[0] \n" "vmla.f32 q13, q10, d2[0] \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d2[1] \n" "vmla.f32 q14, q11, d4[1] \n" "vmla.f32 q15, q11, d6[1] \n" "vmla.f32 q12, q8, d1[0] \n" "vmla.f32 q13, q8, d3[0] \n" "vmla.f32 q14, q8, d5[0] \n" "vmla.f32 q15, q8, d7[0] \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d3[1] \n" "vmla.f32 q14, q9, d5[1] \n" "vmla.f32 q15, q9, d7[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3 :64] \n"// r14 r15 r16 r17 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d4[0] \n" "vmla.f32 q13, q10, d6[0] \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d6[1] \n" "vmla.f32 q14, q11, d8[1] \n" "vmla.f32 q15, q11, d10[1] \n" "vmla.f32 q12, q8, d5[0] \n" "vmla.f32 q13, q8, d7[0] \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q8, d11[0] \n" "vmla.f32 q12, q9, d5[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vmla.f32 q14, q9, d9[1] \n" "vmla.f32 q15, q9, d11[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! \n"// r20 r21 r22 r23 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q10, d8[0] \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q10, d12[0] \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d10[1] \n" "vmla.f32 q14, q11, d12[1] \n" "vmla.f32 q15, q11, d14[1] \n" "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q8, d11[0] \n" "vmla.f32 q14, q8, d13[0] \n" "vmla.f32 q15, q8, d15[0] \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vmla.f32 q14, q9, d13[1] \n" "vmla.f32 q15, q9, d15[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64] \n"// r24 r25 r26 r27 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d4[0] \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "vmla.f32 q14, q11, d6[1] \n" "vmla.f32 q15, q11, d8[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d5[0] \n" "vmla.f32 q14, q8, d7[0] \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d5[1] \n" "vmla.f32 q14, q9, d7[1] \n" "vmla.f32 q15, q9, d9[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d8[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d12[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d9[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d9[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d13[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! 
\n"// r30 r31 r32 r33 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d0[0] \n" "vmla.f32 q13, q10, d2[0] \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d2[1] \n" "vmla.f32 q14, q11, d4[1] \n" "vmla.f32 q15, q11, d6[1] \n" "vmla.f32 q12, q8, d1[0] \n" "vmla.f32 q13, q8, d3[0] \n" "vmla.f32 q14, q8, d5[0] \n" "vmla.f32 q15, q8, d7[0] \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d3[1] \n" "vmla.f32 q14, q9, d5[1] \n" "vmla.f32 q15, q9, d7[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d12-d15}, [%5 :64] \n"// r34 r35 r36 r37 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d4[0] \n" "vmla.f32 q13, q10, d6[0] \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d6[1] \n" "vmla.f32 q14, q11, d8[1] \n" "vmla.f32 q15, q11, d10[1] \n" "vmla.f32 q12, q8, d5[0] \n" "vmla.f32 q13, q8, d7[0] \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q8, d11[0] \n" "vmla.f32 q12, q9, d5[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vmla.f32 q14, q9, d9[1] \n" "vmla.f32 q15, q9, d11[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%6, #256] \n" "vld1.u16 {d4-d7}, [%6 :64]! \n"// r40 r41 r42 r43 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q10, d8[0] \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q10, d12[0] \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d10[1] \n" "vmla.f32 q14, q11, d12[1] \n" "vmla.f32 q15, q11, d14[1] \n" "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q8, d11[0] \n" "vmla.f32 q14, q8, d13[0] \n" "vmla.f32 q15, q8, d15[0] \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vmla.f32 q14, q9, d13[1] \n" "vmla.f32 q15, q9, d15[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6 :64] \n"// r44 r45 r46 r47 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d4[0] \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "vmla.f32 q14, q11, d6[1] \n" "vmla.f32 q15, q11, d8[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d5[0] \n" "vmla.f32 q14, q8, d7[0] \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d5[1] \n" "vmla.f32 q14, q9, d7[1] \n" "vmla.f32 q15, q9, d9[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d8[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d12[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d9[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d9[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d13[1] \n" // "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64] \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" 
"vshrn.u32 d26, q14, #16 \n" "vshrn.u32 d27, q15, #16 \n" "vst1.u16 {d24-d27}, [%0 :64]! \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j+1<outw; j+=2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4h, v1.4h}, [%2], #16 \n"// r00 r01 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v20.4s, v21.4s}, [%1], #32 \n"// sum0 sum1 "fmul v22.4s, v16.4s, v0.s[0] \n" "fmul v23.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%2] \n"// r02 r03 r04 r05 "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, 
v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n"// r10 r11 "fmla v21.4s, v27.4s, v4.s[3] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll 
v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%3] \n"// r12 r13 r14 r15 "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v27.4s, v1.s[3] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v16.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4h, v1.4h}, [%4], #16 \n"// r20 r21 "fmla v21.4s, v19.4s, v4.s[3] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, 
v19.4h}, [%7], #32 \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%4] \n"// r22 r23 r24 r25 "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, 
v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4h, v1.4h}, [%5], #16 \n"// r30 r31 "fmla v21.4s, v27.4s, v4.s[3] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%5] \n"// r32 r33 r34 r35 "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v27.4s, v1.s[3] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v16.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "shll v4.4s, 
v4.4h, #16 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v0.4h, v1.4h}, [%6], #16 \n"// r40 r41 "fmla v21.4s, v19.4s, v4.s[3] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "shll 
v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%6] \n"// r42 r43 r44 r45 "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" // "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "shll 
v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "st1 {v20.4h, v21.4h}, [%0], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! \n"// r00 r01 "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "pld [%2, #256] \n" "vld1.u16 {d8-d11}, [%2 :64] \n"// r02 r03 r04 r05 "vshll.u16 q8, d20, #16 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// sum0 sum1 "vmul.f32 q14, q8, d0[0] \n" "vshll.u16 q9, d21, #16 \n" "vmul.f32 q15, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%3, #128] \n" "vld1.u16 {d2-d3}, [%3 :64]! \n"// r10 r11 "vmla.f32 q14, q10, d6[0] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q15, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q9, d10[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d9[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d11[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%3, #256] \n" "vld1.u16 {d8-d11}, [%3 :64] \n"// r12 r13 r14 r15 "vmla.f32 q14, q10, d0[0] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q15, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vmla.f32 q14, q8, d1[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d3[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d5[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q9, d7[1] \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! \n"// r20 r21 "vmla.f32 q14, q8, d6[0] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q15, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d7[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d8[1] \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q11, d10[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vmla.f32 q14, q8, d9[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d11[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q9, d11[1] \n" "pld [%4, #256] \n" "vld1.u16 {d8-d11}, [%4 :64] \n"// r22 r23 r24 r25 "vmla.f32 q14, q8, d0[0] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q15, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%5, #128] \n" "vld1.u16 {d2-d3}, [%5 :64]! \n"// r30 r31 "vmla.f32 q14, q10, d6[0] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q15, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q9, d10[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d9[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d11[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%5, #256] \n" "vld1.u16 {d8-d11}, [%5 :64] \n"// r32 r33 r34 r35 "vmla.f32 q14, q10, d0[0] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q15, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d1[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d3[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vmla.f32 q14, q8, d5[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q9, d7[1] \n" "pld [%6, #128] \n" "vld1.u16 {d2-d3}, [%6 :64]! \n"// r40 r41 "vmla.f32 q14, q8, d6[0] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q15, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d7[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d8[1] \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q11, d10[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d9[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d11[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q9, d11[1] \n" "pld [%6, #256] \n" "vld1.u16 {d8-d11}, [%6 :64] \n"// r42 r43 r44 r45 "vmla.f32 q14, q8, d0[0] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q15, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q11, d7[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128] \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vst1.u16 {d24-d25}, [%0 :64]! 
\n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j<outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n"// r00 "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v0.4s, v0.4h, #16 \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%2] \n"// r01 r02 r03 r04 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v20.4s}, [%1], #16 \n"// sum0 "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmul v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmul v22.4s, v17.4s, v0.s[1] \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmul v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v3.s[0] \n" 
"prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n"// r10 "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%3] \n"// r11 r12 r13 r14 "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v21.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v3.s[0] 
\n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v0.4h}, [%4], #8 \n"// r20 "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%4] \n"// r21 r22 r23 r24 "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, 
v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n"// r30 "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%5] \n"// r31 r32 r33 r34 "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v21.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, 
v16.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%6, #64] \n" "ld1 {v0.4h}, [%6], #8 \n"// r40 "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%6] \n"// r41 r42 r43 r44 "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla 
v21.4s, v24.4s, v3.s[0] \n" // "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fadd v22.4s, v21.4s, v22.4s \n" "fadd v23.4s, v22.4s, v23.4s \n" "fadd v20.4s, v20.4s, v23.4s \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "shrn v20.4h, v20.4s, #16 \n" "st1 {v20.4h}, [%0], #8 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%2, #64] \n" "vld1.u16 {d1}, [%2 :64]! \n"// r00 "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1 :128]! \n"// sum0 "vmul.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmul.f32 q14, q9, d0[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmul.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%2, #256] \n" "vld1.u16 {d6-d9}, [%2 :64] \n"// r01 r02 r03 r04 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q1, d6, #16 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "pld [%3, #64] \n" "vld1.u16 {d1}, [%3 :64]! \n"// r10 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d0[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d0[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d1[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "pld [%3, #256] \n" "vld1.u16 {d6-d9}, [%3 :64] \n"// r11 r12 r13 r14 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q1, d6, #16 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q13, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%4, #64] \n" "vld1.u16 {d1}, [%4 :64]! \n"// r20 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q13, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d0[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #256] \n" "vld1.u16 {d6-d9}, [%4 :64] \n"// r21 r22 r23 r24 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q1, d6, #16 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5 :64]! \n"// r30 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d0[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d0[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d1[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "pld [%5, #256] \n" "vld1.u16 {d6-d9}, [%5 :64] \n"// r31 r32 r33 r34 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q1, d6, #16 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q13, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%6, #64] \n" "vld1.u16 {d1}, [%6 :64]! 
\n"// r40 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q13, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d0[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #256] \n" "vld1.u16 {d6-d9}, [%6 :64] \n"// r41 r42 r43 r44 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q1, d6, #16 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" // "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d8[1] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vadd.f32 q13, q13, q14 \n" "vadd.f32 q12, q12, q15 \n" "vadd.f32 q12, q12, q13 \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "vshrn.u32 d24, q12, #16 \n" "vst1.u16 {d24}, [%0 :64]! \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } r0 += 4*4; r1 += 4*4; r2 += 4*4; r3 += 4*4; r4 += 4*4; } } } } static void conv5x5s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator); const int tailstep = (w - 2*outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); int q=0; for (; q<inch-1; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); const unsigned short* r3 = img0.row<const unsigned short>(3); const unsigned short* r4 = img0.row<const unsigned short>(4); const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j+3<outw; j+=4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"// r00 r01 r02 r03 "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n"// r04 r05 r06 r07 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n"// sum0 sum1 sum2 sum3 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, 
v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%1, #192] \n" "ld1 {v28.4h, v29.4h, v30.4h}, [%1] \n"// r08 r09 r010 "shll v28.4s, v28.4h, #16 \n" "shll v29.4s, v29.4h, #16 \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v7.s[0] \n" "fmla v23.4s, v24.4s, v29.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" 
"fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v7.s[1] \n" "fmla v23.4s, v25.4s, v29.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v7.s[2] \n" "fmla v23.4s, v26.4s, v29.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v7.s[3] \n" "fmla v23.4s, v27.4s, v29.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"// r10 r11 r12 r13 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v6.s[0] \n" "fmla v22.4s, v16.4s, v28.s[0] \n" "fmla v23.4s, v16.4s, v30.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "fmla v22.4s, v17.4s, v28.s[1] \n" "fmla v23.4s, v17.4s, v30.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v6.s[2] \n" "fmla v22.4s, v18.4s, v28.s[2] \n" "fmla v23.4s, v18.4s, v30.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v19.4s, v28.s[3] \n" "fmla v23.4s, v19.4s, v30.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n"// r14 r15 r16 r17 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" 
"fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v28.4h, v29.4h, v30.4h}, [%2] \n"// r18 r19 r110 "shll v28.4s, v28.4h, #16 \n" "shll v29.4s, v29.4h, #16 \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v28.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v28.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, 
v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v28.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v28.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v7.s[0] \n" "fmla v23.4s, v16.4s, v29.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v7.s[1] \n" "fmla v23.4s, v17.4s, v29.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v7.s[2] \n" "fmla v23.4s, v18.4s, v29.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v7.s[3] \n" "fmla v23.4s, v19.4s, v29.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"// r20 r21 r22 r23 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v6.s[0] \n" "fmla v22.4s, v24.4s, v28.s[0] \n" "fmla v23.4s, v24.4s, v30.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "fmla v22.4s, v25.4s, v28.s[1] \n" "fmla v23.4s, v25.4s, v30.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v6.s[2] \n" "fmla v22.4s, v26.4s, v28.s[2] \n" "fmla v23.4s, v26.4s, v30.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "fmla v22.4s, v27.4s, v28.s[3] \n" "fmla v23.4s, v27.4s, v30.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n"// r24 
r25 r26 r27 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v28.4h, v29.4h, v30.4h}, [%3] \n"// r28 r29 r210 "shll v28.4s, v28.4h, #16 \n" "shll v29.4s, v29.4h, #16 \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] 
\n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v7.s[0] \n" "fmla v23.4s, v24.4s, v29.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v7.s[1] \n" "fmla v23.4s, v25.4s, v29.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v7.s[2] \n" "fmla v23.4s, v26.4s, v29.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v7.s[3] \n" "fmla v23.4s, v27.4s, v29.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n"// r30 r31 r32 r33 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v6.s[0] \n" "fmla v22.4s, v16.4s, v28.s[0] \n" "fmla v23.4s, v16.4s, v30.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "fmla v22.4s, v17.4s, v28.s[1] \n" "fmla v23.4s, v17.4s, v30.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" 
"fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v6.s[2] \n" "fmla v22.4s, v18.4s, v28.s[2] \n" "fmla v23.4s, v18.4s, v30.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v19.4s, v28.s[3] \n" "fmla v23.4s, v19.4s, v30.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n"// r34 r35 r36 r37 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v28.4h, v29.4h, v30.4h}, [%4] \n"// r38 r39 r310 "shll v28.4s, v28.4h, #16 \n" "shll v29.4s, v29.4h, #16 \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla 
v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v28.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v28.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v28.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v28.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v7.s[0] \n" "fmla v23.4s, v16.4s, v29.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v7.s[1] \n" "fmla v23.4s, v17.4s, v29.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v7.s[2] \n" "fmla v23.4s, v18.4s, v29.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v7.s[3] \n" "fmla v23.4s, v19.4s, v29.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"// r40 r41 r42 r43 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v6.s[0] \n" "fmla v22.4s, v24.4s, v28.s[0] \n" "fmla v23.4s, 
v24.4s, v30.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "fmla v22.4s, v25.4s, v28.s[1] \n" "fmla v23.4s, v25.4s, v30.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v6.s[2] \n" "fmla v22.4s, v26.4s, v28.s[2] \n" "fmla v23.4s, v26.4s, v30.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "fmla v22.4s, v27.4s, v28.s[3] \n" "fmla v23.4s, v27.4s, v30.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5], #32 \n"// r44 r45 r46 r47 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v28.4h, v29.4h, v30.4h}, [%5] \n"// r48 r49 r410 "shll v28.4s, v28.4h, #16 \n" "shll v29.4s, v29.4h, #16 \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, 
v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v7.s[0] \n" "fmla v23.4s, v24.4s, v29.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v7.s[1] \n" "fmla v23.4s, v25.4s, v29.s[1] \n" // "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v7.s[2] \n" "fmla v23.4s, v26.4s, v29.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v7.s[3] \n" 
"fmla v23.4s, v27.4s, v29.s[3] \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v6.s[0] \n" "fmla v22.4s, v16.4s, v28.s[0] \n" "fmla v23.4s, v16.4s, v30.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "fmla v22.4s, v17.4s, v28.s[1] \n" "fmla v23.4s, v17.4s, v30.s[1] \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v6.s[2] \n" "fmla v22.4s, v18.4s, v28.s[2] \n" "fmla v23.4s, v18.4s, v30.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v19.4s, v28.s[3] \n" "fmla v23.4s, v19.4s, v30.s[3] \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n"// sum0 sum1 sum2 sum3 "pld [%1, #256] \n" "vld1.u16 {d4-d7}, [%1 :64]! \n"// r00 r01 r02 r03 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%1, #256] \n" "vld1.u16 {d12-d15}, [%1 :64]! 
\n"// r04 r05 r06 r07 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d6[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d14[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d7[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d15[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d15[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%1, #128] \n" "vld1.u16 {d2-d3}, [%1 :64]! 
\n"// r08 r09 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q10, d14[0] \n" "vmla.f32 q15, q10, d2[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" "vmla.f32 q14, q11, d14[1] \n" "vmla.f32 q15, q11, d2[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d11[0] \n" "vmla.f32 q14, q8, d15[0] \n" "vmla.f32 q15, q8, d3[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vmla.f32 q14, q9, d15[1] \n" "vmla.f32 q15, q9, d3[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%1, #64] \n" "vld1.u16 {d5}, [%1 :64] \n"// r010 "vshll.u16 q2, d5, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "pld [%2, #256] \n" "vld1.u16 {d12-d15}, [%2 :64]! 
\n"// r10 r11 r12 r13 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n"// r14 r15 r16 r17 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q10, d8[0] \n" "vmla.f32 q13, q10, d12[0] \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d12[1] \n" "vmla.f32 q14, q11, d0[1] \n" "vmla.f32 q15, q11, d4[1] \n" "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q8, d13[0] \n" "vmla.f32 q14, q8, d1[0] \n" "vmla.f32 q15, q8, d5[0] \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d13[1] \n" "vmla.f32 q14, q9, d1[1] \n" "vmla.f32 q15, q9, d5[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d10[0] \n" "vmla.f32 q13, q8, d14[0] \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d10[1] \n" "vmla.f32 q13, q9, d14[1] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d11[0] \n" "vmla.f32 q13, q10, d15[0] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d11[1] \n" "vmla.f32 q13, q11, d15[1] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%2, #128] \n" "vld1.u16 {d10-d11}, [%2 :64]! 
\n"// r18 r19 "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q12, q10, d12[0] \n" "vmla.f32 q13, q10, d0[0] \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vmla.f32 q12, q11, d12[1] \n" "vmla.f32 q13, q11, d0[1] \n" "vmla.f32 q14, q11, d4[1] \n" "vmla.f32 q15, q11, d8[1] \n" "vmla.f32 q12, q8, d13[0] \n" "vmla.f32 q13, q8, d1[0] \n" "vmla.f32 q14, q8, d5[0] \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q12, q9, d13[1] \n" "vmla.f32 q13, q9, d1[1] \n" "vmla.f32 q14, q9, d5[1] \n" "vmla.f32 q15, q9, d9[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d14[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d14[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d15[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d15[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%2, #64] \n" "vld1.u16 {d13}, [%2 :64] \n"// r110 "vshll.u16 q6, d13, #16 \n" "vmla.f32 q12, q10, d0[0] \n" "vmla.f32 q13, q10, d4[0] \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d4[1] \n" "vmla.f32 q14, q11, d8[1] \n" "vmla.f32 q15, q11, d12[1] \n" "vmla.f32 q12, q8, d1[0] \n" "vmla.f32 q13, q8, d5[0] \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d5[1] \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! 
\n"// r20 r21 r22 r23 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q9, d9[1] \n" "vmla.f32 q15, q9, d13[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3 :64]! \n"// r24 r25 r26 r27 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d6[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d14[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d7[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d15[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d15[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%3, #128] \n" "vld1.u16 {d2-d3}, [%3 :64]! 
\n"// r28 r29 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q10, d14[0] \n" "vmla.f32 q15, q10, d2[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" "vmla.f32 q14, q11, d14[1] \n" "vmla.f32 q15, q11, d2[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d11[0] \n" "vmla.f32 q14, q8, d15[0] \n" "vmla.f32 q15, q8, d3[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vmla.f32 q14, q9, d15[1] \n" "vmla.f32 q15, q9, d3[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%3, #64] \n" "vld1.u16 {d5}, [%3 :64] \n"// r210 "vshll.u16 q2, d5, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64]! 
\n"// r30 r31 r32 r33 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! \n"// r34 r35 r36 r37 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q10, d8[0] \n" "vmla.f32 q13, q10, d12[0] \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d12[1] \n" "vmla.f32 q14, q11, d0[1] \n" "vmla.f32 q15, q11, d4[1] \n" "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q8, d13[0] \n" "vmla.f32 q14, q8, d1[0] \n" "vmla.f32 q15, q8, d5[0] \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d13[1] \n" "vmla.f32 q14, q9, d1[1] \n" "vmla.f32 q15, q9, d5[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d10[0] \n" "vmla.f32 q13, q8, d14[0] \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d10[1] \n" "vmla.f32 q13, q9, d14[1] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d11[0] \n" "vmla.f32 q13, q10, d15[0] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d11[1] \n" "vmla.f32 q13, q11, d15[1] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%4, #128] \n" "vld1.u16 {d10-d11}, [%4 :64]! 
\n"// r38 r39 "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q12, q10, d12[0] \n" "vmla.f32 q13, q10, d0[0] \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vmla.f32 q12, q11, d12[1] \n" "vmla.f32 q13, q11, d0[1] \n" "vmla.f32 q14, q11, d4[1] \n" "vmla.f32 q15, q11, d8[1] \n" "vmla.f32 q12, q8, d13[0] \n" "vmla.f32 q13, q8, d1[0] \n" "vmla.f32 q14, q8, d5[0] \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q12, q9, d13[1] \n" "vmla.f32 q13, q9, d1[1] \n" "vmla.f32 q14, q9, d5[1] \n" "vmla.f32 q15, q9, d9[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d14[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d14[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d15[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d15[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%4, #64] \n" "vld1.u16 {d13}, [%4 :64] \n"// r310 "vshll.u16 q6, d13, #16 \n" "vmla.f32 q12, q10, d0[0] \n" "vmla.f32 q13, q10, d4[0] \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d4[1] \n" "vmla.f32 q14, q11, d8[1] \n" "vmla.f32 q15, q11, d12[1] \n" "vmla.f32 q12, q8, d1[0] \n" "vmla.f32 q13, q8, d5[0] \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d5[1] \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! 
\n"// r40 r41 r42 r43 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q9, d9[1] \n" "vmla.f32 q15, q9, d13[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d12-d15}, [%5 :64]! \n"// r44 r45 r46 r47 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d6[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d14[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d7[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d15[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d15[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%5, #128] \n" "vld1.u16 {d2-d3}, [%5 :64]! 
\n"// r48 r49 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q10, d14[0] \n" "vmla.f32 q15, q10, d2[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" "vmla.f32 q14, q11, d14[1] \n" "vmla.f32 q15, q11, d2[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d11[0] \n" "vmla.f32 q14, q8, d15[0] \n" "vmla.f32 q15, q8, d3[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vmla.f32 q14, q9, d15[1] \n" "vmla.f32 q15, q9, d3[1] \n" // "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128] \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%5, #64] \n" "vld1.u16 {d5}, [%5 :64] \n"// r410 "vshll.u16 q2, d5, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "sub %1, %1, #16 \n" "sub %2, %2, 
#16 \n" "sub %3, %3, #16 \n" "sub %4, %4, #16 \n" "sub %5, %5, #16 \n" "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j+1<outw; j+=2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"// r00 r01 r02 r03 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v20.4s, v21.4s}, [%0] \n"// sum0 sum1 "fmul v22.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v17.4s, v17.4h, #16 \n" "fmul v23.4s, v16.4s, v2.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%1, #192] \n" "ld1 {v4.4h, v5.4h, v6.4h}, [%1] \n"// r04 r05 r06 "shll v25.4s, v25.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, 
v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"// r10 r11 r12 r13 "shll v17.4s, v17.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" 
"fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v4.4h, v5.4h, v6.4h}, [%2] \n"// r14 r15 r16 "shll v17.4s, v17.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "fmla v22.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"// r20 r21 r22 r23 "shll v25.4s, v25.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v22.4s, v24.4s, 
v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v4.4h, v5.4h, v6.4h}, [%3] \n"// r24 r25 r26 "shll v25.4s, v25.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla 
v21.4s, v19.4s, v4.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n"// r30 r31 r32 r33 "shll v17.4s, v17.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v4.4h, v5.4h, v6.4h}, [%4] \n"// r34 r35 r36 "shll v17.4s, v17.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "fmla v22.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, 
v16.4s, v3.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"// r40 r41 r42 r43 "shll v25.4s, v25.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, 
v16.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v4.4h, v5.4h, v6.4h}, [%5] \n"// r44 r45 r46 "shll v25.4s, v25.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" // "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" 
"fmla v21.4s, v27.4s, v5.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "st1 {v20.4s, v21.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%1, #256] \n" "vld1.u16 {d4-d7}, [%1 :64]! \n"// r00 r01 r02 r03 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128] \n"// sum0 sum1 "vmul.f32 q14, q8, d0[0] \n" "vmul.f32 q15, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%1, #192] \n" "vld1.u16 {d10-d12}, [%1 :64] \n"// r04 r05 r06 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d11[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n"// r10 r11 r12 r13 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d9[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d13[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d1[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d5[1] \n" "pld [%2, #192] \n" "vld1.u16 {d10-d12}, [%2 :64] \n"// r14 r15 r16 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d3[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d5[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vmla.f32 q13, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d7[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d11[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! \n"// r20 r21 r22 r23 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d12[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d9[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d13[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d13[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%3, #192] \n" "vld1.u16 {d10-d12}, [%3 :64] \n"// r24 r25 r26 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d11[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! \n"// r30 r31 r32 r33 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d9[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d13[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d1[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d5[1] \n" "pld [%4, #192] \n" "vld1.u16 {d10-d12}, [%4 :64] \n"// r34 r35 r36 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d3[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d5[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vmla.f32 q13, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d7[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d11[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n"// r40 r41 r42 r43 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d12[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d9[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d13[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d13[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%5, #192] \n" "vld1.u16 {d10-d12}, [%5 :64] \n"// r44 r45 r46 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" // "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128] \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d11[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "vst1.f32 
{d24-d27}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j<outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n"// sum0 "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4h, v1.4h}, [%1], #16 \n"// r00 r01 "shll v0.4s, v0.4h, #16 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmul v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmul v22.4s, v17.4s, v0.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmul v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%1, #192] \n" "ld1 {v2.4h, v3.4h, v4.4h}, [%1] \n"// r02 r03 r04 "shll v25.4s, v25.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%6, 
#256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4h, v1.4h}, [%2], #16 \n"// r10 r11 "shll v17.4s, v17.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v2.4h, v3.4h, v4.4h}, [%2] \n"// r12 r13 r14 "shll v17.4s, v17.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, 
[%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n"// r20 r21 "shll v25.4s, v25.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v2.4h, v3.4h, v4.4h}, [%3] \n"// r22 r23 r24 "shll v25.4s, v25.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "prfm 
pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4h, v1.4h}, [%4], #16 \n"// r30 r31 "shll v17.4s, v17.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v2.4h, v3.4h, v4.4h}, [%4] \n"// r32 r33 r34 "shll v17.4s, v17.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v3.s[0] \n" 
"prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4h, v1.4h}, [%5], #16 \n"// r40 r41 "shll v25.4s, v25.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v2.4h, v3.4h, v4.4h}, [%5] \n"// r42 r43 r44 "shll v25.4s, v25.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, 
v3.s[0] \n" // "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fadd v22.4s, v21.4s, v22.4s \n" "fadd v23.4s, v22.4s, v23.4s \n" "fadd v20.4s, v20.4s, v23.4s \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "st1 {v20.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%1, #128] \n" "vld1.u16 {d2-d3}, [%1 :64]! \n"// r00 r01 "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "pld [%0, #128] \n" "vld1.f32 {d24-d25}, [%0 :128] \n"// sum0 "vmul.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmul.f32 q14, q9, d0[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmul.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%1, #192] \n" "vld1.u16 {d6-d8}, [%1 :64] \n"// r02 r03 r04 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! \n"// r10 r11 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d0[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d0[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d1[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "pld [%2, #192] \n" "vld1.u16 {d6-d8}, [%2 :64] \n"// r12 r13 r14 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vmla.f32 q13, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%3, #128] \n" "vld1.u16 {d2-d3}, [%3 :64]! \n"// r20 r21 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d0[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%3, #192] \n" "vld1.u16 {d6-d8}, [%3 :64] \n"// r22 r23 r24 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! \n"// r30 r31 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d0[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d0[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d1[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "pld [%4, #192] \n" "vld1.u16 {d6-d8}, [%4 :64] \n"// r32 r33 r34 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vmla.f32 q13, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d6[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%5, #128] \n" "vld1.u16 {d2-d3}, [%5 :64]! 
\n"// r40 r41 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d8[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d0[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%5, #192] \n" "vld1.u16 {d6-d8}, [%5 :64] \n"// r42 r43 r44 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%6, #256] \n" "vld1.u16 {d16-d19}, [%6 :128]! 
\n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" // "pld [%6, #256] \n" "vld1.u16 {d20-d23}, [%6 :128] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d8[1] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vadd.f32 q14, q13, q14 \n" "vadd.f32 q15, q14, q15 \n" "vadd.f32 q12, q12, q15 \n" "sub %6, %6, #768 \n"// kptr -= 24 * 16; "vst1.f32 {d24-d25}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } for (; q<inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); const float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); const unsigned short* r3 = img0.row<const unsigned short>(3); const unsigned short* r4 = img0.row<const unsigned short>(4); const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j+3<outw; j+=4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"// r00 r01 r02 r03 "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, 
[%2], #32 \n"// r04 r05 r06 r07 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"// sum0 sum1 sum2 sum3 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v28.4h, v29.4h, v30.4h}, [%2] \n"// r08 r09 r010 "shll v28.4s, v28.4h, #16 \n" "shll v29.4s, v29.4h, #16 \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, 
v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v7.s[0] \n" "fmla v23.4s, v24.4s, v29.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v7.s[1] \n" "fmla v23.4s, v25.4s, v29.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v7.s[2] \n" "fmla v23.4s, v26.4s, v29.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v7.s[3] \n" "fmla v23.4s, v27.4s, v29.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"// r10 r11 r12 r13 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v16.4s, v4.s[0] 
\n" "fmla v21.4s, v16.4s, v6.s[0] \n" "fmla v22.4s, v16.4s, v28.s[0] \n" "fmla v23.4s, v16.4s, v30.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "fmla v22.4s, v17.4s, v28.s[1] \n" "fmla v23.4s, v17.4s, v30.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v6.s[2] \n" "fmla v22.4s, v18.4s, v28.s[2] \n" "fmla v23.4s, v18.4s, v30.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v19.4s, v28.s[3] \n" "fmla v23.4s, v19.4s, v30.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n"// r14 r15 r16 r17 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v28.4h, v29.4h, v30.4h}, [%3] \n"// r18 r19 r110 "shll v28.4s, v28.4h, #16 \n" "shll v29.4s, v29.4h, #16 \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" 
"fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v28.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v28.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v28.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v28.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v7.s[0] \n" "fmla v23.4s, v16.4s, v29.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v7.s[1] \n" "fmla v23.4s, v17.4s, v29.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v7.s[2] \n" "fmla v23.4s, v18.4s, v29.s[2] \n" "fmla v20.4s, 
v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v7.s[3] \n" "fmla v23.4s, v19.4s, v29.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n"// r20 r21 r22 r23 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v6.s[0] \n" "fmla v22.4s, v24.4s, v28.s[0] \n" "fmla v23.4s, v24.4s, v30.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "fmla v22.4s, v25.4s, v28.s[1] \n" "fmla v23.4s, v25.4s, v30.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v6.s[2] \n" "fmla v22.4s, v26.4s, v28.s[2] \n" "fmla v23.4s, v26.4s, v30.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "fmla v22.4s, v27.4s, v28.s[3] \n" "fmla v23.4s, v27.4s, v30.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n"// r24 r25 r26 r27 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, 
v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v28.4h, v29.4h, v30.4h}, [%4] \n"// r28 r29 r210 "shll v28.4s, v28.4h, #16 \n" "shll v29.4s, v29.4h, #16 \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v7.s[0] \n" "fmla v23.4s, v24.4s, v29.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v7.s[1] \n" "fmla v23.4s, v25.4s, 
v29.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v7.s[2] \n" "fmla v23.4s, v26.4s, v29.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v7.s[3] \n" "fmla v23.4s, v27.4s, v29.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"// r30 r31 r32 r33 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v6.s[0] \n" "fmla v22.4s, v16.4s, v28.s[0] \n" "fmla v23.4s, v16.4s, v30.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "fmla v22.4s, v17.4s, v28.s[1] \n" "fmla v23.4s, v17.4s, v30.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v6.s[2] \n" "fmla v22.4s, v18.4s, v28.s[2] \n" "fmla v23.4s, v18.4s, v30.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v19.4s, v28.s[3] \n" "fmla v23.4s, v19.4s, v30.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5], #32 \n"// r34 r35 r36 r37 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, 
v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v28.4h, v29.4h, v30.4h}, [%5] \n"// r38 r39 r310 "shll v28.4s, v28.4h, #16 \n" "shll v29.4s, v29.4h, #16 \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v28.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v28.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v28.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" 
"fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v28.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v7.s[0] \n" "fmla v23.4s, v16.4s, v29.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v7.s[1] \n" "fmla v23.4s, v17.4s, v29.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v7.s[2] \n" "fmla v23.4s, v18.4s, v29.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v7.s[3] \n" "fmla v23.4s, v19.4s, v29.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%6], #32 \n"// r40 r41 r42 r43 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v6.s[0] \n" "fmla v22.4s, v24.4s, v28.s[0] \n" "fmla v23.4s, v24.4s, v30.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "fmla v22.4s, v25.4s, v28.s[1] \n" "fmla v23.4s, v25.4s, v30.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v6.s[2] \n" "fmla v22.4s, v26.4s, v28.s[2] \n" "fmla v23.4s, v26.4s, v30.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "fmla v22.4s, v27.4s, v28.s[3] \n" "fmla v23.4s, v27.4s, v30.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"// r44 r45 r46 r47 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, 
#16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%6, #192] \n" "ld1 {v28.4h, v29.4h, v30.4h}, [%6] \n"// r48 r49 r410 "shll v28.4s, v28.4h, #16 \n" "shll v29.4s, v29.4h, #16 \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm 
pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v7.s[0] \n" "fmla v23.4s, v24.4s, v29.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v7.s[1] \n" "fmla v23.4s, v25.4s, v29.s[1] \n" // "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v7.s[2] \n" "fmla v23.4s, v26.4s, v29.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v7.s[3] \n" "fmla v23.4s, v27.4s, v29.s[3] \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v6.s[0] \n" "fmla v22.4s, v16.4s, v28.s[0] \n" "fmla v23.4s, v16.4s, v30.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "fmla v22.4s, v17.4s, v28.s[1] \n" "fmla v23.4s, v17.4s, v30.s[1] \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v6.s[2] \n" "fmla v22.4s, v18.4s, v28.s[2] \n" "fmla v23.4s, v18.4s, v30.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v19.4s, v28.s[3] \n" "fmla v23.4s, v19.4s, v30.s[3] \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%0], #32 \n" : 
"=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); #else // __aarch64__ asm volatile( "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n"// sum0 sum1 sum2 sum3 "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n"// r00 r01 r02 r03 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%2, #256] \n" "vld1.u16 {d12-d15}, [%2 :64]! \n"// r04 r05 r06 r07 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d6[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d14[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d7[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d15[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d15[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! \n"// r08 r09 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q10, d14[0] \n" "vmla.f32 q15, q10, d2[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" "vmla.f32 q14, q11, d14[1] \n" "vmla.f32 q15, q11, d2[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d11[0] \n" "vmla.f32 q14, q8, d15[0] \n" "vmla.f32 q15, q8, d3[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vmla.f32 q14, q9, d15[1] \n" "vmla.f32 q15, q9, d3[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%2, #64] \n" "vld1.u16 {d5}, [%2 :64] \n"// r010 "vshll.u16 q2, d5, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3 :64]! \n"// r10 r11 r12 r13 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! 
\n"// r14 r15 r16 r17 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q10, d8[0] \n" "vmla.f32 q13, q10, d12[0] \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d12[1] \n" "vmla.f32 q14, q11, d0[1] \n" "vmla.f32 q15, q11, d4[1] \n" "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q8, d13[0] \n" "vmla.f32 q14, q8, d1[0] \n" "vmla.f32 q15, q8, d5[0] \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d13[1] \n" "vmla.f32 q14, q9, d1[1] \n" "vmla.f32 q15, q9, d5[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d10[0] \n" "vmla.f32 q13, q8, d14[0] \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d10[1] \n" "vmla.f32 q13, q9, d14[1] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d11[0] \n" "vmla.f32 q13, q10, d15[0] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d11[1] \n" "vmla.f32 q13, q11, d15[1] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%3, #128] \n" "vld1.u16 {d10-d11}, [%3 :64]! 
\n"// r18 r19 "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q12, q10, d12[0] \n" "vmla.f32 q13, q10, d0[0] \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vmla.f32 q12, q11, d12[1] \n" "vmla.f32 q13, q11, d0[1] \n" "vmla.f32 q14, q11, d4[1] \n" "vmla.f32 q15, q11, d8[1] \n" "vmla.f32 q12, q8, d13[0] \n" "vmla.f32 q13, q8, d1[0] \n" "vmla.f32 q14, q8, d5[0] \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q12, q9, d13[1] \n" "vmla.f32 q13, q9, d1[1] \n" "vmla.f32 q14, q9, d5[1] \n" "vmla.f32 q15, q9, d9[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d14[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d14[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d15[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d15[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%3, #64] \n" "vld1.u16 {d13}, [%3 :64] \n"// r110 "vshll.u16 q6, d13, #16 \n" "vmla.f32 q12, q10, d0[0] \n" "vmla.f32 q13, q10, d4[0] \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d4[1] \n" "vmla.f32 q14, q11, d8[1] \n" "vmla.f32 q15, q11, d12[1] \n" "vmla.f32 q12, q8, d1[0] \n" "vmla.f32 q13, q8, d5[0] \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d5[1] \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! 
\n"// r20 r21 r22 r23 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q9, d9[1] \n" "vmla.f32 q15, q9, d13[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64]! \n"// r24 r25 r26 r27 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d6[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d14[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d7[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d15[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d15[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! 
\n"// r28 r29 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q10, d14[0] \n" "vmla.f32 q15, q10, d2[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" "vmla.f32 q14, q11, d14[1] \n" "vmla.f32 q15, q11, d2[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d11[0] \n" "vmla.f32 q14, q8, d15[0] \n" "vmla.f32 q15, q8, d3[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vmla.f32 q14, q9, d15[1] \n" "vmla.f32 q15, q9, d3[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%4, #64] \n" "vld1.u16 {d5}, [%4 :64] \n"// r210 "vshll.u16 q2, d5, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "pld [%5, #256] \n" "vld1.u16 {d12-d15}, [%5 :64]! 
\n"// r30 r31 r32 r33 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n"// r34 r35 r36 r37 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q10, d8[0] \n" "vmla.f32 q13, q10, d12[0] \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d12[1] \n" "vmla.f32 q14, q11, d0[1] \n" "vmla.f32 q15, q11, d4[1] \n" "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q8, d13[0] \n" "vmla.f32 q14, q8, d1[0] \n" "vmla.f32 q15, q8, d5[0] \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d13[1] \n" "vmla.f32 q14, q9, d1[1] \n" "vmla.f32 q15, q9, d5[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d10[0] \n" "vmla.f32 q13, q8, d14[0] \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d10[1] \n" "vmla.f32 q13, q9, d14[1] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d11[0] \n" "vmla.f32 q13, q10, d15[0] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d11[1] \n" "vmla.f32 q13, q11, d15[1] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%5, #128] \n" "vld1.u16 {d10-d11}, [%5 :64]! 
\n"// r38 r39 "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q12, q10, d12[0] \n" "vmla.f32 q13, q10, d0[0] \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vmla.f32 q12, q11, d12[1] \n" "vmla.f32 q13, q11, d0[1] \n" "vmla.f32 q14, q11, d4[1] \n" "vmla.f32 q15, q11, d8[1] \n" "vmla.f32 q12, q8, d13[0] \n" "vmla.f32 q13, q8, d1[0] \n" "vmla.f32 q14, q8, d5[0] \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q12, q9, d13[1] \n" "vmla.f32 q13, q9, d1[1] \n" "vmla.f32 q14, q9, d5[1] \n" "vmla.f32 q15, q9, d9[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d14[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d14[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d15[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d15[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "pld [%5, #64] \n" "vld1.u16 {d13}, [%5 :64] \n"// r310 "vshll.u16 q6, d13, #16 \n" "vmla.f32 q12, q10, d0[0] \n" "vmla.f32 q13, q10, d4[0] \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d4[1] \n" "vmla.f32 q14, q11, d8[1] \n" "vmla.f32 q15, q11, d12[1] \n" "vmla.f32 q12, q8, d1[0] \n" "vmla.f32 q13, q8, d5[0] \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d5[1] \n" "pld [%6, #256] \n" "vld1.u16 {d4-d7}, [%6 :64]! 
\n"// r40 r41 r42 r43 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q9, d9[1] \n" "vmla.f32 q15, q9, d13[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6 :64]! \n"// r44 r45 r46 r47 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d2[0] \n" "vmla.f32 q13, q10, d6[0] \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "vmla.f32 q14, q11, d10[1] \n" "vmla.f32 q15, q11, d14[1] \n" "vmla.f32 q12, q8, d3[0] \n" "vmla.f32 q13, q8, d7[0] \n" "vmla.f32 q14, q8, d11[0] \n" "vmla.f32 q15, q8, d15[0] \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vmla.f32 q14, q9, d11[1] \n" "vmla.f32 q15, q9, d15[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%6, #128] \n" "vld1.u16 {d2-d3}, [%6 :64]! 
\n"// r48 r49 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q10, d16, #16 \n" "vshll.u16 q11, d17, #16 \n" "vshll.u16 q8, d18, #16 \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q12, q10, d6[0] \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q10, d14[0] \n" "vmla.f32 q15, q10, d2[0] \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" "vmla.f32 q14, q11, d14[1] \n" "vmla.f32 q15, q11, d2[1] \n" "vmla.f32 q12, q8, d7[0] \n" "vmla.f32 q13, q8, d11[0] \n" "vmla.f32 q14, q8, d15[0] \n" "vmla.f32 q15, q8, d3[0] \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vmla.f32 q14, q9, d15[1] \n" "vmla.f32 q15, q9, d3[1] \n" // "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128] \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%6, #64] \n" "vld1.u16 {d5}, [%6 :64] \n"// r410 "vshll.u16 q2, d5, #16 \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "sub %2, %2, #16 \n" "sub %3, %3, 
#16 \n" "sub %4, %4, #16 \n" "sub %5, %5, #16 \n" "sub %6, %6, #16 \n" "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d26, q14, #16 \n" "vshrn.u32 d27, q15, #16 \n" "vst1.u16 {d24-d27}, [%0 :64]! \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j+1<outw; j+=2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"// r00 r01 r02 r03 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v20.4s, v21.4s}, [%1], #32 \n"// sum0 sum1 "fmul v22.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v17.4s, v17.4h, #16 \n" "fmul v23.4s, v16.4s, v2.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v4.4h, v5.4h, v6.4h}, [%2] \n"// r04 r05 r06 "shll v25.4s, v25.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "shll v27.4s, 
v27.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"// r10 r11 r12 r13 "shll v17.4s, v17.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, 
v2.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v4.4h, v5.4h, v6.4h}, [%3] \n"// r14 r15 r16 "shll v17.4s, v17.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "fmla v22.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n"// r20 r21 r22 
r23 "shll v25.4s, v25.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v4.4h, v5.4h, v6.4h}, [%4] \n"// r24 r25 r26 "shll v25.4s, v25.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" 
"shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"// r30 r31 r32 r33 "shll v17.4s, v17.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v4.4h, v5.4h, v6.4h}, [%5] \n"// r34 r35 r36 "shll v17.4s, v17.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, 
#16 \n" "shll v6.4s, v6.4h, #16 \n" "fmla v22.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%6], #32 \n"// r40 r41 r42 r43 "shll v25.4s, v25.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla 
v23.4s, v26.4s, v6.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%6, #192] \n" "ld1 {v4.4h, v5.4h, v6.4h}, [%6] \n"// r44 r45 r46 "shll v25.4s, v25.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "fmla v22.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v22.4s, v24.4s, v3.s[0] \n" // "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] 
\n" "shll v27.4s, v27.4h, #16 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "st1 {v20.4h, v21.4h}, [%0], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n"// r00 r01 r02 r03 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// sum0 sum1 "vmul.f32 q14, q8, d0[0] \n" "vmul.f32 q15, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%2, #192] \n" "vld1.u16 {d10-d12}, [%2 :64] \n"// r04 r05 r06 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d11[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! \n"// r10 r11 r12 r13 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vmla.f32 q14, q10, d9[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d13[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d1[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d5[1] \n" "pld [%3, #192] \n" "vld1.u16 {d10-d12}, [%3 :64] \n"// r14 r15 r16 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d3[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d5[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vmla.f32 q13, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vmla.f32 q14, q10, d7[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d11[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! \n"// r20 r21 r22 r23 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d12[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d9[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d13[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d13[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%4, #192] \n" "vld1.u16 {d10-d12}, [%4 :64] \n"// r24 r25 r26 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d11[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n"// r30 r31 r32 r33 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d9[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d13[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d0[1] \n" "vmla.f32 q13, q11, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d1[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "vmla.f32 q13, q9, d5[1] \n" "pld [%5, #192] \n" "vld1.u16 {d10-d12}, [%5 :64] \n"// r34 r35 r36 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vmla.f32 q14, q10, d3[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d4[0] \n" "vmla.f32 q15, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d4[1] \n" "vmla.f32 q13, q11, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d5[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vmla.f32 q13, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d7[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d11[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%6, #256] \n" "vld1.u16 {d4-d7}, [%6 :64]! \n"// r40 r41 r42 r43 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, d8[0] \n" "vmla.f32 q15, q10, d12[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d8[1] \n" "vmla.f32 q13, q11, d12[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d9[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d13[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vmla.f32 q13, q9, d13[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vmla.f32 q14, q10, d1[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%6, #192] \n" "vld1.u16 {d10-d12}, [%6 :64] \n"// r44 r45 r46 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q14, q10, d2[0] \n" "vmla.f32 q15, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d2[1] \n" "vmla.f32 q13, q11, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vmla.f32 q14, q8, d3[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vmla.f32 q13, q9, d7[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vmla.f32 q14, q10, d5[0] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q14, q10, d6[0] \n" "vmla.f32 q15, q10, d10[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q12, q11, d6[1] \n" "vmla.f32 q13, q11, d10[1] \n" // "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128] \n" "vmla.f32 q14, q8, d7[0] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d11[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vmla.f32 q13, q9, d11[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "vshrn.u32 d24, 
q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vst1.u16 {d24-d25}, [%0 :64]! \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j<outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v20.4s}, [%1], #16 \n"// sum0 "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4h, v1.4h}, [%2], #16 \n"// r00 r01 "shll v0.4s, v0.4h, #16 \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v1.4s, v1.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmul v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmul v22.4s, v17.4s, v0.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmul v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v2.4h, v3.4h, v4.4h}, [%2] \n"// r02 r03 r04 "shll v25.4s, v25.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, 
v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n"// r10 r11 "shll v17.4s, v17.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v2.4h, v3.4h, v4.4h}, [%3] \n"// r12 r13 r14 "shll v17.4s, v17.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, 
v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4h, v1.4h}, [%4], #16 \n"// r20 r21 "shll v25.4s, v25.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v2.4h, v3.4h, v4.4h}, [%4] \n"// r22 r23 r24 "shll v25.4s, v25.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla 
v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4h, v1.4h}, [%5], #16 \n"// r30 r31 "shll v17.4s, v17.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v2.4h, v3.4h, v4.4h}, [%5] \n"// r32 r33 r34 "shll v17.4s, v17.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "shll v16.4s, v16.4h, #16 \n" 
"fmla v20.4s, v27.4s, v2.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v0.4h, v1.4h}, [%6], #16 \n"// r40 r41 "shll v25.4s, v25.4h, #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "shll v24.4s, v24.4h, #16 \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%6, #192] \n" "ld1 {v2.4h, v3.4h, v4.4h}, [%6] \n"// r42 r43 r44 "shll v25.4s, v25.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "shll v24.4s, v24.4h, 
#16 \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "shll v25.4s, v25.4h, #16 \n" "fmla v21.4s, v24.4s, v3.s[0] \n" // "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n" "shll v26.4s, v26.4h, #16 \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "shll v27.4s, v27.4h, #16 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "shll v16.4s, v16.4h, #16 \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "shll v17.4s, v17.4h, #16 \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "shll v18.4s, v18.4h, #16 \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fadd v22.4s, v21.4s, v22.4s \n" "fadd v23.4s, v22.4s, v23.4s \n" "fadd v20.4s, v20.4s, v23.4s \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "shrn v20.4h, v20.4s, #16 \n" "st1 {v20.4h}, [%0], #8 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! \n"// r00 r01 "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1 :128]! \n"// sum0 "vmul.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmul.f32 q14, q9, d0[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vshll.u16 q11, d23, #16 \n" "vmul.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%2, #192] \n" "vld1.u16 {d6-d8}, [%2 :64] \n"// r02 r03 r04 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "pld [%3, #128] \n" "vld1.u16 {d2-d3}, [%3 :64]! \n"// r10 r11 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d0[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d0[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d1[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "pld [%3, #192] \n" "vld1.u16 {d6-d8}, [%3 :64] \n"// r12 r13 r14 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vmla.f32 q13, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! \n"// r20 r21 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d0[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #192] \n" "vld1.u16 {d6-d8}, [%4 :64] \n"// r22 r23 r24 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "pld [%5, #128] \n" "vld1.u16 {d2-d3}, [%5 :64]! \n"// r30 r31 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d9[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d0[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d0[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! 
\n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d1[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d1[1] \n" "pld [%5, #192] \n" "vld1.u16 {d6-d8}, [%5 :64] \n"// r32 r33 r34 "vshll.u16 q9, d21, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vmla.f32 q13, q8, d2[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d3[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d3[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d4[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d5[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d5[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d6[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d6[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d7[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%6, #128] \n" "vld1.u16 {d2-d3}, [%6 :64]! \n"// r40 r41 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q13, q10, d8[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d8[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d9[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d9[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d0[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d0[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! 
\n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d1[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #192] \n" "vld1.u16 {d6-d8}, [%6 :64] \n"// r42 r43 r44 "vshll.u16 q11, d17, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vmla.f32 q13, q10, d2[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d2[1] \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128]! \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d3[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d3[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d4[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d4[1] \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :128]! \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d5[0] \n" "vshll.u16 q10, d16, #16 \n" "vmla.f32 q12, q11, d5[1] \n" "vshll.u16 q11, d17, #16 \n" "vmla.f32 q13, q10, d6[0] \n" "vshll.u16 q8, d18, #16 \n" "vmla.f32 q14, q11, d6[1] \n" // "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :128] \n" "vshll.u16 q9, d19, #16 \n" "vmla.f32 q15, q8, d7[0] \n" "vshll.u16 q8, d20, #16 \n" "vmla.f32 q12, q9, d7[1] \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q13, q8, d8[0] \n" "vshll.u16 q10, d22, #16 \n" "vmla.f32 q14, q9, d8[1] \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vadd.f32 q14, q13, q14 \n" "vadd.f32 q15, q14, q15 \n" "vadd.f32 q12, q12, q15 \n" "sub %7, %7, #768 \n"// kptr -= 24 * 16; "vshrn.u32 d24, q12, #16 \n" "vst1.u16 {d24}, [%0 :64]! \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } } }
volumeramsubset.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2013-2019 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *********************************************************************************/

#ifndef IVW_VOLUMERAMSUBSET_H
#define IVW_VOLUMERAMSUBSET_H

#include <modules/base/basemoduledefine.h>
#include <inviwo/core/datastructures/volume/volumeramprecision.h>
#include <inviwo/core/datastructures/volume/volumeborder.h>

namespace inviwo {

/**
 * Extracts an axis-aligned sub-region from a RAM volume representation,
 * optionally surrounded by an extra border of voxels.
 *
 * The heavy lifting is done by detail::VolumeRAMSubSetDispatcher, which is
 * instantiated per data format and performs a row-wise memcpy from the
 * source volume into a newly allocated VolumeRAMPrecision<T>.
 */
class IVW_MODULE_BASE_API VolumeRAMSubSet {
public:
    /**
     * @param in      source representation; must be a VolumeRAMPrecision of
     *                some format (dynamic_cast is used, nullptr otherwise)
     * @param dim     requested sub-volume dimensions (voxels)
     * @param offset  start position of the sub-volume inside the source
     * @param border  extra voxels requested around the sub-volume
     * @param clampBorderOutsideVolume  if true, border parts reaching outside
     *                the source are folded into the output's size instead of
     *                being left as an un-copied frame
     */
    static std::shared_ptr<VolumeRAM> apply(const VolumeRepresentation* in, size3_t dim,
                                            size3_t offset,
                                            const VolumeBorders& border = VolumeBorders(),
                                            bool clampBorderOutsideVolume = true);
};

namespace detail {

/// Format dispatch functor used by VolumeRAMSubSet::apply; one
/// instantiation per concrete voxel data format.
struct IVW_MODULE_BASE_API VolumeRAMSubSetDispatcher {
    using type = std::shared_ptr<VolumeRAM>;
    template <typename Result, typename T>
    std::shared_ptr<VolumeRAM> operator()(const VolumeRepresentation* in, size3_t dim,
                                          size3_t offset, const VolumeBorders& border,
                                          bool clampBorderOutsideVolume);
};

template <typename Result, typename DataType>
std::shared_ptr<VolumeRAM> VolumeRAMSubSetDispatcher::operator()(const VolumeRepresentation* in,
                                                                 size3_t dim, size3_t offset,
                                                                 const VolumeBorders& border,
                                                                 bool clampBorderOutsideVolume) {
    using T = typename DataType::type;

    // The dispatcher is only valid for the matching precision; bail out
    // (nullptr) when invoked with a different representation.
    const VolumeRAMPrecision<T>* volume = dynamic_cast<const VolumeRAMPrecision<T>*>(in);
    if (!volume) return nullptr;

    // determine parameters
    const size3_t dataDims{volume->getDimensions()};
    // Requested size clamped so that offset + dim never reads past the
    // source volume (component-wise, via signed intermediate math).
    const size3_t copyDataDims{static_cast<size3_t>(glm::max(
        static_cast<ivec3>(dim) - glm::max(static_cast<ivec3>(offset + dim) - static_cast<ivec3>(dataDims), ivec3(0)),
        ivec3(0)))};

    // Signed start position including the lower-left-front border; negative
    // components mean the border reaches outside the source volume.
    ivec3 newOffset_Dims = static_cast<ivec3>(glm::min(offset, dataDims) - border.llf);
    VolumeBorders trueBorder = VolumeBorders();
    VolumeBorders correctBorder = border;

    if (clampBorderOutsideVolume) {
        // Grow the effective border by however much it pokes outside the
        // source on either side, then recompute the (possibly shifted) start.
        // NOTE(review): voxels in the grown border are not copied below, so
        // they keep the freshly allocated representation's initial contents —
        // confirm expected fill value against VolumeRAMPrecision.
        correctBorder.llf += static_cast<size3_t>(-glm::min(newOffset_Dims, ivec3(0, 0, 0)));
        correctBorder.urb += static_cast<size3_t>(
            -glm::min(static_cast<ivec3>(dataDims) -
                          static_cast<ivec3>(offset + copyDataDims + correctBorder.urb),
                      ivec3(0, 0, 0)));
        newOffset_Dims = static_cast<ivec3>(offset - correctBorder.llf);
    } else {
        // Keep the output size fixed; instead remember how much of the border
        // falls outside the source so the copy loop can skip that frame.
        trueBorder.llf = static_cast<size3_t>(-glm::min(newOffset_Dims, ivec3(0, 0, 0)));
        trueBorder.urb = static_cast<size3_t>(
            glm::max(static_cast<ivec3>(offset + copyDataDims + correctBorder.urb) -
                         static_cast<ivec3>(dataDims),
                     ivec3(0, 0, 0)));
    }

    // Linear index (x-fastest layout) of the first voxel to copy from the
    // source volume.
    size3_t newOffset_DimsU = static_cast<size3_t>(glm::max(newOffset_Dims, ivec3(0, 0, 0)));
    size_t initialStartPos = (newOffset_DimsU.z * (dataDims.x * dataDims.y)) +
                             (newOffset_DimsU.y * dataDims.x) + newOffset_DimsU.x;

    size3_t dimsWithBorder = dim + correctBorder.llf + correctBorder.urb;
    // Extent actually copied per axis (at least one voxel), i.e. the full
    // bordered size minus whatever frame lies outside the source.
    size3_t copyDimsWithoutBorder = static_cast<size3_t>(
        glm::max(static_cast<ivec3>(copyDataDims + correctBorder.llf + correctBorder.urb) -
                     static_cast<ivec3>(trueBorder.llf) - static_cast<ivec3>(trueBorder.urb),
                 ivec3(1, 1, 1)));

    // per row: number of bytes moved by each memcpy below
    size_t dataSize = copyDimsWithoutBorder.x * static_cast<size_t>(volume->getDataFormat()->getSize());

    // allocate space for the bordered destination volume
    auto newVolume = std::make_shared<VolumeRAMPrecision<T>>(dim + correctBorder.llf + correctBorder.urb);

    const T* src = static_cast<const T*>(volume->getData());
    T* dst = static_cast<T*>(newVolume->getData());

    // memcpy each row for every slice to form sub volume; rows within a
    // slice are independent, so they are copied in parallel.
    for (int i = 0; i < static_cast<int>(copyDimsWithoutBorder.z); i++) {
#pragma omp parallel for
        for (int j = 0; j < static_cast<int>(copyDimsWithoutBorder.y); j++) {
            // Source row start relative to initialStartPos.
            size_t volumePos = (j * dataDims.x) + (i * dataDims.x * dataDims.y);
            // Destination row start, shifted inward by any skipped frame.
            size_t subVolumePos = ((j + trueBorder.llf.y) * dimsWithBorder.x) +
                                  ((i + trueBorder.llf.z) * dimsWithBorder.x * dimsWithBorder.y) +
                                  trueBorder.llf.x;
            std::memcpy(dst + subVolumePos, (src + volumePos + initialStartPos), dataSize);
        }
    }

    return newVolume;
}

}  // namespace detail

}  // namespace inviwo

#endif  // IVW_VOLUMERAMSUBSET_H
interp2.c
/*
 * Academic License - for use in teaching, academic research, and meeting
 * course requirements at degree granting institutions only. Not for
 * government, commercial, or other organizational use.
 *
 * interp2.c
 *
 * Code generation for function 'interp2'
 *
 */

/* NOTE: MATLAB Coder generated file - prefer regenerating over hand-editing. */

/* Include files */
#include "interp2.h"
#include "combo_get_flux_coefficients_data.h"
#include "combo_get_flux_coefficients_emxutil.h"
#include "combo_get_flux_coefficients_types.h"
#include "eml_int_forloop_overflow_check.h"
#include "rt_nonfinite.h"
#include "mwmathutil.h"

/* Variable Definitions */
/* Runtime stack-trace / error-location records pointing back to the
   originating MATLAB source (interp2.m). */
static emlrtRSInfo cc_emlrtRSI = { 274,/* lineNo */
  "interp2_local",                     /* fcnName */
  "C:\\Program Files\\MATLAB\\R2020b\\toolbox\\eml\\lib\\matlab\\polyfun\\interp2.m"/* pathName */
};

static emlrtRTEInfo je_emlrtRTEI = { 268,/* lineNo */
  21,                                  /* colNo */
  "interp2",                           /* fName */
  "C:\\Program Files\\MATLAB\\R2020b\\toolbox\\eml\\lib\\matlab\\polyfun\\interp2.m"/* pName */
};

/* Function Definitions */

/*
 * Bilinear interpolation of grid values V at query points (Xq, Yq),
 * assuming an implicit unit-spaced grid X = 1..size(V,2), Y = 1..size(V,1)
 * (MATLAB interp2 default). One output value is written to Vq per query
 * point; queries outside the grid produce NaN. The per-point loop runs in
 * an OpenMP parallel region managed by the emlrt runtime.
 */
void interp2_local(const emlrtStack *sp, const emxArray_real_T *V, const
                   emxArray_real_T *Xq, const emxArray_real_T *Yq,
                   emxArray_real_T *Vq)
{
  jmp_buf * volatile emlrtJBStack;
  emlrtStack b_st;
  emlrtStack st;
  real_T qx1;
  real_T qx2;
  real_T rx;
  real_T ry;
  real_T zx1y2;
  int32_T ix;
  int32_T ixmax;
  int32_T iy;
  int32_T iymax;
  int32_T k;
  int32_T ub_loop;
  st.prev = sp;
  st.tls = sp->tls;
  b_st.prev = &st;
  b_st.tls = st.tls;
  /* Resize the output to one value per query point. */
  ixmax = Vq->size[0];
  Vq->size[0] = Xq->size[0];
  emxEnsureCapacity_real_T(sp, Vq, ixmax, &je_emlrtRTEI);
  /* Largest valid lower cell index along each grid axis (1-based grid). */
  ixmax = V->size[1] - 1;
  iymax = V->size[0] - 1;
  st.site = &cc_emlrtRSI;
  /* Guard against int32 loop-counter overflow for huge query vectors.
     hb_emlrtRSI is declared in an included generated header. */
  if ((1 <= Xq->size[0]) && (Xq->size[0] > 2147483646)) {
    b_st.site = &hb_emlrtRSI;
    check_forloop_overflow_error(&b_st);
  }

  ub_loop = Xq->size[0] - 1;
  emlrtEnterParallelRegion(sp, omp_in_parallel());
  emlrtPushJmpBuf(sp, &emlrtJBStack);

#pragma omp parallel for \
 num_threads(emlrtAllocRegionTLSs(sp->tls, omp_in_parallel(), omp_get_max_threads(), omp_get_num_procs())) \
 private(ix,iy,ry,qx1,zx1y2,qx2,rx)

  for (k = 0; k <= ub_loop; k++) {
    /* Only interpolate queries inside the grid; everything else -> NaN. */
    if ((Xq->data[k] >= 1.0) && (Xq->data[k] <= V->size[1]) && (Yq->data[k] >=
         1.0) && (Yq->data[k] <= V->size[0])) {
      /* Lower cell corner in x, clamped to [1, ixmax]. */
      if (Xq->data[k] <= 1.0) {
        ix = 1;
      } else if (Xq->data[k] <= ixmax) {
        ix = (int32_T)muDoubleScalarFloor(Xq->data[k]);
      } else {
        ix = ixmax;
      }

      /* Lower cell corner in y, clamped to [1, iymax]. */
      if (Yq->data[k] <= 1.0) {
        iy = 1;
      } else if (Yq->data[k] <= iymax) {
        iy = (int32_T)muDoubleScalarFloor(Yq->data[k]);
      } else {
        iy = iymax;
      }

      /* Four cell-corner values (column-major indexing):
         ry = V(iy, ix), qx1 = V(iy, ix+1),
         zx1y2 = V(iy+1, ix), qx2 = V(iy+1, ix+1). */
      ry = V->data[(iy + V->size[0] * (ix - 1)) - 1];
      qx1 = V->data[(iy + V->size[0] * ix) - 1];
      zx1y2 = V->data[iy + V->size[0] * (ix - 1)];
      qx2 = V->data[iy + V->size[0] * ix];
      /* Interpolate along x first; exact-node hits skip arithmetic so the
         grid values are reproduced bit-exactly. */
      if (Xq->data[k] == ix) {
        qx1 = ry;
        qx2 = zx1y2;
      } else {
        if (!(Xq->data[k] == (real_T)ix + 1.0)) {
          rx = (Xq->data[k] - (real_T)ix) / (((real_T)ix + 1.0) - (real_T)ix);
          if (ry == qx1) {
            qx1 = ry;
          } else {
            qx1 = (1.0 - rx) * ry + rx * qx1;
          }

          if (zx1y2 == qx2) {
            qx2 = zx1y2;
          } else {
            qx2 = (1.0 - rx) * zx1y2 + rx * qx2;
          }
        }
      }

      /* Then interpolate the two x-results along y. */
      if ((Yq->data[k] == iy) || (qx1 == qx2)) {
        Vq->data[k] = qx1;
      } else if (Yq->data[k] == (real_T)iy + 1.0) {
        Vq->data[k] = qx2;
      } else {
        ry = (Yq->data[k] - (real_T)iy) / (((real_T)iy + 1.0) - (real_T)iy);
        Vq->data[k] = (1.0 - ry) * qx1 + ry * qx2;
      }
    } else {
      /* Out-of-domain query. */
      Vq->data[k] = rtNaN;
    }
  }

  emlrtPopJmpBuf(sp, &emlrtJBStack);
  emlrtExitParallelRegion(sp, omp_in_parallel());
}

/* End of code generation (interp2.c) */
sensitivity_mapper.h
#pragma once #if _OPENMP #include <omp.h> #endif #include "util/random.h" #include "3d/geometry/voxel_set.h" #include "3d/geometry/event_generator.h" namespace PET3D { namespace Hybrid { /// Creates a map of 3D scanner sensitivity template <class ScannerClass> class SensitivityMapper { public: using Scanner = ScannerClass; using F = typename Scanner::F; using S = typename Scanner::S; using VoxelSet = PET3D::VoxelSet<F, S>; using Event = typename Scanner::Event; SensitivityMapper(Scanner& scanner, VoxelSet& voxel_set) : scanner(scanner), voxel_set(voxel_set), one_dis(0, 1) {} template <class RNG, typename AcceptanceModel> void map(int i_voxel, const Voxel<S>& voxel, RNG& rng, AcceptanceModel& model, int n_emissions) { auto pixel_size = voxel_set.grid.pixel_grid.pixel_size; PET3D::Point<F> ll = voxel_set.grid.lower_left_at(voxel); #if DEBUG std::cout << "emitting from pixel at " << ll.x << ' ' << ll.y << ' ' << ll.z << std::endl; #endif for (int i = 0; i < n_emissions; ++i) { F rx = ll.x + one_dis(rng) * pixel_size; F ry = ll.y + one_dis(rng) * pixel_size; F rz = ll.z + one_dis(rng) * pixel_size; auto dir = direction(rng); #if DEBUG std::cout << dir.x << ' ' << dir.y << ' ' << dir.z << std::endl; #endif Event event(PET3D::Point<F>(rx, ry, rz), dir); typename Scanner::Response response; auto hits = scanner.detect(rng, model, event, response); if (hits >= 2) { voxel_set.value(i_voxel) += F(1.0); } } } template <class RNG, typename AcceptanceModel> void map(RNG& rng, AcceptanceModel& model, int n_emissions) { #if _OPENMP // OpenMP uses passed random generator as seed source for // thread local random generators RNG* mp_rngs = new (alloca(sizeof(RNG) * omp_get_max_threads())) RNG[omp_get_max_threads()]; for (auto t = 0; t < omp_get_max_threads(); ++t) { mp_rngs[t].seed(rng()); } #pragma omp parallel for schedule(dynamic) // #pragma omp parallel for #endif for (int i_voxel = 0; i_voxel < (int)voxel_set.size(); ++i_voxel) { #if _OPENMP auto& l_rng = 
mp_rngs[omp_get_thread_num()]; #else auto& l_rng = rng; #endif auto voxel = voxel_set.voxel(i_voxel); map(i_voxel, voxel, l_rng, model, n_emissions); } } private: Scanner& scanner; VoxelSet& voxel_set; util::random::uniform_real_distribution<F> one_dis; Distribution::SphericalDistribution<F> direction; }; } // Hybrid } // PET3D
syr2k.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "syr2k.h"

/* Array initialization: A[i][j] = B[i][j] = C[i][j] = i*j/ni, plus fixed
   alpha/beta scaling factors.  The annotate attributes carry value-range
   hints -- presumably consumed by a precision-tuning pass (e.g. TAFFO);
   confirm against the build setup. */
static
void init_array(int ni, int nj,
		DATA_TYPE *alpha,
		DATA_TYPE *beta,
		DATA_TYPE POLYBENCH_2D(C,NI,NI,ni,ni),
		DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i __attribute__((annotate("scalar(range(0, " PB_XSTR(NI) ") final)")));
  int j __attribute__((annotate("scalar(range(0, " PB_XSTR(NJ) ") final)")));

  *alpha = 32412;
  *beta = 2123;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      A[i][j] = ((DATA_TYPE) i*j) / ni;
      B[i][j] = ((DATA_TYPE) i*j) / ni;
    }
  for (i = 0; i < ni; i++)
    for (j = 0; j < ni; j++)
      C[i][j] = ((DATA_TYPE) i*j) / ni;
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni,
		 DATA_TYPE POLYBENCH_2D(C,NI,NI,ni,ni))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < ni; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]);
      /* Line-wrap the dump every 20 values. */
      if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return.
*/
/* Symmetric rank-2k update: C := alpha*A*B' + alpha*B*A' + beta*C.
   The loop structure inside the scop region is the benchmark contract;
   do not restructure it. */
static
void kernel_syr2k(int ni, int nj,
		  DATA_TYPE alpha,
		  DATA_TYPE beta,
		  DATA_TYPE POLYBENCH_2D(C,NI,NI,ni,ni),
		  DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		  DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j, k;

#pragma scop
  #pragma omp parallel
  {
    /* C := alpha*A*B' + alpha*B*A' + beta*C */
    /* Phase 1: scale the existing C by beta. */
    #pragma omp for private (j) schedule(static)
    for (i = 0; i < _PB_NI; i++)
      for (j = 0; j < _PB_NI; j++)
        C[i][j] *= beta;
    /* Phase 2: accumulate the two rank-nj updates (implicit barrier
       between the two worksharing loops keeps the phases ordered). */
    #pragma omp for private (j, k) schedule(static)
    for (i = 0; i < _PB_NI; i++)
      for (j = 0; j < _PB_NI; j++)
        for (k = 0; k < _PB_NJ; k++)
          {
            C[i][j] += alpha * A[i][k] * B[j][k];
            C[i][j] += alpha * B[i][k] * A[j][k];
          }
  }
#pragma endscop
}

/* Driver: init, timed kernel run, DCE-protected output dump, cleanup. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  DATA_TYPE alpha __attribute__((annotate("target('alpha') scalar()")));
  DATA_TYPE beta __attribute__((annotate("target('beta') scalar()")));
  POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE __attribute__((annotate("target('C') scalar(range(0, 120000000000000) final)"))),NI,NI,ni,ni);
  POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE __attribute__((annotate("target('A') scalar()"))),NI,NJ,ni,nj);
  POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE __attribute__((annotate("target('B') scalar()"))),NI,NJ,ni,nj);

  /* Initialize array(s). */
  init_array (ni, nj, &alpha, &beta,
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_syr2k (ni, nj, alpha, beta,
		POLYBENCH_ARRAY(C),
		POLYBENCH_ARRAY(A),
		POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, POLYBENCH_ARRAY(C)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
test3_0.c
/*
 * test3_0.c and test3_1.c are not equivalent because
 * they have different private list for the parallel construct.
 */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/* Each OpenMP thread prints a hello message; the master thread (tid == 0)
 * additionally reports the team size.  Both nthreads and tid are listed in
 * the private clause, so every thread works on its own copies -- the exact
 * private list is the point of this test (compare with test3_1.c). */
int main (int argc, char *argv[])
{
  int nthreads, tid;

#pragma omp parallel private(nthreads, tid)
  {
    tid = omp_get_thread_num();  /* per-thread id */
    printf("Hello World from thread = %d\n", tid);
    if (tid == 0)  /* master thread only */
    {
      nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
    }
  }
  exit(0);
}
GB_unop__tan_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__tan_fc32_fc32)
// op(A') function:  GB (_unop_tran__tan_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = ctanf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ctanf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = ctanf (z) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TAN || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = ctanf (aij) elementwise over anz entries, parallelized
// statically across nthreads OpenMP threads.  In the bitmap case, entries
// whose Ab [p] is zero are skipped (not present in A).

GrB_Info GB (_unop_apply__tan_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = ctanf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = ctanf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The shared transpose kernel is textually included below; it expands in
// terms of the GB_* macros defined above.

GrB_Info GB (_unop_tran__tan_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
par_2s_interp.c
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_parcsr_ls.h"

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildModPartialExtInterpHost
 *
 * Host (CPU/OpenMP) build of the modified partial extended interpolation
 * matrix P for BoomerAMG.  New F-points are marked CF_marker[i] == -2;
 * interpolation weights are built from the strength-filtered F-F and F-C
 * blocks (As_FF, As_FC) via row sums D_q (beta) and D_w (alpha + gamma),
 * then W = As_FF * As_FC forms the F-rows of P while C-rows get identity.
 * Optionally truncates P by trunc_factor / max_elmts.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildModPartialExtInterpHost( hypre_ParCSRMatrix  *A,
                                             HYPRE_Int           *CF_marker,
                                             hypre_ParCSRMatrix  *S,
                                             HYPRE_BigInt        *num_cpts_global,
                                             HYPRE_BigInt        *num_old_cpts_global,
                                             HYPRE_Int            num_functions,
                                             HYPRE_Int           *dof_func,
                                             HYPRE_Int            debug_flag,
                                             HYPRE_Real           trunc_factor,
                                             HYPRE_Int            max_elmts,
                                             hypre_ParCSRMatrix **P_ptr )
{
   /* Communication Variables */
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   HYPRE_MemoryLocation    memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   hypre_ParCSRCommHandle *comm_handle = NULL;
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);

   HYPRE_Int               my_id, num_procs;

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   //HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);

   /*HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);*/

   HYPRE_Int    n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_BigInt total_global_cpts;
   HYPRE_BigInt total_old_global_cpts;

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix    *P_diag;
   hypre_CSRMatrix    *P_offd;

   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int  *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int  *P_offd_i, *P_offd_j = NULL;

   /* Intermediate matrices */
   hypre_ParCSRMatrix *As_FF, *As_FC, *W;
   HYPRE_Real         *D_q, *D_w;
   HYPRE_Real         *D_q_offd = NULL;
   hypre_CSRMatrix    *As_FF_diag;
   hypre_CSRMatrix    *As_FF_offd;
   hypre_CSRMatrix    *As_FC_diag;
   hypre_CSRMatrix    *As_FC_offd;
   hypre_CSRMatrix    *W_diag;
   hypre_CSRMatrix    *W_offd;

   HYPRE_Int *As_FF_diag_i;
   HYPRE_Int *As_FF_diag_j;
   HYPRE_Int *As_FF_offd_i;
   HYPRE_Int *As_FF_offd_j;
   HYPRE_Int *As_FC_diag_i;
   HYPRE_Int *As_FC_offd_i;
   HYPRE_Int *W_diag_i;
   HYPRE_Int *W_offd_i;
   HYPRE_Int *W_diag_j;
   HYPRE_Int *W_offd_j;

   HYPRE_Real *As_FF_diag_data;
   HYPRE_Real *As_FF_offd_data;
   HYPRE_Real *As_FC_diag_data;
   HYPRE_Real *As_FC_offd_data;
   HYPRE_Real *W_diag_data;
   HYPRE_Real *W_offd_data;

   HYPRE_Real *buf_data = NULL;

   HYPRE_BigInt *col_map_offd_P = NULL;
   HYPRE_BigInt *new_col_map_offd = NULL;

   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   HYPRE_Int num_cols_A_FF_offd;
   HYPRE_Int new_ncols_P_offd;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int *P_marker = NULL;
   //HYPRE_Int *dof_func_offd = NULL;

   /* Loop variables */
   HYPRE_Int index;
   HYPRE_Int i, j;
   HYPRE_Int *cpt_array;
   HYPRE_Int *new_fpt_array;
   HYPRE_Int *start_array;
   HYPRE_Int *new_fine_to_fine;
   HYPRE_Int start, stop, startf, stopf, startnewf, stopnewf;
   HYPRE_Int cnt_diag, cnt_offd, row, c_pt, fpt;
   HYPRE_Int startc, num_sends;

   /* Definitions */
   //HYPRE_Real wall_time;
   HYPRE_Int n_Cpts, n_Fpts, n_old_Cpts, n_new_Fpts;
   HYPRE_Int num_threads = hypre_NumThreads();

   //if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* Only the last rank holds the global totals; broadcast them. */
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   if (my_id == (num_procs - 1)) { total_old_global_cpts = num_old_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   n_Cpts = num_cpts_global[1] - num_cpts_global[0];
   n_old_Cpts = num_old_cpts_global[1] - num_old_cpts_global[0];

   /* Extract strength-filtered F-C and F-F blocks of A. */
   hypre_ParCSRMatrixGenerateFFFC3(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);

   As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
   As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
   As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
   As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
   As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
   As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
   As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
   As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
   As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag);
   As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
   As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
   As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
   As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd);
   As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);

   /* NOTE(review): n_new_Fpts is immediately overwritten by
      n_old_Cpts - n_Cpts; the first assignment is dead. */
   n_new_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
   n_Fpts = hypre_CSRMatrixNumRows(As_FC_diag);
   n_new_Fpts = n_old_Cpts - n_Cpts;
   num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd);

   D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
   new_fine_to_fine = hypre_CTAlloc(HYPRE_Int, n_new_Fpts, HYPRE_MEMORY_HOST);
   D_w = hypre_CTAlloc(HYPRE_Real, n_new_Fpts, memory_location_P);
   cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   new_fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   start_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST);

   /* Phase 1: one parallel region; threads partition the fine rows manually
      and synchronize with barriers between dependent stages. */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel private(i,j,start,stop,startf,stopf,startnewf,stopnewf,row,fpt)
#endif
   {
      HYPRE_Int my_thread_num = hypre_GetThreadNum();
      HYPRE_Real beta, gamma;

      /* Static row partition [start, stop) of this thread. */
      start = (n_fine / num_threads) * my_thread_num;
      if (my_thread_num == num_threads - 1)
      {
         stop = n_fine;
      }
      else
      {
         stop = (n_fine / num_threads) * (my_thread_num + 1);
      }
      start_array[my_thread_num + 1] = stop;

      /* Count C-points and new F-points in this thread's range. */
      row = 0;
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            cpt_array[my_thread_num]++;
         }
         else if (CF_marker[i] == -2)
         {
            new_fpt_array[my_thread_num]++;
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Serial prefix sums of the per-thread counts. */
      if (my_thread_num == 0)
      {
         for (i = 1; i < num_threads; i++)
         {
            cpt_array[i] += cpt_array[i - 1];
            new_fpt_array[i] += new_fpt_array[i - 1];
         }
         /*if (num_functions > 1)
         {
            HYPRE_Int *int_buf_data = NULL;
            HYPRE_Int num_sends, startc;
            HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
            dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, memory_location_P);
            index = 0;
            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), memory_location_P);
            for (i = 0; i < num_sends; i++)
            {
               startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
               {
                  int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
               }
            }
            comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(int_buf_data, memory_location_P);
         }*/
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* F-point subrange [startf, stopf) of this thread. */
      if (my_thread_num > 0)
      {
         startf = start - cpt_array[my_thread_num - 1];
      }
      else
      {
         startf = 0;
      }

      if (my_thread_num < num_threads - 1)
      {
         stopf = stop - cpt_array[my_thread_num];
      }
      else
      {
         stopf = n_Fpts;
      }

      /* Create D_q = D_beta */
      for (i = startf; i < stopf; i++)
      {
         for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i + 1]; j++)
         {
            D_q[i] += As_FC_diag_data[j];
         }
         for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i + 1]; j++)
         {
            D_q[i] += As_FC_offd_data[j];
         }
      }

      /* Map new F-point rows to their F-point indices. */
      row = 0;
      if (my_thread_num) { row = new_fpt_array[my_thread_num - 1]; }
      fpt = startf;
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] == -2)
         {
            new_fine_to_fine[row++] = fpt++;
         }
         else if (CF_marker[i] < 0)
         {
            fpt++;
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Thread 0 exchanges D_q for off-processor F columns. */
      if (my_thread_num == 0)
      {
         if (num_cols_A_FF_offd)
         {
            D_q_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, memory_location_P);
         }
         index = 0;
         comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
         if (!comm_pkg)
         {
            hypre_MatvecCommPkgCreate(As_FF);
            comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
         }
         num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
         buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), memory_location_P);
         for (i = 0; i < num_sends; i++)
         {
            startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            {
               buf_data[index++] = D_q[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
            }
         }
         comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_q_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Create D_w = D_alpha + D_gamma */
      row = 0;
      if (my_thread_num) { row = new_fpt_array[my_thread_num - 1]; }
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] == -2)
         {
            /*if (num_functions > 1)
            {
               HYPRE_Int jA, jC, jS;
               jC = A_diag_i[i];
               for (j=S_diag_i[i]; j < S_diag_i[i+1]; j++)
               {
                  jS = S_diag_j[j];
                  jA = A_diag_j[jC];
                  while (jA != jS)
                  {
                     if (dof_func[i] == dof_func[jA]) { D_w[row] += A_diag_data[jC++]; }
                     else jC++;
                     jA = A_diag_j[jC];
                  }
                  jC++;
               }
               for (j=jC; j < A_diag_i[i+1]; j++)
               {
                  if (dof_func[i] == dof_func[A_diag_j[j]]) D_w[row] += A_diag_data[j];
               }
               jC = A_offd_i[i];
               for (j=S_offd_i[i]; j < S_offd_i[i+1]; j++)
               {
                  jS = S_offd_j[j];
                  jA = A_offd_j[jC];
                  while (jA != jS)
                  {
                     if (dof_func[i] == dof_func_offd[jA]) { D_w[row] += A_offd_data[jC++]; }
                     else jC++;
                     jA = A_offd_j[jC];
                  }
                  jC++;
               }
               for (j=jC; j < A_offd_i[i+1]; j++)
               {
                  if (dof_func[i] == dof_func_offd[A_offd_j[j]]) D_w[row] += A_offd_data[j];
               }
               row++;
            }
            else*/
            {
               /* Full row sum of A minus strong F-connections with
                  nonzero D_q minus the row's own D_q. */
               for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
               {
                  D_w[row] += A_diag_data[j];
               }
               for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
               {
                  D_w[row] += A_offd_data[j];
               }
               for (j = As_FF_diag_i[row] + 1; j < As_FF_diag_i[row + 1]; j++)
               {
                  if (D_q[As_FF_diag_j[j]])
                  {
                     D_w[row] -= As_FF_diag_data[j];
                  }
               }
               for (j = As_FF_offd_i[row]; j < As_FF_offd_i[row + 1]; j++)
               {
                  if (D_q_offd[As_FF_offd_j[j]])
                  {
                     D_w[row] -= As_FF_offd_data[j];
                  }
               }
               D_w[row] -= D_q[new_fine_to_fine[row]];
               row++;
            }
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Scale As_FF rows by 1/D_w (skipped when D_w == 0). */
      startnewf = 0;
      if (my_thread_num) { startnewf = new_fpt_array[my_thread_num - 1]; }
      stopnewf = new_fpt_array[my_thread_num];
      for (i = startnewf; i < stopnewf; i++)
      {
         j = As_FF_diag_i[i];
         if (D_w[i])
         {
            beta = 1.0 / D_w[i];
            /* First entry (diagonal position) gets beta * D_q. */
            As_FF_diag_data[j] = beta * D_q[new_fine_to_fine[i]];
            for (j = As_FF_diag_i[i] + 1; j < As_FF_diag_i[i + 1]; j++)
            {
               As_FF_diag_data[j] *= beta;
            }
            for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i + 1]; j++)
            {
               As_FF_offd_data[j] *= beta;
            }
         }
      }

      /* Scale As_FC rows by -1/D_q. */
      for (i = startf; i < stopf; i++)
      {
         if (D_q[i])
         {
            gamma = -1.0 / D_q[i];
         }
         else
         {
            gamma = 0.0;
         }
         for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i + 1]; j++)
         {
            As_FC_diag_data[j] *= gamma;
         }
         for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i + 1]; j++)
         {
            As_FC_offd_data[j] *= gamma;
         }
      }
   } /* end parallel region */

   /* W holds the interpolation weights for the new F-point rows. */
   W = hypre_ParMatmul(As_FF, As_FC);
   W_diag = hypre_ParCSRMatrixDiag(W);
   W_offd = hypre_ParCSRMatrixOffd(W);
   W_diag_i = hypre_CSRMatrixI(W_diag);
   W_diag_j = hypre_CSRMatrixJ(W_diag);
   W_diag_data = hypre_CSRMatrixData(W_diag);
   W_offd_i = hypre_CSRMatrixI(W_offd);
   W_offd_j = hypre_CSRMatrixJ(W_offd);
   W_offd_data = hypre_CSRMatrixData(W_offd);
   num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);

   /*-----------------------------------------------------------------------
    *  Intialize data for P
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_old_Cpts + 1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_old_Cpts + 1, memory_location_P);

   P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_new_Fpts];
   P_offd_size = hypre_CSRMatrixI(W_offd)[n_new_Fpts];

   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }

   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }

   /* Phase 2: fill P -- identity rows for C-points, W rows for new F-points.
      Per-thread offsets computed from the prefix-sum arrays of phase 1. */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel private(i,j,start,stop,startnewf,stopnewf,c_pt,row,cnt_diag,cnt_offd)
#endif
   {
      HYPRE_Int rowp;
      HYPRE_Int my_thread_num = hypre_GetThreadNum();
      start = start_array[my_thread_num];
      stop = start_array[my_thread_num + 1];

      if (my_thread_num > 0)
      {
         c_pt = cpt_array[my_thread_num - 1];
      }
      else
      {
         c_pt = 0;
      }
      row = 0;
      if (my_thread_num) { row = new_fpt_array[my_thread_num - 1]; }
      rowp = row;
      if (my_thread_num > 0) { rowp = row + cpt_array[my_thread_num - 1]; }
      cnt_diag = W_diag_i[row] + c_pt;
      cnt_offd = W_offd_i[row];
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            /* C-point: single unit entry (injection). */
            rowp++;
            P_diag_j[cnt_diag] = c_pt++;
            P_diag_data[cnt_diag++] = 1.0;
            P_diag_i[rowp] = cnt_diag;
            P_offd_i[rowp] = cnt_offd;
         }
         else if (CF_marker[i] == -2)
         {
            /* New F-point: copy the corresponding W row. */
            rowp++;
            for (j = W_diag_i[row]; j < W_diag_i[row + 1]; j++)
            {
               P_diag_j[cnt_diag] = W_diag_j[j];
               P_diag_data[cnt_diag++] = W_diag_data[j];
            }
            for (j = W_offd_i[row]; j < W_offd_i[row + 1]; j++)
            {
               P_offd_j[cnt_offd] = W_offd_j[j];
               P_offd_data[cnt_offd++] = W_offd_data[j];
            }
            row++;
            P_diag_i[rowp] = cnt_diag;
            P_offd_i[rowp] = cnt_offd;
         }
      }
   } /* end parallel region */

   /*-----------------------------------------------------------------------
    *  Create matrix
    *-----------------------------------------------------------------------*/
   P = hypre_ParCSRMatrixCreate(comm,
                                total_old_global_cpts,
                                total_global_cpts,
                                num_old_cpts_global,
                                num_cpts_global,
                                num_cols_P_offd,
                                P_diag_i[n_old_Cpts],
                                P_offd_i[n_old_Cpts]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   /* P adopts W's off-processor column map. */
   hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
   hypre_ParCSRMatrixColMapOffd(W) = NULL;
   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      HYPRE_Int *map;
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_old_Cpts];
      P_offd_size = P_offd_i[n_old_Cpts];

      /* Compact the off-diagonal column map to the columns that survive. */
      col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
      if (num_cols_P_offd)
      {
         P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < P_offd_size; i++)
         {
            P_marker[P_offd_j[i]] = 1;
         }

         new_ncols_P_offd = 0;
         for (i = 0; i < num_cols_P_offd; i++)
            if (P_marker[i]) { new_ncols_P_offd++; }

         new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
         map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);

         index = 0;
         for (i = 0; i < num_cols_P_offd; i++)
            if (P_marker[i])
            {
               new_col_map_offd[index] = col_map_offd_P[i];
               map[index++] = i;
            }
         hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < P_offd_size; i++)
         {
            P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i], new_ncols_P_offd);
         }

         hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
         hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
         hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
         hypre_TFree(map, HYPRE_MEMORY_HOST);
      }
   }
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(D_q, memory_location_P);
   hypre_TFree(D_q_offd, memory_location_P);
   hypre_TFree(D_w, memory_location_P);
   //hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(new_fpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(start_array, HYPRE_MEMORY_HOST);
   hypre_TFree(new_fine_to_fine, HYPRE_MEMORY_HOST);
   hypre_TFree(buf_data, memory_location_P);
   hypre_ParCSRMatrixDestroy(As_FF);
   hypre_ParCSRMatrixDestroy(As_FC);
   hypre_ParCSRMatrixDestroy(W);

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildModPartialExtInterp
 *
 * Dispatch wrapper: routes to the device build when A lives in device
 * memory (CUDA/HIP builds), otherwise to the host build above.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildModPartialExtInterp( hypre_ParCSRMatrix  *A,
                                         HYPRE_Int           *CF_marker,
                                         hypre_ParCSRMatrix  *S,
                                         HYPRE_BigInt        *num_cpts_global,
                                         HYPRE_BigInt        *num_old_cpts_global,
                                         HYPRE_Int            num_functions,
                                         HYPRE_Int           *dof_func,
                                         HYPRE_Int            debug_flag,
                                         HYPRE_Real           trunc_factor,
                                         HYPRE_Int            max_elmts,
                                         hypre_ParCSRMatrix **P_ptr )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("PartialExtInterp");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* NOTE: device path does not take num_functions/dof_func. */
      ierr = hypre_BoomerAMGBuildModPartialExtInterpDevice(A, CF_marker, S, num_cpts_global, num_old_cpts_global,
                                                           debug_flag, trunc_factor, max_elmts, P_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGBuildModPartialExtInterpHost(A, CF_marker, S, num_cpts_global, num_old_cpts_global,
                                                         num_functions, dof_func,
                                                         debug_flag, trunc_factor, max_elmts, P_ptr);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildModPartialExtPEInterpHost
 *
 * Host build of the modified partial extended+e interpolation (adds the
 * D_lambda / D_inv / D_tau terms to the scheme above).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildModPartialExtPEInterpHost( hypre_ParCSRMatrix  *A,
                                               HYPRE_Int           *CF_marker,
                                               hypre_ParCSRMatrix  *S,
                                               HYPRE_BigInt        *num_cpts_global,
                                               HYPRE_BigInt        *num_old_cpts_global,
                                               HYPRE_Int            num_functions,
                                               HYPRE_Int           *dof_func,
                                               HYPRE_Int            debug_flag,
                                               HYPRE_Real           trunc_factor,
                                               HYPRE_Int            max_elmts,
                                               hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_MemoryLocation memory_location_P =
hypre_ParCSRMatrixMemoryLocation(A); hypre_ParCSRCommHandle *comm_handle = NULL; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt total_global_cpts; HYPRE_BigInt total_old_global_cpts; /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /* Intermediate matrices */ hypre_ParCSRMatrix *As_FF, *As_FC, *W; HYPRE_Real *D_q, *D_w, *D_lambda, *D_inv, *D_tau; HYPRE_Real *D_lambda_offd = NULL, *D_inv_offd = NULL; hypre_CSRMatrix *As_FF_diag; hypre_CSRMatrix *As_FF_offd; hypre_CSRMatrix *As_FC_diag; hypre_CSRMatrix *As_FC_offd; hypre_CSRMatrix *W_diag; hypre_CSRMatrix *W_offd; HYPRE_Int *As_FF_diag_i; HYPRE_Int *As_FF_diag_j; HYPRE_Int *As_FF_offd_i; HYPRE_Int *As_FF_offd_j; HYPRE_Int *As_FC_diag_i; HYPRE_Int *As_FC_offd_i; HYPRE_Int *W_diag_i; HYPRE_Int *W_offd_i; HYPRE_Int *W_diag_j; HYPRE_Int *W_offd_j; HYPRE_Real *As_FF_diag_data; HYPRE_Real *As_FF_offd_data; HYPRE_Real *As_FC_diag_data; HYPRE_Real *As_FC_offd_data; HYPRE_Real *W_diag_data; 
HYPRE_Real *W_offd_data; HYPRE_Real *buf_data = NULL; HYPRE_BigInt *col_map_offd_P = NULL; HYPRE_BigInt *new_col_map_offd = NULL; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int num_cols_A_FF_offd; HYPRE_Int new_ncols_P_offd; HYPRE_Int num_cols_P_offd; HYPRE_Int *P_marker = NULL; HYPRE_Int *dof_func_offd = NULL; /* Loop variables */ HYPRE_Int index; HYPRE_Int i, j; HYPRE_Int *cpt_array; HYPRE_Int *new_fpt_array; HYPRE_Int *start_array; HYPRE_Int *new_fine_to_fine; HYPRE_Int start, stop, startf, stopf, startnewf, stopnewf; HYPRE_Int cnt_diag, cnt_offd, row, c_pt, fpt; HYPRE_Int startc, num_sends; /* Definitions */ //HYPRE_Real wall_time; HYPRE_Int n_Cpts, n_Fpts, n_old_Cpts, n_new_Fpts; HYPRE_Int num_threads = hypre_NumThreads(); //if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } if (my_id == (num_procs - 1)) { total_old_global_cpts = num_old_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); n_Cpts = num_cpts_global[1] - num_cpts_global[0]; n_old_Cpts = num_old_cpts_global[1] - num_old_cpts_global[0]; hypre_ParCSRMatrixGenerateFFFCD3(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF, &D_lambda); As_FC_diag = hypre_ParCSRMatrixDiag(As_FC); As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag); As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag); As_FC_offd = hypre_ParCSRMatrixOffd(As_FC); As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd); As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd); As_FF_diag = hypre_ParCSRMatrixDiag(As_FF); As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag); As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag); As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag); As_FF_offd = hypre_ParCSRMatrixOffd(As_FF); As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd); As_FF_offd_j = 
hypre_CSRMatrixJ(As_FF_offd); As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd); n_new_Fpts = hypre_CSRMatrixNumRows(As_FF_diag); n_Fpts = hypre_CSRMatrixNumRows(As_FC_diag); n_new_Fpts = n_old_Cpts - n_Cpts; num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd); D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P); D_inv = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P); new_fine_to_fine = hypre_CTAlloc(HYPRE_Int, n_new_Fpts, HYPRE_MEMORY_HOST); D_w = hypre_CTAlloc(HYPRE_Real, n_new_Fpts, memory_location_P); D_tau = hypre_CTAlloc(HYPRE_Real, n_new_Fpts, memory_location_P); cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); new_fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); start_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,startf,stopf,startnewf,stopnewf,row,fpt,index) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Real beta, gamma; start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } start_array[my_thread_num + 1] = stop; row = 0; for (i = start; i < stop; i++) { if (CF_marker[i] > 0) { cpt_array[my_thread_num]++; } else if (CF_marker[i] == -2) { new_fpt_array[my_thread_num]++; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { for (i = 1; i < num_threads; i++) { cpt_array[i] += cpt_array[i - 1]; new_fpt_array[i] += new_fpt_array[i - 1]; } if (num_functions > 1) { HYPRE_Int *int_buf_data = NULL; HYPRE_Int num_sends, startc; HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, memory_location_P); index = 0; num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), memory_location_P); for (i = 0; i < 
num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, memory_location_P); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num > 0) { startf = start - cpt_array[my_thread_num - 1]; } else { startf = 0; } if (my_thread_num < num_threads - 1) { stopf = stop - cpt_array[my_thread_num]; } else { stopf = n_Fpts; } /* Create D_q = D_beta, D_inv = 1/(D_q+D_lambda) */ for (i = startf; i < stopf; i++) { for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i + 1]; j++) { D_q[i] += As_FC_diag_data[j]; } for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i + 1]; j++) { D_q[i] += As_FC_offd_data[j]; } if (D_q[i] + D_lambda[i]) { D_inv[i] = 1.0 / (D_q[i] + D_lambda[i]); } } row = 0; if (my_thread_num) { row = new_fpt_array[my_thread_num - 1]; } fpt = startf; for (i = start; i < stop; i++) { if (CF_marker[i] == -2) { new_fine_to_fine[row++] = fpt++; } else if (CF_marker[i] < 0) { fpt++; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { if (num_cols_A_FF_offd) { D_lambda_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, memory_location_P); D_inv_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, memory_location_P); } index = 0; comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF); if (!comm_pkg) { hypre_MatvecCommPkgCreate(As_FF); comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), memory_location_P); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { buf_data[index++] 
= D_lambda[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_lambda_offd); hypre_ParCSRCommHandleDestroy(comm_handle); index = 0; for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { buf_data[index++] = D_inv[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_inv_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Create D_tau */ startnewf = 0; if (my_thread_num) { startnewf = new_fpt_array[my_thread_num - 1]; } stopnewf = new_fpt_array[my_thread_num]; for (i = startnewf; i < stopnewf; i++) { for (j = As_FF_diag_i[i] + 1; j < As_FF_diag_i[i + 1]; j++) { index = As_FF_diag_j[j]; D_tau[i] += As_FF_diag_data[j] * D_lambda[index] * D_inv[index]; } for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i + 1]; j++) { index = As_FF_offd_j[j]; D_tau[i] += As_FF_offd_data[j] * D_lambda_offd[index] * D_inv_offd[index]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Create D_w = D_alpha + D_gamma + D_tau */ row = 0; if (my_thread_num) { row = new_fpt_array[my_thread_num - 1]; } for (i = start; i < stop; i++) { if (CF_marker[i] == -2) { if (num_functions > 1) { HYPRE_Int jA, jC, jS; jC = A_diag_i[i]; for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++) { jS = S_diag_j[j]; jA = A_diag_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func[jA]) { D_w[row] += A_diag_data[jC++]; } else { jC++; } jA = A_diag_j[jC]; } jC++; } for (j = jC; j < A_diag_i[i + 1]; j++) { if (dof_func[i] == dof_func[A_diag_j[j]]) { D_w[row] += A_diag_data[j]; } } jC = A_offd_i[i]; for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { jS = S_offd_j[j]; jA = A_offd_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func_offd[jA]) { D_w[row] += A_offd_data[jC++]; } else { jC++; } jA = A_offd_j[jC]; } jC++; } for (j 
= jC; j < A_offd_i[i + 1]; j++) { if (dof_func[i] == dof_func_offd[A_offd_j[j]]) { D_w[row] += A_offd_data[j]; } } D_w[row] += D_tau[row]; row++; } else { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { D_w[row] += A_diag_data[j]; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { D_w[row] += A_offd_data[j]; } for (j = As_FF_diag_i[row] + 1; j < As_FF_diag_i[row + 1]; j++) { if (D_inv[As_FF_diag_j[j]]) { D_w[row] -= As_FF_diag_data[j]; } } for (j = As_FF_offd_i[row]; j < As_FF_offd_i[row + 1]; j++) { if (D_inv_offd[As_FF_offd_j[j]]) { D_w[row] -= As_FF_offd_data[j]; } } D_w[row] += D_tau[row] - D_q[new_fine_to_fine[row]]; row++; } } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif startnewf = 0; if (my_thread_num) { startnewf = new_fpt_array[my_thread_num - 1]; } stopnewf = new_fpt_array[my_thread_num]; for (i = startnewf; i < stopnewf; i++) { j = As_FF_diag_i[i]; if (D_w[i]) { beta = -1.0 / D_w[i]; As_FF_diag_data[j] = beta * (D_q[new_fine_to_fine[i]] + D_lambda[new_fine_to_fine[i]]); for (j = As_FF_diag_i[i] + 1; j < As_FF_diag_i[i + 1]; j++) { As_FF_diag_data[j] *= beta; } for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i + 1]; j++) { As_FF_offd_data[j] *= beta; } } } for (i = startf; i < stopf; i++) { gamma = D_inv[i]; for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i + 1]; j++) { As_FC_diag_data[j] *= gamma; } for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i + 1]; j++) { As_FC_offd_data[j] *= gamma; } } } /* end parallel region */ W = hypre_ParMatmul(As_FF, As_FC); W_diag = hypre_ParCSRMatrixDiag(W); W_offd = hypre_ParCSRMatrixOffd(W); W_diag_i = hypre_CSRMatrixI(W_diag); W_diag_j = hypre_CSRMatrixJ(W_diag); W_diag_data = hypre_CSRMatrixData(W_diag); W_offd_i = hypre_CSRMatrixI(W_offd); W_offd_j = hypre_CSRMatrixJ(W_offd); W_offd_data = hypre_CSRMatrixData(W_offd); num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd); /*----------------------------------------------------------------------- * Intialize data for P 
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_old_Cpts + 1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_old_Cpts + 1, memory_location_P); P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_new_Fpts]; P_offd_size = hypre_CSRMatrixI(W_offd)[n_new_Fpts]; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,c_pt,row,cnt_diag,cnt_offd) #endif { HYPRE_Int rowp; HYPRE_Int my_thread_num = hypre_GetThreadNum(); start = start_array[my_thread_num]; stop = start_array[my_thread_num + 1]; if (my_thread_num > 0) { c_pt = cpt_array[my_thread_num - 1]; } else { c_pt = 0; } row = 0; if (my_thread_num) { row = new_fpt_array[my_thread_num - 1]; } rowp = row; if (my_thread_num > 0) { rowp = row + cpt_array[my_thread_num - 1]; } cnt_diag = W_diag_i[row] + c_pt; cnt_offd = W_offd_i[row]; for (i = start; i < stop; i++) { if (CF_marker[i] > 0) { rowp++; P_diag_j[cnt_diag] = c_pt++; P_diag_data[cnt_diag++] = 1.0; P_diag_i[rowp] = cnt_diag; P_offd_i[rowp] = cnt_offd; } else if (CF_marker[i] == -2) { rowp++; for (j = W_diag_i[row]; j < W_diag_i[row + 1]; j++) { P_diag_j[cnt_diag] = W_diag_j[j]; P_diag_data[cnt_diag++] = W_diag_data[j]; } for (j = W_offd_i[row]; j < W_offd_i[row + 1]; j++) { P_offd_j[cnt_offd] = W_offd_j[j]; P_offd_data[cnt_offd++] = W_offd_data[j]; } row++; P_diag_i[rowp] = cnt_diag; P_offd_i[rowp] = cnt_offd; } } } /* end parallel region */ /*----------------------------------------------------------------------- * Create matrix *-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, total_old_global_cpts, 
total_global_cpts, num_old_cpts_global, num_cpts_global, num_cols_P_offd, P_diag_i[n_old_Cpts], P_offd_i[n_old_Cpts]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W); hypre_ParCSRMatrixColMapOffd(W) = NULL; hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { HYPRE_Int *map; hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_old_Cpts]; P_offd_size = P_offd_i[n_old_Cpts]; col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P); if (num_cols_P_offd) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i = 0; i < P_offd_size; i++) { P_marker[P_offd_j[i]] = 1; } new_ncols_P_offd = 0; for (i = 0; i < num_cols_P_offd; i++) { if (P_marker[i]) { new_ncols_P_offd++; } } new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST); map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_cols_P_offd; i++) { if (P_marker[i]) { new_col_map_offd[index] = col_map_offd_P[i]; map[index++] = i; } } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < P_offd_size; i++) { P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i], new_ncols_P_offd); } 
/* NOTE(review): the lines below are the tail of
   hypre_BoomerAMGBuildModPartialExtPEInterpHost (the routine begins above
   this chunk).  This tail finishes the truncation bookkeeping, finalizes the
   interpolation matrix P, and releases all workspace. */
      /* free the old (uncompressed) column map and install the compressed one */
      hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
      hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
      hypre_TFree(map, HYPRE_MEMORY_HOST);
      } /* end if (num_cols_P_offd) */
   } /* end truncation (trunc_factor != 0.0 || max_elmts > 0) */

   /* build the communication package for the new interpolation matrix */
   hypre_MatvecCommPkgCreate(P);
   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(D_q, memory_location_P);
   hypre_TFree(D_inv, memory_location_P);
   hypre_TFree(D_inv_offd, memory_location_P);
   hypre_TFree(D_lambda, memory_location_P);
   hypre_TFree(D_lambda_offd, memory_location_P);
   hypre_TFree(D_tau, memory_location_P);
   hypre_TFree(D_w, memory_location_P);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(new_fpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(start_array, HYPRE_MEMORY_HOST);
   hypre_TFree(new_fine_to_fine, HYPRE_MEMORY_HOST);
   hypre_TFree(buf_data, memory_location_P);

   /* the intermediate matrices As_FF, As_FC, and W = As_FF * As_FC are no
      longer needed (their relevant arrays were copied into P above) */
   hypre_ParCSRMatrixDestroy(As_FF);
   hypre_ParCSRMatrixDestroy(As_FC);
   hypre_ParCSRMatrixDestroy(W);

   return hypre_error_flag;
}

/* Driver for the modified partial extended+e interpolation.
 *
 * On CUDA/HIP builds, dispatches to the device implementation when the
 * execution policy for A resolves to the device; otherwise (or in CPU-only
 * builds) calls the host implementation above.  Note the device path does not
 * take num_functions/dof_func.  The result is returned through P_ptr; the
 * return value is the error code from the selected implementation. */
HYPRE_Int
hypre_BoomerAMGBuildModPartialExtPEInterp( hypre_ParCSRMatrix  *A,
                                           HYPRE_Int           *CF_marker,
                                           hypre_ParCSRMatrix  *S,
                                           HYPRE_BigInt        *num_cpts_global,
                                           HYPRE_BigInt        *num_old_cpts_global,
                                           HYPRE_Int            num_functions,
                                           HYPRE_Int           *dof_func,
                                           HYPRE_Int            debug_flag,
                                           HYPRE_Real           trunc_factor,
                                           HYPRE_Int            max_elmts,
                                           hypre_ParCSRMatrix **P_ptr )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("PartialExtPEInterp");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec =
      hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* device implementation (no num_functions/dof_func arguments) */
      ierr = hypre_BoomerAMGBuildModPartialExtPEInterpDevice(A, CF_marker, S,
                                                             num_cpts_global,
                                                             num_old_cpts_global,
                                                             debug_flag,
                                                             trunc_factor,
                                                             max_elmts,
                                                             P_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGBuildModPartialExtPEInterpHost(A, CF_marker, S,
                                                           num_cpts_global,
                                                           num_old_cpts_global,
                                                           num_functions,
                                                           dof_func,
                                                           debug_flag,
                                                           trunc_factor,
                                                           max_elmts,
                                                           P_ptr);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}
LG_CC_7_Nov29_2021.c
//------------------------------------------------------------------------------ // LG_CC_FastSV6: connected components //------------------------------------------------------------------------------ // LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved. // SPDX-License-Identifier: BSD-2-Clause //------------------------------------------------------------------------------ // Code is based on the algorithm described in the following paper // Zhang, Azad, Hu. FastSV: FastSV: A Distributed-Memory Connected Component // Algorithm with Fast Convergence (SIAM PP20) // A subsequent update to the algorithm is here (which might not be reflected // in this code): // Yongzhe Zhang, Ariful Azad, Aydin Buluc: Parallel algorithms for finding // connected components using linear algebra. J. Parallel Distributed Comput. // 144: 14-27 (2020). // Modified by Tim Davis, Texas A&M University // The input graph G must be undirected, or directed and with an adjacency // matrix that has a symmetric structure. Self-edges (diagonal entries) are // OK, and are ignored. The values and type of A are ignored; just its // structure is accessed. // This function should not be called by multiple user threads on the same // graph G, since it unpacks G->A and then packs it back when done. G->A is // unchanged when the function returns, but during execution G->A is empty. #define LAGraph_FREE_ALL ; #include "LG_internal.h" #if !LG_VANILLA #if (! LG_SUITESPARSE ) #error "SuiteSparse:GraphBLAS v6.0.0 or later required" #endif //------------------------------------------------------------------------------ // Reduce_assign: parent (Px) += mngp, using MIN as the "+=" accum operator //------------------------------------------------------------------------------ // The Px array of size n is the non-opaque copy of the parent vector, where // i = Px [j] if the parent of node j is node i. It can thus have duplicates. // The vectors parent and mngp are full (all entries present). 
// This function computes the following, which is done explicitly in the
// Reduce_assign function in LG_CC_Boruvka:
//
//      for (j = 0 ; j < n ; j++)
//      {
//          uint64_t i = Px [j] ;
//          parent [i] = min (parent [i], mngp [j]) ;
//      }
//
// If C(i,j) is present where i == Px [j], then this can be written as:
//
//      parent = min (parent, C*mngp)
//
// when using the min_2nd semiring.  This can be done efficiently because C
// can be constructed in O(1) time and O(1) additional space (not counting
// the prior Cp, Ci, and Cx arrays), when using the SuiteSparse pack/unpack
// move constructors.  The min_2nd semiring ignores the values of C and
// operates only on the structure, so its values are not relevant.  C is thus
// chosen as a GrB_BOOL array where Cx [0] = false.

static inline GrB_Info Reduce_assign
(
    // input/output:
    GrB_Vector parent,      // vector of size n, all entries present
    // input:
    GrB_BinaryOp min,       // min operator (uint32 or uint64)
    GrB_Semiring min_2nd,   // min_2nd semiring (uint32 or uint64)
    GrB_Vector mngp,        // vector of size n, all entries present
    GrB_Matrix C,           // boolean matrix of size n-by-n
    GrB_Index **Cp_handle,  // array of size n+1, equal to 0:n
    GrB_Index **Ci_handle,  // Px array of size n, always uint64
    void **Cx_handle,       // array of size 1, contents not accessed
    char *msg
)
{

    // size of Cp, Ci, and Cx in bytes
    GrB_Index n ;
    GrB_TRY (GrB_Vector_size (&n, parent)) ;
    GrB_Index Cp_size = (n+1) * sizeof (GrB_Index) ;
    GrB_Index Ci_size = n * sizeof (GrB_Index) ;
    GrB_Index Cx_size = sizeof (bool) ;

    // pack Cp, Ci, and Cx into a matrix C with C(i,j) present if Ci(j) == i
    bool iso = true, jumbled = false ;
    GrB_TRY (GxB_Matrix_pack_CSC (C, Cp_handle, Ci_handle, Cx_handle,
        Cp_size, Ci_size, Cx_size, iso, jumbled, NULL)) ;

    // parent = min (parent, C*mngp) using the MIN_SECOND semiring
    GrB_TRY (GrB_mxv (parent, NULL, min, min_2nd, C, mngp, NULL)) ;

    // unpack the contents of C, returning Cp, Ci, and Cx to the caller
    GrB_TRY (GxB_Matrix_unpack_CSC (C, Cp_handle, Ci_handle, Cx_handle,
        &Cp_size, &Ci_size, &Cx_size, &iso, &jumbled, NULL)) ;

    return (GrB_SUCCESS) ;
}

//------------------------------------------------------------------------------
// LG_CC_FastSV6
//------------------------------------------------------------------------------

// The output of LG_CC_FastSV* is a vector component, where component(i)=s if
// node i is in the connected component whose representative is node s.  If s
// is a representative, then component(s)=s.  The number of connected
// components in the graph G is the number of representatives.

// free all workspace (but not the output vector `parent`)
#undef LAGraph_FREE_WORK
#define LAGraph_FREE_WORK                   \
{                                           \
    LAGraph_Free ((void **) &Tp) ;          \
    LAGraph_Free ((void **) &Tj) ;          \
    LAGraph_Free ((void **) &Tx) ;          \
    LAGraph_Free ((void **) &Cp) ;          \
    LAGraph_Free ((void **) &Px) ;          \
    LAGraph_Free ((void **) &Cx) ;          \
    LAGraph_Free ((void **) &ht_key) ;      \
    LAGraph_Free ((void **) &ht_count) ;    \
    LAGraph_Free ((void **) &count) ;       \
    LAGraph_Free ((void **) &range) ;       \
    GrB_free (&T) ;                         \
    GrB_free (&t) ;                         \
    GrB_free (&y) ;                         \
    GrB_free (&gp) ;                        \
    GrB_free (&mngp) ;                      \
    GrB_free (&gp_new) ;                    \
}

// free all workspace and the result vector, on error
#undef LAGraph_FREE_ALL
#define LAGraph_FREE_ALL                    \
{                                           \
    LAGraph_FREE_WORK ;                     \
    GrB_free (&parent) ;                    \
}

#endif

int LG_CC_FastSV6           // SuiteSparse:GraphBLAS method, with GxB extensions
(
    // output
    GrB_Vector *component,  // component(i)=s if node is in the component s
    // inputs
    LAGraph_Graph G,        // input graph
    char *msg
)
{

#if LG_VANILLA
    LG_CHECK (0, GrB_NOT_IMPLEMENTED, "SuiteSparse required for this method") ;
#else

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    LG_CLEAR_MSG ;
    // workspace pointers are all initialized to NULL so that
    // LAGraph_FREE_WORK/LAGraph_FREE_ALL are safe to invoke at any point
    int64_t *range = NULL ;
    GrB_Index n, nnz, Cp_size = 0,
        *ht_key = NULL, *Px = NULL, *Cp = NULL, *count = NULL,
        *Tp = NULL, *Tj = NULL ;
    GrB_Vector parent = NULL, gp_new = NULL, mngp = NULL, gp = NULL,
        t = NULL, y = NULL ;
    GrB_Matrix T = NULL, C = NULL ;
    void *Tx = NULL, *Cx = NULL ;
    int *ht_count = NULL ;
    LG_CHECK (LAGraph_CheckGraph (G, msg),
GrB_INVALID_OBJECT, "graph is invalid") ; LG_CHECK (component == NULL, GrB_NULL_POINTER, "component is NULL") ; if (G->kind == LAGRAPH_ADJACENCY_UNDIRECTED || (G->kind == LAGRAPH_ADJACENCY_DIRECTED && G->A_structure_is_symmetric == LAGRAPH_TRUE)) { // A must be symmetric ; } else { // A must not be unsymmetric LG_CHECK (false, GrB_INVALID_VALUE, "input must be symmetric") ; } //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- GrB_Matrix A = G->A ; GrB_TRY (GrB_Matrix_nrows (&n, A)) ; GrB_TRY (GrB_Matrix_nvals (&nnz, A)) ; // determine the integer type, operators, and semirings to use GrB_Type Uint, Int ; GrB_IndexUnaryOp ramp ; GrB_Semiring min_2nd, min_2ndi ; GrB_BinaryOp min, ne, imin ; #ifdef COVERAGE // Just for test coverage, use 64-bit ints for n > 100. Do not use this // rule in production! #define NBIG 100 #else // For production use: 64-bit integers if n > 2^31 #define NBIG INT32_MAX #endif if (n > NBIG) { // use 64-bit integers throughout Uint = GrB_UINT64 ; Int = GrB_INT64 ; ramp = GrB_ROWINDEX_INT64 ; min = GrB_MIN_UINT64 ; imin = GrB_MIN_INT64 ; ne = GrB_NE_UINT64 ; min_2nd = GrB_MIN_SECOND_SEMIRING_UINT64 ; min_2ndi = GxB_MIN_SECONDI_INT64 ; } else { // use 32-bit integers, except for Px and for constructing the matrix C Uint = GrB_UINT32 ; Int = GrB_INT32 ; ramp = GrB_ROWINDEX_INT32 ; min = GrB_MIN_UINT32 ; imin = GrB_MIN_INT32 ; ne = GrB_NE_UINT32 ; min_2nd = GrB_MIN_SECOND_SEMIRING_UINT32 ; min_2ndi = GxB_MIN_SECONDI_INT32 ; } // FASTSV_SAMPLES: number of samples to take from each row A(i,:). // Sampling is used if the average degree is > 8 and if n > 1024. 
#define FASTSV_SAMPLES 4 bool sampling = (nnz > n * FASTSV_SAMPLES * 2 && n > 1024) ; // determine # of threads to use int nthreads ; LAGraph_TRY (LAGraph_GetNumThreads (&nthreads, NULL)) ; nthreads = LAGraph_MIN (nthreads, n / 16) ; nthreads = LAGraph_MAX (nthreads, 1) ; GrB_TRY (GrB_Vector_new (&gp_new, Uint, n)) ; Cx = (void *) LAGraph_Calloc (1, sizeof (bool)) ; Px = (GrB_Index *) LAGraph_Malloc (n, sizeof (GrB_Index)) ; LG_CHECK (Px == NULL || Cx == NULL, GrB_OUT_OF_MEMORY, "out of memory") ; // create Cp = 0:n (always 64-bit) and the empty C matrix GrB_TRY (GrB_Matrix_new (&C, GrB_BOOL, n, n)) ; GrB_TRY (GrB_Vector_new (&t, GrB_INT64, n+1)) ; GrB_TRY (GrB_assign (t, NULL, NULL, 0, GrB_ALL, n+1, NULL)) ; GrB_TRY (GrB_apply (t, NULL, NULL, GrB_ROWINDEX_INT64, t, 0, NULL)) ; GrB_TRY (GxB_Vector_unpack_Full (t, (void **) &Cp, &Cp_size, NULL, NULL)) ; GrB_TRY (GrB_free (&t)) ; //-------------------------------------------------------------------------- // warmup: y = min (0:n-1, A*t) using the MIN_SECONDI semiring //-------------------------------------------------------------------------- // y (i) = min (i, j) for all entries A(i,j). This warmup phase takes only // O(n) time, because of how the MIN_SECONDI semiring is implemented in // SuiteSparse:GraphBLAS. A is held by row, and the first entry in A(i,:) // is the minimum index j, so only the first entry in A(i,:) needs to be // considered for each row i. GrB_TRY (GrB_Vector_new (&t, Int, n)) ; GrB_TRY (GrB_Vector_new (&y, Int, n)) ; GrB_TRY (GrB_assign (t, NULL, NULL, 0, GrB_ALL, n, NULL)) ; GrB_TRY (GrB_assign (y, NULL, NULL, 0, GrB_ALL, n, NULL)) ; GrB_TRY (GrB_apply (y, NULL, NULL, ramp, y, 0, NULL)) ; GrB_TRY (GrB_mxv (y, NULL, imin, min_2ndi, A, t, NULL)) ; GrB_TRY (GrB_free (&t)) ; // The typecast from Int to Uint is required because the ROWINDEX operator // and MIN_SECONDI do not work in the UINT* domains, as built-in operators. 
// parent = (Uint) y GrB_TRY (GrB_Vector_new (&parent, Uint, n)) ; GrB_TRY (GrB_assign (parent, NULL, NULL, y, GrB_ALL, n, NULL)) ; GrB_TRY (GrB_free (&y)) ; // copy parent into gp, mngp, and Px. Px is a non-opaque 64-bit copy of the // parent GrB_Vector. The Px array is always of type GrB_Index since it // must be used as the input array for extractTuples and as Ci for pack_CSR. // If parent is uint32, GraphBLAS typecasts it to the uint64 Px array. GrB_TRY (GrB_Vector_extractTuples (NULL, Px, &n, parent)) ; GrB_TRY (GrB_Vector_dup (&gp, parent)) ; GrB_TRY (GrB_Vector_dup (&mngp, parent)) ; GrB_TRY (GrB_Vector_new (&t, GrB_BOOL, n)) ; //-------------------------------------------------------------------------- // sample phase //-------------------------------------------------------------------------- if (sampling) { //---------------------------------------------------------------------- // unpack A in CSR format //---------------------------------------------------------------------- void *Sx ; GrB_Index *Sp, *Sj, Sp_size, Sj_size, Sx_size, nvals ; bool S_jumbled, S_iso ; GrB_TRY (GrB_Matrix_nvals (&nvals, A)) ; GrB_TRY (GxB_Matrix_unpack_CSR (A, &Sp, &Sj, &Sx, &Sp_size, &Sj_size, &Sx_size, &S_iso, &S_jumbled, NULL)) ; //---------------------------------------------------------------------- // allocate workspace, including space to construct T //---------------------------------------------------------------------- GrB_Index Tp_size = (n+1) * sizeof (GrB_Index) ; GrB_Index Tj_size = nvals * sizeof (GrB_Index) ; GrB_Index Tx_size = sizeof (bool) ; Tp = (GrB_Index *) LAGraph_Malloc (n+1, sizeof (GrB_Index)) ; Tj = (GrB_Index *) LAGraph_Malloc (nvals, sizeof (GrB_Index)) ; Tx = (bool *) LAGraph_Calloc (1, sizeof (bool)) ; range = (int64_t *) LAGraph_Malloc (nthreads + 1, sizeof (int64_t)) ; count = (GrB_Index *) LAGraph_Calloc (nthreads + 1, sizeof (GrB_Index)); LG_CHECK (Tp == NULL || Tj == NULL || Tx == NULL || range == NULL || count == NULL, GrB_OUT_OF_MEMORY, 
"out of memory") ; //---------------------------------------------------------------------- // define parallel tasks to construct T //---------------------------------------------------------------------- // thread tid works on rows range[tid]:range[tid+1]-1 of A and T for (int tid = 0 ; tid <= nthreads ; tid++) { range [tid] = (n * tid + nthreads - 1) / nthreads ; } //---------------------------------------------------------------------- // determine the number entries to be constructed in T for each thread //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { for (int64_t i = range [tid] ; i < range [tid+1] ; i++) { int64_t deg = Sp [i + 1] - Sp [i] ; count [tid + 1] += LAGraph_MIN (FASTSV_SAMPLES, deg) ; } } //---------------------------------------------------------------------- // count = cumsum (count) //---------------------------------------------------------------------- for (int tid = 0 ; tid < nthreads ; tid++) { count [tid + 1] += count [tid] ; } //---------------------------------------------------------------------- // construct T //---------------------------------------------------------------------- // T (i,:) consists of the first FASTSV_SAMPLES of A (i,:). // TODO: this could be done by GxB_Select, using a new operator. 
#pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { GrB_Index p = count [tid] ; Tp [range [tid]] = p ; for (int64_t i = range [tid] ; i < range [tid+1] ; i++) { // construct T (i,:) from the first entries in A (i,:) for (int64_t j = 0 ; j < FASTSV_SAMPLES && Sp [i] + j < Sp [i + 1] ; j++) { Tj [p++] = Sj [Sp [i] + j] ; } Tp [i + 1] = p ; } } //---------------------------------------------------------------------- // import the result into the GrB_Matrix T //---------------------------------------------------------------------- GrB_TRY (GrB_Matrix_new (&T, GrB_BOOL, n, n)) ; GrB_TRY (GxB_Matrix_pack_CSR (T, &Tp, &Tj, &Tx, Tp_size, Tj_size, Tx_size, /* T is iso: */ true, S_jumbled, NULL)) ; //---------------------------------------------------------------------- // find the connected components of T //---------------------------------------------------------------------- bool changing = true ; while (changing) { // hooking & shortcutting // mngp = min (mngp, A*gp) using the MIN_SECOND semiring GrB_TRY (GrB_mxv (mngp, NULL, min, min_2nd, T, gp, NULL)) ; // parent = min (parent, C*mngp), where C(i,j) present if i=Px(j) GrB_TRY (Reduce_assign (parent, min, min_2nd, mngp, C, &Cp, &Px, &Cx, msg)) ; // parent = min (parent, mngp, gp) GrB_TRY (GrB_eWiseAdd (parent, NULL, min, min, mngp, gp, NULL)) ; // calculate grandparent: gp_new = parent (parent) // if parent is uint32, GraphBLAS typecasts to uint64 for Px GrB_TRY (GrB_Vector_extractTuples (NULL, Px, &n, parent)) ; GrB_TRY (GrB_extract (gp_new, NULL, NULL, parent, Px, n, NULL)) ; // terminate if gp and gp_new are the same GrB_TRY (GrB_eWiseMult (t, NULL, NULL, ne, gp_new, gp, NULL)) ; GrB_TRY (GrB_reduce (&changing, NULL, GrB_LOR_MONOID_BOOL, t, NULL)) ; // swap gp and gp_new GrB_Vector t = gp ; gp = gp_new ; gp_new = t ; } //---------------------------------------------------------------------- // use sampling to estimate the largest connected component in T 
//---------------------------------------------------------------------- // hash table size must be a power of 2 #define HASH_SIZE 1024 // number of samples to insert into the hash table #define HASH_SAMPLES 864 #define HASH(x) (((x << 4) + x) & (HASH_SIZE-1)) #define NEXT(x) ((x + 23) & (HASH_SIZE-1)) // allocate and initialize the hash table ht_key = (GrB_Index *) LAGraph_Malloc (HASH_SIZE, sizeof (GrB_Index)) ; ht_count = (int *) LAGraph_Calloc (HASH_SIZE, sizeof (int)) ; LG_CHECK (ht_key == NULL || ht_count == NULL, GrB_OUT_OF_MEMORY, "out of memory") ; for (int k = 0 ; k < HASH_SIZE ; k++) { ht_key [k] = UINT64_MAX ; } // hash the samples and find the most frequent entry uint64_t seed = n ; // random number seed int64_t key = -1 ; // most frequent entry int max_count = 0 ; // frequency of most frequent entry for (int64_t k = 0 ; k < HASH_SAMPLES ; k++) { // select an entry from Px at random GrB_Index x = Px [LAGraph_Random60 (&seed) % n] ; // find x in the hash table GrB_Index h = HASH (x) ; while (ht_key [h] != UINT64_MAX && ht_key [h] != x) { h = NEXT (h) ; } // add x to the hash table ht_key [h] = x ; ht_count [h]++ ; // keep track of the most frequent value if (ht_count [h] > max_count) { key = ht_key [h] ; max_count = ht_count [h] ; } } //---------------------------------------------------------------------- // compact the largest connected component in T //---------------------------------------------------------------------- // TODO: replace this with GxB_extract with GrB_Vector index arrays. // unpack T to resuse the space (all content is overwritten below) bool T_jumbled, T_iso ; GrB_TRY (GxB_Matrix_unpack_CSR (T, &Tp, &Tj, &Tx, &Tp_size, &Tj_size, &Tx_size, &T_iso, &T_jumbled, NULL)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { GrB_Index p = Sp [range [tid]] ; // thread tid scans A (range [tid]:range [tid+1]-1,:), // and constructs T(i,:) for all rows in this range. 
for (int64_t i = range [tid] ; i < range [tid+1] ; i++) { int64_t pi = Px [i] ; // pi = parent (i) Tp [i] = p ; // start the construction of T(i,:) // T(i,:) is empty if pi == key if (pi != key) { // scan A(i,:) for (GrB_Index pS = Sp [i] ; pS < Sp [i+1] ; pS++) { // get A(i,j) int64_t j = Sj [pS] ; if (Px [j] != key) { // add the entry T(i,j) to T, but skip it if // Px [j] is equal to key Tj [p++] = j ; } } // Add the entry T(i,key) if there is room for it in T(i,:); // if and only if node i is adjacent to a node j in the // largest component. The only way there can be space if // at least one T(i,j) appears with Px [j] equal to the key // (that is, node j is in the largest connected component, // key == Px [j]. One of these j's can then be replaced // with the key. If node i is not adjacent to any node in // the largest component, then there is no space in T(i,:) // and no new edge to the largest component is added. if (p - Tp [i] < Sp [i+1] - Sp [i]) { Tj [p++] = key ; } } } // count the number of entries inserted into T by this thread count [tid] = p - Tp [range [tid]] ; } // Compact empty space out of Tj not filled in from the above phase. 
nnz = 0 ; for (int tid = 0 ; tid < nthreads ; tid++) { memcpy (Tj + nnz, Tj + Tp [range [tid]], sizeof (GrB_Index) * count [tid]) ; nnz += count [tid] ; count [tid] = nnz - count [tid] ; } // Compact empty space out of Tp #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { GrB_Index p = Tp [range [tid]] ; for (int64_t i = range [tid] ; i < range [tid+1] ; i++) { Tp [i] -= p - count [tid] ; } } // finalize T Tp [n] = nnz ; // pack T for the final phase GrB_TRY (GxB_Matrix_pack_CSR (T, &Tp, &Tj, &Tx, Tp_size, Tj_size, Tx_size, T_iso, /* T is now jumbled */ true, NULL)) ; // pack A (unchanged since last unpack) GrB_TRY (GxB_Matrix_pack_CSR (A, &Sp, &Sj, &Sx, Sp_size, Sj_size, Sx_size, S_iso, S_jumbled, NULL)) ; // final phase uses the pruned matrix T A = T ; } //-------------------------------------------------------------------------- // check for quick return //-------------------------------------------------------------------------- if (nnz == 0) { (*component) = parent ; LAGraph_FREE_WORK ; return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // final phase //-------------------------------------------------------------------------- bool changing = true ; while (changing) { // hooking & shortcutting // mngp = min (mngp, A*gp) using the MIN_SECOND semiring GrB_TRY (GrB_mxv (mngp, NULL, min, min_2nd, A, gp, NULL)) ; // parent = min (parent, C*mngp) where C(i,j) is present if i=Px(j) GrB_TRY (Reduce_assign (parent, min, min_2nd, mngp, C, &Cp, &Px, &Cx, msg)) ; // parent = min (parent, mngp, gp) GrB_TRY (GrB_eWiseAdd (parent, NULL, min, min, mngp, gp, NULL)) ; // calculate grandparent: gp_new = parent (parent) // if parent is uint32, GraphBLAS typecasts to uint64 for Px. 
GrB_TRY (GrB_Vector_extractTuples (NULL, Px, &n, parent)) ; GrB_TRY (GrB_extract (gp_new, NULL, NULL, parent, Px, n, NULL)) ; // terminate if gp and gp_new are the same GrB_TRY (GrB_eWiseMult (t, NULL, NULL, ne, gp_new, gp, NULL)) ; GrB_TRY (GrB_reduce (&changing, NULL, GrB_LOR_MONOID_BOOL, t, NULL)) ; // swap gp and gp_new GrB_Vector t = gp ; gp = gp_new ; gp_new = t ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- (*component) = parent ; LAGraph_FREE_WORK ; return (GrB_SUCCESS) ; #endif }
core_ctrsm.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrsm.c, normal z -> c, Fri Sep 28 17:38:20 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" /***************************************************************************//** * * @ingroup core_trsm * * Solves one of the matrix equations * * \f[ op( A )\times X = \alpha B, \f] or * \f[ X \times op( A ) = \alpha B, \f] * * where op( A ) is one of: * \f[ op( A ) = A, \f] * \f[ op( A ) = A^T, \f] * \f[ op( A ) = A^H, \f] * * alpha is a scalar, X and B are m-by-n matrices, and * A is a unit or non-unit, upper or lower triangular matrix. * The matrix X overwrites B. * ******************************************************************************* * * @param[in] side * - PlasmaLeft: op(A)*X = B, * - PlasmaRight: X*op(A) = B. * * @param[in] uplo * - PlasmaUpper: A is upper triangular, * - PlasmaLower: A is lower triangular. * * @param[in] transa * - PlasmaNoTrans: A is not transposed, * - PlasmaTrans: A is transposed, * - PlasmaConjTrans: A is conjugate transposed. * * @param[in] diag * - PlasmaNonUnit: A has non-unit diagonal, * - PlasmaUnit: A has unit diagonal. * * @param[in] m * The number of rows of the matrix B. m >= 0. * * @param[in] n * The number of columns of the matrix B. n >= 0. * * @param[in] alpha * The scalar alpha. * * @param[in] A * The lda-by-ka triangular matrix, * where ka = m if side = PlasmaLeft, * and ka = n if side = PlasmaRight. * If uplo = PlasmaUpper, the leading k-by-k upper triangular part * of the array A contains the upper triangular matrix, and the * strictly lower triangular part of A is not referenced. * If uplo = PlasmaLower, the leading k-by-k lower triangular part * of the array A contains the lower triangular matrix, and the * strictly upper triangular part of A is not referenced. 
* If diag = PlasmaUnit, the diagonal elements of A are also not * referenced and are assumed to be 1. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,k). * * @param[in,out] B * On entry, the ldb-by-n right hand side matrix B. * On exit, if return value = 0, the ldb-by-n solution matrix X. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,m). * ******************************************************************************/ __attribute__((weak)) void plasma_core_ctrsm(plasma_enum_t side, plasma_enum_t uplo, plasma_enum_t transa, plasma_enum_t diag, int m, int n, plasma_complex32_t alpha, const plasma_complex32_t *A, int lda, plasma_complex32_t *B, int ldb) { cblas_ctrsm(CblasColMajor, (CBLAS_SIDE)side, (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)transa, (CBLAS_DIAG)diag, m, n, CBLAS_SADDR(alpha), A, lda, B, ldb); } /******************************************************************************/ void plasma_core_omp_ctrsm( plasma_enum_t side, plasma_enum_t uplo, plasma_enum_t transa, plasma_enum_t diag, int m, int n, plasma_complex32_t alpha, const plasma_complex32_t *A, int lda, plasma_complex32_t *B, int ldb, plasma_sequence_t *sequence, plasma_request_t *request) { int ak; if (side == PlasmaLeft) ak = m; else ak = n; #pragma omp task depend(in:A[0:lda*ak]) \ depend(inout:B[0:ldb*n]) { if (sequence->status == PlasmaSuccess) plasma_core_ctrsm(side, uplo, transa, diag, m, n, alpha, A, lda, B, ldb); } }
omp_flush.c
<ompts:test>
<ompts:testdescription>Test which checks the omp flush directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp flush</ompts:directive>
<ompts:dependences>omp barrier</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <unistd.h>

#include "omp_testsuite.h"
#include "omp_my_sleep.h"

/* Checks that a value written by one thread becomes visible to another
 * thread after both sides execute "#pragma omp flush" on that variable:
 * thread 1 writes result2 and flushes it; thread 0 sleeps (so the write
 * has presumably happened first), flushes, and then reads result2. */
int <ompts:testcode:functionname>omp_flush</ompts:testcode:functionname> (FILE * logFile)
{
    <ompts:orphan:vars>
    int result1;   /* value of result2 as observed by thread 0 */
    int result2;   /* written by thread 1, read by thread 0 */
    int dummy;     /* read back on thread 1 after its flush */
    </ompts:orphan:vars>

    result1 = 0;
    result2 = 0;

#pragma omp parallel
    {
        int rank;
        rank = omp_get_thread_num ();
        /* make sure every thread has passed the initialization above */
#pragma omp barrier
        if (rank == 1) {
            result2 = 3;
            <ompts:orphan>
            <ompts:check>#pragma omp flush (result2)</ompts:check>
            dummy = result2;
            </ompts:orphan>
        }
        if (rank == 0) {
            /* sleep long enough for thread 1's write and flush to finish
             * before thread 0 flushes and reads */
            <ompts:check>my_sleep(SLEEPTIME_LONG);</ompts:check>
            <ompts:orphan>
            <ompts:check>#pragma omp flush (result2)</ompts:check>
            result1 = result2;
            </ompts:orphan>
        }
    } /* end of parallel */

    /* pass only if thread 0 observed the value 3 written by thread 1 */
    return ((result1 == result2) && (result2 == dummy) && (result2 == 3));
}
</ompts:testcode>
</ompts:test>
GB_unaryop__ainv_uint8_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_uint8_int16
// op(A') function: GB_tran__ainv_uint8_int16

// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint8_int16
(
    uint8_t *restrict Cx,           // output values, one per entry of A
    const int16_t *restrict Ax,     // input values of A
    int64_t anz,                    // number of entries in A
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each entry is independent, so the loop parallelizes with a static
    // schedule; GB_CAST_OP expands to cast-then-negate via the macros above
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint8_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel is specialized for this operator by the
    // GB_* macros defined above, then textually included here
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
dataset.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_

#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>

// NOTE(review): set_feature_names below uses std::replace — <algorithm> is not
// included here and presumably arrives transitively; confirm and include it.
#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>

namespace LightGBM {

/*! \brief forward declaration */
class DatasetLoader;

/*!
* \brief This class is used to store some meta(non-feature) data for training data,
* e.g. labels, weights, initial scores, query level informations.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional
* 3. Query Boundaries, necessary for lambdarank.
*    The documents of i-th query is in [ query_boundaries[i], query_boundaries[i+1] )
* 4. Query Weights, auto calculated from weights and query_boundaries (if both exist):
*    the weight for the i-th query is the sum of the record weights in
*    [ query_boundaries[i], query_boundaries[i+1] ) divided by
*    (query_boundaries[i+1] - query_boundaries[i])
* 5. Initial score. optional. if existing, the model will boost from this score, otherwise will start from 0.
*/
class Metadata {
 public:
  /*!
  * \brief Null constructor
  */
  Metadata();
  /*!
  * \brief Initialization will load query level informations, since it is need for sampling data
  * \param data_filename Filename of data
  * \param initscore_file Filename of initial score
  */
  void Init(const char* data_filename, const char* initscore_file);
  /*!
  * \brief init as subset
  * \param metadata Metadata to take the subset from
  * \param used_indices Row indices to keep
  * \param num_used_indices Number of entries in used_indices
  */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
  * \brief Initial with binary memory
  * \param memory Pointer to memory
  */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
  * \brief Initial work, will allocate space for label, weight(if exists) and query(if exists)
  * \param num_data Number of training data
  * \param weight_idx Index of weight column, < 0 means doesn't exists
  * \param query_idx Index of query id column, < 0 means doesn't exists
  */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
  * \brief Partition label by used indices
  * \param used_indices Indices of local used
  */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);
  /*!
  * \brief Partition meta data according to local used indices if need
  * \param num_all_data Number of total training data, including other machines' data on parallel learning
  * \param used_data_indices Indices of local used training data
  */
  void CheckOrPartition(data_size_t num_all_data, const std::vector<data_size_t>& used_data_indices);
  /*! \brief Set label for all records; len must equal the number of data */
  void SetLabel(const label_t* label, data_size_t len);
  /*! \brief Set per-record weights; len must equal the number of data */
  void SetWeights(const label_t* weights, data_size_t len);
  /*! \brief Set query ids for all records (used to build query boundaries) */
  void SetQuery(const data_size_t* query, data_size_t len);
  /*!
  * \brief Set initial scores
  * \param init_score Initial scores, this class will manage memory for init_score.
  * \param len Number of initial scores
  */
  void SetInitScore(const double* init_score, data_size_t len);
  /*!
  * \brief Save binary data to file
  * \param writer File writer to write to
  */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;
  /*!
  * \brief Get sizes in byte of this object
  */
  size_t SizesInByte() const;
  /*!
  * \brief Get pointer of label
  * \return Pointer of label
  */
  inline const label_t* label() const { return label_.data(); }
  /*!
  * \brief Set label for one record
  * \param idx Index of this record
  * \param value Label value of this record
  */
  inline void SetLabelAt(data_size_t idx, label_t value) {
    label_[idx] = value;
  }
  /*!
  * \brief Set Weight for one record
  * \param idx Index of this record
  * \param value Weight value of this record
  */
  inline void SetWeightAt(data_size_t idx, label_t value) {
    weights_[idx] = value;
  }
  /*!
  * \brief Set Query Id for one record
  * \param idx Index of this record
  * \param value Query Id value of this record
  */
  inline void SetQueryAt(data_size_t idx, data_size_t value) {
    queries_[idx] = static_cast<data_size_t>(value);
  }
  /*!
  * \brief Get weights, if not exists, will return nullptr
  * \return Pointer of weights
  */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get data boundaries on queries, if not exists, will return nullptr
  *        we assume data will order by query,
  *        the interval of [query_boundaris[i], query_boundaris[i+1])
  *        is the data indices for query i.
  * \return Pointer of data boundaries on queries
  */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get Number of queries
  * \return Number of queries
  */
  inline data_size_t num_queries() const { return num_queries_; }
  /*!
  * \brief Get weights for queries, if not exists, will return nullptr
  * \return Pointer of weights for queries
  */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get initial scores, if not exists, will return nullptr
  * \return Pointer of initial scores
  */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get size of initial scores
  */
  inline int64_t num_init_score() const { return num_init_score_; }
  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;

 private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<label_t> label_;
  /*! \brief Weights data */
  std::vector<label_t> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t> query_weights_;
  /*! \brief Number of querys */
  data_size_t num_queries_;
  /*! \brief Number of Initial score, used to check correct weight file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief mutex for threading safe call */
  std::mutex mutex_;
  /*! \brief true if weights came from a side file (not the data itself) */
  bool weight_load_from_file_;
  /*! \brief true if query boundaries came from a side file */
  bool query_load_from_file_;
  /*! \brief true if initial scores came from a side file */
  bool init_score_load_from_file_;
};

/*! \brief Interface for Parser */
class Parser {
 public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}
  /*!
  * \brief Parse one line with label
  * \param str One line record, string format, should end with '\0'
  * \param out_features Output columns, store in (column_idx, values)
  * \param out_label Label will store to this if exists
  */
  virtual void ParseOneLine(const char* str,
                            std::vector<std::pair<int, double>>* out_features,
                            double* out_label) const = 0;
  /*! \brief Number of feature columns this parser produces */
  virtual int NumFeatures() const = 0;
  /*!
  * \brief Create an object of parser, will auto choose the format depend on file
  * \param filename One Filename of data
  * \param header true if the file has a header line
  * \param num_features Pass num_features of this data file if you know, <=0 means don't know
  * \param label_idx index of label column
  * \return Object of parser
  */
  static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};

/*! \brief The main class of data set,
*          which are used to training or validation
*/
class Dataset {
 public:
  friend DatasetLoader;

  LIGHTGBM_EXPORT Dataset();

  LIGHTGBM_EXPORT Dataset(data_size_t num_data);

  /*! \brief Build the internal feature groups from the given bin mappers
   *         and sampled column statistics */
  void Construct(
    std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
    int num_total_features,
    const std::vector<std::vector<double>>& forced_bins,
    int** sample_non_zero_indices,
    const int* num_per_col,
    int num_sample_col,
    size_t total_sample_cnt,
    const Config& io_config);

  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();

  /*! \brief Check that another dataset uses the same feature layout and
   *         bin mappers, so models/histograms are interchangeable */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }

  /*! \brief Push one dense row (one value per column); values of unused
   *         columns are silently dropped. No-op after FinishLoad. */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }

  /*! \brief Push one sparse row given as (column_idx, value) pairs.
   *         No-op after FinishLoad. */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
  }

  /*! \brief Push a single value for a (group, sub_feature) slot directly */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }

  /*! \brief Map used-feature index back to the original column index */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }

  /*! \brief Map original column index to used-feature index (-1 if unused) */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }

  /*! \brief Feature group that contains the given used feature */
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }

  /*! \brief Position of the feature inside its group
   *         (name keeps a historical typo; callers depend on it) */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }

  /*! \brief First global bin index of the given feature group */
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }

  /*! \brief Total number of bins over all feature groups */
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }

  /*! \brief Original column indices of all used features */
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }

  void ReSize(data_size_t num_data);

  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);

  LIGHTGBM_EXPORT void FinishLoad();

  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);

  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);

  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);

  LIGHTGBM_EXPORT bool GetInt8Field(const char* field_name, data_size_t* out_len, const int8_t** out_ptr);

  /*!
  * \brief Save current dataset into binary file, will save to "filename.bin"
  */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);

  LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);

  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);

  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);

  /*! \brief Build gradient/hessian histograms for the given leaf's data */
  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
                           const data_size_t* data_indices, data_size_t num_data,
                           int leaf_idx,
                           std::vector<std::unique_ptr<OrderedBin>>* ordered_bins,
                           const score_t* gradients, const score_t* hessians,
                           score_t* ordered_gradients, score_t* ordered_hessians,
                           bool is_constant_hessian,
                           HistogramBinEntry* histogram_data) const;

  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
                    HistogramBinEntry* data) const;

  /*! \brief Partition data_indices by the feature's threshold; delegates to
   *         the owning feature group. Returns the size of the lte side. */
  inline data_size_t Split(int feature,
                           const uint32_t* threshold,
                           int num_threshold,
                           bool default_left,
                           data_size_t* data_indices, data_size_t num_data,
                           data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left,
                                         data_indices, num_data, lte_indices, gt_indices);
  }

  /*! \brief Bin offset of the feature inside its group: the first
   *         sub-feature reserves bin 0, the others do not */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }

  /*! \brief Number of bins of the i-th used feature */
  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }

  /*! \brief Monotone constraint of the i-th feature (0 if none configured) */
  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }

  /*! \brief Penalty of the i-th feature (1 if none configured;
   *         name keeps a historical typo) */
  inline double FeaturePenalte(int i) const {
    if (feature_penalty_.empty()) {
      return 1;
    } else {
      return feature_penalty_[i];
    }
  }

  /*! \brief true if any feature has a non-zero monotone constraint */
  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }

  /*! \brief Total number of bins in the given feature group */
  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }

  /*! \brief Bin mapper of the i-th used feature */
  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }

  /*! \brief Bin data of the group containing the i-th used feature */
  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }

  /*! \brief Bin data of the given feature group */
  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }

  /*! \brief true if the given feature group stores its bins sparsely */
  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }

  /*! \brief Iterator over the bins of the i-th used feature */
  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }

  /*! \brief Iterator over the bins of a whole feature group */
  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }

  /*! \brief Convert a bin index of feature i back to a real value */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }

  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }

  /*! \brief Create one OrderedBin per feature group, in parallel */
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }

  /*!
  * \brief Get meta data pointer
  * \return Pointer of meta data
  */
  inline const Metadata& metadata() const { return metadata_; }

  /*! \brief Get Number of used features */
  inline int num_features() const { return num_features_; }

  /*! \brief Get Number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}

  /*! \brief Get Number of total features */
  inline int num_total_features() const { return num_total_features_; }

  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }

  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }

  /*! \brief Set feature names; rejects non-ASCII names and replaces
   *         spaces with underscores */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name : feature_names_) {
      // check ascii
      if (!Common::CheckASCII(feature_name)) {
        Log::Fatal("Do not support non-ascii characters in feature name.");
      }
      if (feature_name.find(' ') != std::string::npos) {
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
    }
    if (spaceInFeatureName) {
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }

  /*! \brief One bin-info string per original column ("none" for unused) */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }

  void ResetConfig(const char* parameters);

  /*! \brief Get Number of data */
  inline data_size_t num_data() const { return num_data_; }

  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;

  /*! \brief Append the (used) features of another dataset to this one */
  void addFeaturesFrom(Dataset* other);

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index*/
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features*/
  int num_features_;
  /*! \brief Number of total features*/
  int num_total_features_;
  /*! \brief Number of total data*/
  data_size_t num_data_;
  /*! \brief Store some label level data*/
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief token written into binary files to identify the format */
  static const char* binary_file_token;
  /*! \brief Number of feature groups */
  int num_groups_;
  /*! \brief Original column index of each used feature */
  std::vector<int> real_feature_idx_;
  /*! \brief Group index of each used feature */
  std::vector<int> feature2group_;
  /*! \brief Position of each used feature inside its group */
  std::vector<int> feature2subfeature_;
  /*! \brief Cumulative bin boundaries of the groups */
  std::vector<uint64_t> group_bin_boundaries_;
  /*! \brief First used-feature index of each group */
  std::vector<int> group_feature_start_;
  /*! \brief Number of used features in each group */
  std::vector<int> group_feature_cnt_;
  /*! \brief Monotone constraint per used feature (may be empty) */
  std::vector<int8_t> monotone_types_;
  /*! \brief Penalty per used feature (may be empty) */
  std::vector<double> feature_penalty_;
  /*! \brief true once FinishLoad has been called; pushes are ignored after */
  bool is_finish_load_;
  int max_bin_;
  std::vector<int32_t> max_bin_by_feature_;
  /*! \brief User-forced bin bounds per feature */
  std::vector<std::vector<double>> forced_bin_bounds_;
  int bin_construct_sample_cnt_;
  int min_data_in_bin_;
  bool use_missing_;
  bool zero_as_missing_;
};

}  // namespace LightGBM

#endif   // LIGHTGBM_DATASET_H_
GB_unop__identity_fc32_uint8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// This is the IDENTITY operator specialized for C = GxB_FC32_t (single
// complex) and A = uint8_t.  "Identity" here is effectively a typecast:
// each uint8 entry becomes a complex number with zero imaginary part.
// The GB_* macros below are not private helpers: they are the contract
// consumed by the template files #include'd further down, so their exact
// definitions must be kept in sync with those templates.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_fc32_uint8
// op(A') function: GB_unop_tran__identity_fc32_uint8

// C type:   GxB_FC32_t
// A type:   uint8_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the already-cast input)
#define GB_OP(z, x) \
    z = x ;

// casting: widen uint8 to float, then build a complex with imag = 0
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time switches from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over anz entries, parallelized with a static OpenMP
// schedule.  Aliasing Cx == Ax is explicitly allowed: each iteration reads
// Ax [p] before writing Cx [p] and entries are independent.
GrB_Info GB_unop_apply__identity_fc32_uint8
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const uint8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint8_t aij = Ax [p] ;
        GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which expands the
// GB_* macros defined above (GB_PHASE_2_OF_2 selects the numeric phase).
GrB_Info GB_unop_tran__identity_fc32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
a.12.1.c
/* { dg-do compile } */

#include <stdio.h>

extern float average (float, float, float);

/* Iterative 3-point relaxation (OpenMP spec example A.12): each interior
   element of x[] is repeatedly replaced by average() of its neighbours,
   with xold[] holding the previous sweep, until every update stays within
   +/- tol.  The master thread reports progress after each sweep.  */
void
a12 (float *x, float *xold, int n, float tol)
{
  int idx, toobig;
  int iter = 0;
  float diff, prev;

#pragma omp parallel
  {
    do
      {
	/* Snapshot the current iterate.  The implicit barrier at the end
	   of this worksharing loop guarantees xold[] is complete before
	   any thread reads it below.  */
#pragma omp for private(idx)
	for (idx = 1; idx < n - 1; ++idx)
	  xold[idx] = x[idx];

	/* Exactly one thread clears the shared counter; the implicit
	   barrier of 'single' orders the reset before the reduction.  */
#pragma omp single
	{
	  toobig = 0;
	}

	/* Relax and tally the updates that moved by more than tol.
	   (The two-sided comparison is kept instead of fabsf so that
	   NaN differences are not counted, matching the original.)  */
#pragma omp for private(idx,prev,diff) reduction(+:toobig)
	for (idx = 1; idx < n - 1; ++idx)
	  {
	    prev = x[idx];
	    x[idx] = average (xold[idx - 1], x[idx], xold[idx + 1]);
	    diff = prev - x[idx];
	    if (diff > tol || diff < -tol)
	      ++toobig;
	  }

	/* toobig is final here: the reduction loop above ends with an
	   implicit barrier, so master reads the fully-reduced value.  */
#pragma omp master
	{
	  ++iter;
	  printf ("iteration %d, toobig=%d\n", iter, toobig);
	}
      }
    while (toobig > 0);
  }
}
conv_kernel_fp16_arm82.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, Open AI Lab
 * Author: xlchen@openailab.com
 */

#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <arm_neon.h>
#include <sys/time.h>

#include "conv_kernel_arm.h"
#include "compiler_fp16.h"

/* the 4x16 GEMM kernel processes 16 output channels at a time */
#define PER_OUT_CHAN 16

/* External hand-written kernels (presumably AArch64 assembler, tuned for
 * Cortex-A76 per the _a76 suffix -- defined elsewhere in the project):
 *   hgemm_4x16_a76 / hgemm_4x4_a76: fp16 GEMM micro-kernels producing a
 *     4-column by 16-row (resp. 4-row) output tile;
 *   im2col_fp16_1x1 / im2col_fp16_3x3: fast column-packing paths.  */
void hgemm_4x16_a76(_fp16* biases, _fp16* input, _fp16* kernel, long kernel_size, _fp16* output, long output_xy,
                    long fused_relu);
void hgemm_4x4_a76(_fp16* biases, _fp16* input, _fp16* kernel, long kernel_size, _fp16* output, long output_xy,
                   long fused_relu);
void im2col_fp16_1x1(_fp16* input, long input_xy, _fp16* col, long col_cnt, long input_chan);
void im2col_fp16_3x3(_fp16* input, long input_x, long input_y, long input_chan, _fp16* col, long stride);

/* Pack convolution patches into the column buffer 'col', interleaved in
 * groups of 4 output pixels (matching the GEMM kernels' 4-wide tiles).
 * 'im' is one image in channel-major (CHW) layout: element (c, y, x) is at
 * im[input_xy * c + input_x * y + x].  [col_start, col_end) selects which
 * output pixels to pack; out-of-image taps are written as zero padding.
 * Fast paths: 1x1/stride-1 and 3x3/dilation-1 call the assembler packers;
 * everything else falls through to the generic gather loop.  */
void im2col(_fp16* im, _fp16* col, int input_chan, int input_x, int input_y, int kernel_x, int kernel_y,
            int stride_x, int stride_y, int dilation_x, int dilation_y, int pad_w0, int pad_w1, int pad_h0,
            int pad_h1, int output_x, int output_y, int col_start, int col_end)
{
    int kernel_size = kernel_x * kernel_y * input_chan;
    int input_xy = input_x * input_y;
    int pad_x = pad_w0;    /* only the leading pads shift coordinates; */
    int pad_y = pad_h0;    /* trailing pads are handled by bounds checks */
    _fp16* cur_col = col + col_start * kernel_size;
    int col_i, col_j, kch, ky, kx, i;

    if((kernel_x == 1) && (kernel_y == 1) && (stride_x == 1) && (stride_y == 1))
    {
        /* 1x1/s1: columns are just the input pixels; bulk-copy the
         * 4-aligned portion with the assembler packer */
        {
            int col_cnt = (col_end & -4) - (col_start & -4);
            im2col_fp16_1x1(im + col_start, input_xy, cur_col, col_cnt, input_chan);
            cur_col += col_cnt * kernel_size;
            col_i = col_end & -4;
        }
        // final 4 input: pack the ragged tail, zero-filling past col_end
        if(col_end & 0x3)
        {
            for(col_j = 0; col_j < kernel_size; col_j++)    /* col_j == channel here */
            {
                for(i = 0; i < 4; i++)
                {
                    if((col_i + i) < col_end)
                        *cur_col++ = *(im + input_xy * col_j + col_i + i);
                    else
                        *cur_col++ = 0.0;
                }
            }
        }
    }
    else if((kernel_x == 3) && (kernel_y == 3) && (dilation_x == 1) && (dilation_y == 1))
    {
        int is_pad0 = (pad_w0 == 0) && (pad_h0 == 0) && (pad_w1 == 0) && (pad_h1 == 0);
        for(col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4)
        {
            cur_col = col + col_i * kernel_size;
            int imy0 = col_i / output_x;
            int imy3 = (col_i + 3) / output_x;
            int imx0 = col_i - imy0 * output_x;
            int imx3 = (col_i + 3) - imy3 * output_x;
            /* fast path only when all 4 pixels sit on one output row and
             * none of their 3x3 windows can touch padding */
            if((imy0 == imy3) && (is_pad0 || (imy0 != 0 && imx0 != 0 && imy0 != (output_y - 1) && imx3 != (output_x - 1))))
            {
                _fp16* l0 = im + (imy0 * stride_y - pad_y) * input_x + (imx0 * stride_x - pad_x);
                {
                    im2col_fp16_3x3(l0, input_x, input_y, input_chan, cur_col, stride_x);
                    cur_col += 4 * kernel_size;
                }
            }
            else
            {
                /* slow path: gather the 4 windows tap by tap with bounds
                 * checks (zero for out-of-image taps) */
                int cnt_y[4] = {imy0, (col_i + 1) / output_x, (col_i + 2) / output_x, imy3};
                int cnt_x[4] = {imx0, col_i - cnt_y[1] * output_x + 1, col_i - cnt_y[2] * output_x + 2, imx3};
                int imx_start[4] = {cnt_x[0] * stride_x - pad_x, cnt_x[1] * stride_x - pad_x,
                                    cnt_x[2] * stride_x - pad_x, cnt_x[3] * stride_x - pad_x};
                int imy_start[4] = {cnt_y[0] * stride_y - pad_y, cnt_y[1] * stride_y - pad_y,
                                    cnt_y[2] * stride_y - pad_y, cnt_y[3] * stride_y - pad_y};
                for(kch = 0; kch < input_chan; kch++)
                    for(ky = 0; ky < 3; ky++)
                        for(kx = 0; kx < 3; kx++)
                        {
                            int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                            int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                            for(i = 0; i < 4; i++)
                            {
                                if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
                                    *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
                                else
                                    *cur_col++ = 0.0;
                            }
                        }
            }
        }
        // final 4 input: ragged tail of the 3x3 path (col_i left by the loop)
        if(col_end & 0x3)
        {
            int cnt_y[4] = {col_i / output_x, (col_i + 1) / output_x, (col_i + 2) / output_x, (col_i + 3) / output_x};
            int cnt_x[4] = {col_i - cnt_y[0] * output_x, col_i - cnt_y[1] * output_x + 1,
                            col_i - cnt_y[2] * output_x + 2, col_i - cnt_y[3] * output_x + 3};
            int imx_start[4] = {cnt_x[0] * stride_x - pad_x, cnt_x[1] * stride_x - pad_x,
                                cnt_x[2] * stride_x - pad_x, cnt_x[3] * stride_x - pad_x};
            int imy_start[4] = {cnt_y[0] * stride_y - pad_y, cnt_y[1] * stride_y - pad_y,
                                cnt_y[2] * stride_y - pad_y, cnt_y[3] * stride_y - pad_y};
            for(kch = 0; kch < input_chan; kch++)
                for(ky = 0; ky < 3; ky++)
                    for(kx = 0; kx < 3; kx++)
                    {
                        int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                        int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                        for(i = 0; i < 4; i++)
                        {
                            if((col_i + i) < col_end && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
                                *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
                            else
                                *cur_col++ = 0.0;
                        }
                    }
        }
    }
    else
    {    // for general cases: arbitrary kernel/stride/dilation, pure gather
        for(col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4)
        {
            int cnt_y[4] = {col_i / output_x, (col_i + 1) / output_x, (col_i + 2) / output_x, (col_i + 3) / output_x};
            int cnt_x[4] = {col_i - cnt_y[0] * output_x, col_i - cnt_y[1] * output_x + 1,
                            col_i - cnt_y[2] * output_x + 2, col_i - cnt_y[3] * output_x + 3};
            int imx_start[4] = {cnt_x[0] * stride_x - pad_x, cnt_x[1] * stride_x - pad_x,
                                cnt_x[2] * stride_x - pad_x, cnt_x[3] * stride_x - pad_x};
            int imy_start[4] = {cnt_y[0] * stride_y - pad_y, cnt_y[1] * stride_y - pad_y,
                                cnt_y[2] * stride_y - pad_y, cnt_y[3] * stride_y - pad_y};
            for(kch = 0; kch < input_chan; kch++)
                /* ky/kx step by dilation so imx/imy land on the dilated taps */
                for(ky = 0; ky < (kernel_y * dilation_y); ky += dilation_y)
                    for(kx = 0; kx < (kernel_x * dilation_x); kx += dilation_x)
                    {
                        int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                        int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                        for(i = 0; i < 4; i++)
                        {
                            if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
                                *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
                            else
                                *cur_col++ = 0.0;
                        }
                    }
        }
        // final 4 input: ragged tail of the general path
        if(col_end & 0x3)
        {
            int cnt_y[4] = {col_i / output_x, (col_i + 1) / output_x, (col_i + 2) / output_x, (col_i + 3) / output_x};
            int cnt_x[4] = {col_i - cnt_y[0] * output_x, col_i - cnt_y[1] * output_x + 1,
                            col_i - cnt_y[2] * output_x + 2, col_i - cnt_y[3] * output_x + 3};
            int imx_start[4] = {cnt_x[0] * stride_x - pad_x, cnt_x[1] * stride_x - pad_x,
                                cnt_x[2] * stride_x - pad_x, cnt_x[3] * stride_x - pad_x};
            int imy_start[4] = {cnt_y[0] * stride_y - pad_y, cnt_y[1] * stride_y - pad_y,
                                cnt_y[2] * stride_y - pad_y, cnt_y[3] * stride_y - pad_y};
            for(kch = 0; kch < input_chan; kch++)
                for(ky = 0; ky < (kernel_y * dilation_y); ky += dilation_y)
                    for(kx = 0; kx < (kernel_x * dilation_x); kx += dilation_x)
                    {
                        int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                        int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                        for(i = 0; i < 4; i++)
                        {
                            if((col_i + i) < col_end && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
                                *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
                            else
                                *cur_col++ = 0.0;
                        }
                    }
        }
    }
}

// interleave 0 ~ (output_chan & -16) kernels with 16 in form of k[0-15][0],k[0-15][1],k[0-15][2]..
// interleave (output_chan & -16) ~ ((output_chan + 3) & -4) tail kernels with 4 in form of
// k[0-3][0],k[0-3][1],k[0-3][2]..
/* Repack convolution weights so the GEMM kernels read them sequentially:
 * first groups of 16 output channels (for hgemm_4x16_a76), then groups of
 * 4 (for hgemm_4x4_a76), with the final partial group zero-padded to 4.
 * NOTE(review): when (kernel_chan & 3) == 0 the trailing cur_kernel1/2
 * pointers are computed past the last kernel but never dereferenced.  */
void interleave_kernel(_fp16* kernel, _fp16* kernel_interleaved, int kernel_chan, int kernel_size)
{
    int i, j;
    _fp16 *cur_kernel0, *cur_kernel1, *cur_kernel2, *cur_kernel3, *cur_kernel4, *cur_kernel5, *cur_kernel6,
        *cur_kernel7;
    _fp16 *cur_kernel8, *cur_kernel9, *cur_kernel10, *cur_kernel11, *cur_kernel12, *cur_kernel13, *cur_kernel14,
        *cur_kernel15;
    _fp16* cur_kernel_interleaved = kernel_interleaved;

    // interleave 16 kernels: emit element j of channels i..i+15, then j+1, ...
    for(i = 0; i < (kernel_chan & -16); i += 16)
    {
        cur_kernel0 = kernel + kernel_size * i;
        cur_kernel1 = kernel + kernel_size * (i + 1);
        cur_kernel2 = kernel + kernel_size * (i + 2);
        cur_kernel3 = kernel + kernel_size * (i + 3);
        cur_kernel4 = kernel + kernel_size * (i + 4);
        cur_kernel5 = kernel + kernel_size * (i + 5);
        cur_kernel6 = kernel + kernel_size * (i + 6);
        cur_kernel7 = kernel + kernel_size * (i + 7);
        cur_kernel8 = kernel + kernel_size * (i + 8);
        cur_kernel9 = kernel + kernel_size * (i + 9);
        cur_kernel10 = kernel + kernel_size * (i + 10);
        cur_kernel11 = kernel + kernel_size * (i + 11);
        cur_kernel12 = kernel + kernel_size * (i + 12);
        cur_kernel13 = kernel + kernel_size * (i + 13);
        cur_kernel14 = kernel + kernel_size * (i + 14);
        cur_kernel15 = kernel + kernel_size * (i + 15);
        for(j = 0; j < kernel_size; j++)
        {
            *(cur_kernel_interleaved++) = cur_kernel0[j];
            *(cur_kernel_interleaved++) = cur_kernel1[j];
            *(cur_kernel_interleaved++) = cur_kernel2[j];
            *(cur_kernel_interleaved++) = cur_kernel3[j];
            *(cur_kernel_interleaved++) = cur_kernel4[j];
            *(cur_kernel_interleaved++) = cur_kernel5[j];
            *(cur_kernel_interleaved++) = cur_kernel6[j];
            *(cur_kernel_interleaved++) = cur_kernel7[j];
            *(cur_kernel_interleaved++) = cur_kernel8[j];
            *(cur_kernel_interleaved++) = cur_kernel9[j];
            *(cur_kernel_interleaved++) = cur_kernel10[j];
            *(cur_kernel_interleaved++) = cur_kernel11[j];
            *(cur_kernel_interleaved++) = cur_kernel12[j];
            *(cur_kernel_interleaved++) = cur_kernel13[j];
            *(cur_kernel_interleaved++) = cur_kernel14[j];
            *(cur_kernel_interleaved++) = cur_kernel15[j];
        }
    }

    // interleave the remaining full groups of 4 kernels
    for(i = (kernel_chan & -16); i < (kernel_chan & -4); i += 4)
    {
        cur_kernel0 = kernel + kernel_size * i;
        cur_kernel1 = kernel + kernel_size * (i + 1);
        cur_kernel2 = kernel + kernel_size * (i + 2);
        cur_kernel3 = kernel + kernel_size * (i + 3);
        for(j = 0; j < kernel_size; j++)
        {
            *(cur_kernel_interleaved++) = cur_kernel0[j];
            *(cur_kernel_interleaved++) = cur_kernel1[j];
            *(cur_kernel_interleaved++) = cur_kernel2[j];
            *(cur_kernel_interleaved++) = cur_kernel3[j];
        }
    }

    // last 4 kernel: partial group, zero-padded to a full group of 4
    cur_kernel0 = kernel + kernel_size * i;
    cur_kernel1 = kernel + kernel_size * (i + 1);
    cur_kernel2 = kernel + kernel_size * (i + 2);
    if((kernel_chan & 0x3) == 3)
    {
        for(j = 0; j < kernel_size; j++)
        {
            *(cur_kernel_interleaved++) = cur_kernel0[j];
            *(cur_kernel_interleaved++) = cur_kernel1[j];
            *(cur_kernel_interleaved++) = cur_kernel2[j];
            *(cur_kernel_interleaved++) = 0.0;
        }
    }
    else if((kernel_chan & 0x3) == 2)
    {
        for(j = 0; j < kernel_size; j++)
        {
            *(cur_kernel_interleaved++) = cur_kernel0[j];
            *(cur_kernel_interleaved++) = cur_kernel1[j];
            *(cur_kernel_interleaved++) = 0.0;
            *(cur_kernel_interleaved++) = 0.0;
        }
    }
    else if((kernel_chan & 0x3) == 1)
    {
        for(j = 0; j < kernel_size; j++)
        {
            *(cur_kernel_interleaved++) = cur_kernel0[j];
            *(cur_kernel_interleaved++) = 0.0;
            *(cur_kernel_interleaved++) = 0.0;
            *(cur_kernel_interleaved++) = 0.0;
        }
    }
}

/* Interleave the weight tensor group by group into the private buffer
 * allocated by fp16_conv_hcl_prerun.  filter dims are [out_ch, in_ch, kh, kw]
 * (consistent with dims[0]/group being per-group output channels).  */
static void interleave(struct ir_tensor * filter, struct conv_priv_info* priv_info, struct conv_param* param)
{
    int group = param->group;
    int out_chan = filter->dims[0] / group;
    int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3];
    int kernel_size_g = kernel_size * out_chan;
    /* per-group stride in the destination: out_chan rounded up to 4 */
    int kernel_interleaved_size_g = kernel_size * ((out_chan + 3) & -4);

    _fp16* kernel = (_fp16*)filter->data;
    _fp16* interleave_buf = (_fp16*)priv_info->interleave_buffer;
    for(int g = 0; g < group; g++)
    {
        _fp16* cur_kernel = kernel + g * kernel_size_g;
        _fp16* cur_interleave = interleave_buf + g * kernel_interleaved_size_g;
        interleave_kernel(cur_kernel, cur_interleave, out_chan, kernel_size);
    }
}

/* GEMM over the first ch_end output channels (in blocks of PER_OUT_CHAN=16),
 * parallelized across channel blocks with OpenMP.  When output_xy is not a
 * multiple of 4, the last (<4) columns are computed into a 16x4 scratch tile
 * and scattered, to avoid the kernel writing past the row ends.
 * ch_start and cpu_affinity are currently unused here.  */
static void hgemm_set(_fp16* col, _fp16* kernel, _fp16* biases, _fp16* output, int kernel_size, int ch_start,
                      int ch_end, int output_xy, int relu_fused, int num_thread, int cpu_affinity)
{
    int nn_outch = ch_end / PER_OUT_CHAN;
    int col_end3 = output_xy & 0x3;    /* ragged trailing columns (0..3) */

    if (col_end3)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * PER_OUT_CHAN;

            _fp16* biasptr = biases ? (_fp16*)(biases + p) : NULL;
            _fp16* kernel_tmp = (_fp16*)(kernel + p * kernel_size);
            _fp16* output_tmp = (_fp16*)(output + p * output_xy);

            int col_line = 0;
            for (col_line = 0; col_line + 3 < output_xy; col_line += 4)
            {
                _fp16* col_tmp = (_fp16*)(col + col_line * kernel_size);
                hgemm_4x16_a76(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy,
                               relu_fused);
            }
            {
                /* tail columns: compute into a dense 16x4 tile, then copy
                 * only the valid col_end3 columns per channel */
                _fp16 result[64];
                _fp16* col_tmp = (_fp16*)(col + col_line * kernel_size);
                hgemm_4x16_a76(biasptr, col_tmp, kernel_tmp, kernel_size, result, 4, relu_fused);
                for (int i = 0; i < 16; i++)
                {
                    for (int j = 0; j < (col_end3); j++)
                        *(output + (p + i) * output_xy + col_line + j) = result[(i << 2) + j];
                }
            }
        }
    }
    else
    {
#pragma omp parallel for num_threads(num_thread)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * PER_OUT_CHAN;

            _fp16* biasptr = biases ? (_fp16*)(biases + p) : NULL;
            _fp16* kernel_tmp = (_fp16*)(kernel + p * kernel_size);
            _fp16* output_tmp = (_fp16*)(output + p * output_xy);

            for (int col_line = 0; col_line + 3 < output_xy; col_line += 4)
            {
                _fp16* col_tmp = (_fp16*)(col + col_line * kernel_size);
                hgemm_4x16_a76(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy,
                               relu_fused);
            }
        }
    }
}

/* GEMM for the leftover output channels [ch_start, ch_end) in blocks of 4,
 * single-threaded.  Ragged channel and/or column tails are computed into a
 * 4x4 scratch tile and scattered.  num_thread/cpu_affinity unused here.  */
static void hgemm4x4(_fp16* col, _fp16* kernel, _fp16* biases, _fp16* output, int kernel_size, int ch_start,
                     int ch_end, int output_xy, int relu_fused, int num_thread, int cpu_affinity)
{
    _fp16 result[16];
    _fp16* cur_biases = NULL;
    int col_line, kernel_num;
    _fp16 *cur_col, *cur_kernel, *cur_output;
    int i, j;
    int col_end3 = output_xy & 0x3;     /* ragged trailing columns */
    int kernel_end3 = ch_end & 0x3;     /* ragged trailing channels */

    for(kernel_num = ch_start; kernel_num < (ch_end & -4); kernel_num += 4)
    {
        if(biases)
            cur_biases = biases + kernel_num;
        cur_kernel = kernel + kernel_num * kernel_size;
        cur_output = output + kernel_num * output_xy;
        for(col_line = 0; col_line < (output_xy & -4); col_line += 4)
        {
            cur_col = col + col_line * kernel_size;
            hgemm_4x4_a76(cur_biases, cur_col, cur_kernel, kernel_size, cur_output + col_line, output_xy, relu_fused);
        }
        if(col_end3)
        {
            cur_col = col + col_line * kernel_size;
            hgemm_4x4_a76(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, relu_fused);
            for(i = 0; i < 4; i++)
            {
                for(j = 0; j < (col_end3); j++)
                    *(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
            }
        }
    }
    if(kernel_end3)
    {
        /* last partial channel group: always go through the scratch tile so
         * the kernel never writes rows beyond ch_end */
        if(biases)
            cur_biases = biases + kernel_num;
        cur_kernel = kernel + kernel_num * kernel_size;
        for(col_line = 0; col_line < (output_xy & -4); col_line += 4)
        {
            cur_col = col + col_line * kernel_size;
            hgemm_4x4_a76(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, relu_fused);
            for(i = 0; i < kernel_end3; i++)
                for(j = 0; j < 4; j++)
                    *(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
        }
        if(col_end3)
        {
            cur_col = col + col_line * kernel_size;
            hgemm_4x4_a76(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, relu_fused);
            for(i = 0; i < (kernel_end3); i++)
            {
                for(j = 0; j < (col_end3); j++)
                    *(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
            }
        }
    }
}

/* Size (bytes) of the shared im2col buffer: one column matrix for a single
 * group, with the pixel count rounded up to 4 plus 128 bytes of slack.  */
int fp16_conv_hcl_get_shared_mem_size(struct ir_tensor* input , \
                                      struct ir_tensor* output , \
                                      struct conv_param* param)
{
    int group = param->group;
    int input_chan = param->input_channel / group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];
    int mem_size = sizeof(_fp16) * kernel_size * ((output_xy + 3) & -4) + 128;

    return mem_size;
}

/* Size (bytes) of the interleaved-weights buffer for all groups (channels
 * rounded up to 4 per group, plus 128 bytes of slack).  */
static int get_private_mem_size(struct ir_tensor * filter, struct conv_param* param)
{
    int group = param->group;
    int out_chan = filter->dims[0] / group;
    int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3];
    int mem_size = sizeof(_fp16) * kernel_size * ((out_chan + 3) & -4) * group + 128;

    return mem_size;
}

/* Allocate the im2col and interleave buffers (unless caller-provided) and
 * pre-interleave the weights.  Always returns 0.  */
int fp16_conv_hcl_prerun(struct ir_tensor* input_tensor , \
                         struct ir_tensor* filter_tensor , \
                         struct ir_tensor* output_tensor , \
                         struct conv_priv_info* priv_info , \
                         struct conv_param* param)
{
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = fp16_conv_hcl_get_shared_mem_size(input_tensor , output_tensor , param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    interleave(filter_tensor, priv_info, param);

    return 0;
}

/* Release internally-allocated buffers (caller-provided ones are kept).  */
int fp16_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if(!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }
    if(!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    return 0;
}

/* Grouped fp16 convolution: for each (batch, group), im2col-pack the input
 * into the shared column buffer, then GEMM the first 16-aligned channel
 * block set with hgemm_set and any remaining channels with hgemm4x4.
 * Tensors are NCHW (dims[1] = channels).  Always returns 0.  */
int fp16_conv_hcl_run(struct ir_tensor* input_tensor , \
                      struct ir_tensor* filter_tensor , \
                      struct ir_tensor* bias_tensor , \
                      struct ir_tensor* output_tensor , \
                      struct conv_priv_info* priv_info , \
                      struct conv_param* param, \
                      int num_thread, int cpu_affinity)
{
    /* param */
    // printf("run into fp16_conv_hcl_run!\n");
    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_h1 = param->pad_h1;
    int pad_w0 = param->pad_w0;
    int pad_w1 = param->pad_w1;
    long fused_relu = param->activation;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;

    int out_c = output_tensor->dims[1] / group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_hw = out_h * out_w;
    int output_size = out_c * out_h * out_w;
    int out_c_align = ((out_c + 3) & -4);    /* NOTE(review): currently unused */

    /* buffer addr */
    _fp16* input_buf = (_fp16*)input_tensor->data;
    _fp16* output_buf = (_fp16*)output_tensor->data;
    _fp16* col_buf = (_fp16*)priv_info->im2col_buffer;
    _fp16* interleave_buf = (_fp16*)priv_info->interleave_buffer;
    _fp16* biases_buf = NULL;
    if (bias_tensor)
        biases_buf = (_fp16*)bias_tensor->data;

    /* channels handled by the 16-wide kernel vs the 4-wide remainder path */
    int sgemm_set_chan = out_c / PER_OUT_CHAN * PER_OUT_CHAN;
    int sgemm_set_remain = out_c % PER_OUT_CHAN;

    for(int n = 0; n < batch; n++)    // batch size
    {
        for(int g = 0; g < group; g++)
        {
            /* im2col */
            _fp16* cur_input = input_buf + (n * group + g) * input_size;
            im2col(cur_input, col_buf, in_c, in_w, in_h, kernel_w, kernel_h, stride_w, stride_h, dilation_w,
                   dilation_h, pad_w0, pad_w1, pad_h0, pad_h1, out_w, out_h, 0, out_hw);

            /* gemm */
            _fp16* cur_kernel = interleave_buf + g * (kernel_size * ((out_c + 3) & -4));
            _fp16* cur_output = output_buf + (n * group + g) * output_size;
            _fp16* cur_bias = biases_buf ? (biases_buf + g * out_c) : NULL;
            hgemm_set(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, 0, sgemm_set_chan, out_hw, fused_relu,
                      num_thread, cpu_affinity);
            if(sgemm_set_remain)
            {
                hgemm4x4(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, sgemm_set_chan, out_c, out_hw,
                         fused_relu, num_thread, cpu_affinity);
            }
        }
    }

    return 0;
}
GB_binop__le_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__le_int8 // A.*B function (eWiseMult): GB_AemultB__le_int8 // A*D function (colscale): GB_AxD__le_int8 // D*A function (rowscale): GB_DxB__le_int8 // C+=B function (dense accum): GB_Cdense_accumB__le_int8 // C+=b function (dense accum): GB_Cdense_accumb__le_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__le_int8 // C=scalar+B GB_bind1st__le_int8 // C=scalar+B' GB_bind1st_tran__le_int8 // C=A+scalar GB_bind2nd__le_int8 // C=A'+scalar GB_bind2nd_tran__le_int8 // C type: bool // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx 
[pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x <= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_INT8 || GxB_NO_LE_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__le_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__le_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const 
int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__le_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__le_int8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__le_int8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B 
//------------------------------------------------------------------------------

// eWiseAdd C=A+B (or masked C<M>=A+B) for the LE_INT8 operator: the numeric
// work is done by the shared template GB_add_template.c, specialized via the
// GB_* macros defined earlier in this generated file (z = (x <= y), bool C).
GrB_Info GB_AaddB__le_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    // GB_DISABLE is nonzero when this operator/type combination is compiled
    // out (see the GxB_NO_* controls); the caller then falls back to the
    // generic kernel.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// eWiseMult for LE_INT8; same macro-specialization scheme as GB_AaddB above.
GrB_Info GB_AemultB__le_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x <= Bx [p]) for all p, with the scalar bound as the first
// operand.  Output is bool since LE is a comparison.
GrB_Info GB_bind1st__le_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    // embarrassingly parallel elementwise loop; each p is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] <= y) for all p, with the scalar bound as the second
// operand.
GrB_Info GB_bind2nd__le_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    int8_t aij = Ax [pA] ;      \
    Cx [pC] = (x <= aij) ;      \
}

// Fused transpose + bind1st: C = op (x, A') via GB_unop_transpose.c, which
// consumes the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__le_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function (generator quirk:
    // it redefines the same value)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    int8_t aij = Ax [pA] ;      \
    Cx [pC] = (aij <= y) ;      \
}

// Fused transpose + bind2nd: C = op (A', y).
GrB_Info GB_bind2nd_tran__le_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__pair_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This file specializes the generic binop templates for the PAIR operator on
// int16: cij = 1 regardless of the values of aij and bij.  Kernels marked
// "(none)" below are compiled out (#if 0) because PAIR does not need them.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pair_int16)
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pair_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__pair_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pair_int16)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = 1

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] -- a no-op here: PAIR never reads its inputs
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB] -- likewise a no-op for PAIR
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: PAIR ignores x and y and always produces 1
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_INT16 || GxB_NO_PAIR_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pair_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // GB_DISABLE is nonzero when this operator/type pair is compiled out;
    // the caller then falls back to the generic kernel.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns; this is a
    // harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix workspaces used by the add template; released by
    // GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap B
        if (!GBB (Bb, p)) continue ;
        // the empty statements below are the (vacuous) GETA/GETB for PAIR
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    ; ;                         \
    Cx [pC] = 1 ;               \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // generator quirk: GB_ATYPE is re-established (to the same value) here
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    ; ;                         \
    Cx [pC] = 1 ;               \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
omptarget.h
//===---- omptarget.h - OpenMP GPU initialization ---------------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of all library macros, types,
// and functions.
//
//===----------------------------------------------------------------------===//

#ifndef OMPTARGET_H
#define OMPTARGET_H

#include "common/allocator.h"
#include "common/debug.h" // debug
#include "common/state-queue.h"
#include "common/support.h"
#include "interface.h" // interfaces with omp, compiler, and user
#include "target_impl.h"

#define OMPTARGET_NVPTX_VERSION 1.1

// used by the library for the interface with the app
#define DISPATCH_FINISHED 0
#define DISPATCH_NOTFINISHED 1

// used by dynamic scheduling
#define FINISHED 0
#define NOT_FINISHED 1
#define LAST_CHUNK 2

#define BARRIER_COUNTER 0
#define ORDERED_COUNTER 1

// arguments needed for L0 parallelism only.
//
// Holds the pointer array passed to an outlined parallel region.  A fixed
// buffer of MAX_SHARED_ARGS slots is used by default; EnsureSize switches to
// a dynamically allocated array when more arguments are needed.
class omptarget_nvptx_SharedArgs {
public:
  // All these methods must be called by the master thread only.
  INLINE void Init() {
    args = buffer;
    nArgs = MAX_SHARED_ARGS;
  }
  INLINE void DeInit() {
    // Free any memory allocated for outlined parallel function with a large
    // number of arguments.
    if (nArgs > MAX_SHARED_ARGS) {
      SafeFree(args, "new extended args");
      Init();
    }
  }
  // Grow the argument array to at least `size` slots; any previous dynamic
  // allocation is released first.  NOTE(review): existing contents are not
  // copied over on growth -- callers appear to refill the array afterwards.
  INLINE void EnsureSize(size_t size) {
    if (size > nArgs) {
      if (nArgs > MAX_SHARED_ARGS) {
        SafeFree(args, "new extended args");
      }
      args = (void **)SafeMalloc(size * sizeof(void *), "new extended args");
      nArgs = size;
    }
  }
  // Called by all threads.
  INLINE void **GetArgs() const { return args; };

private:
  // buffer of pre-allocated arguments.
  void *buffer[MAX_SHARED_ARGS];
  // pointer to arguments buffer.
  // starts off as a pointer to 'buffer' but can be dynamically allocated.
  void **args;
  // starts off as MAX_SHARED_ARGS but can increase in size.
  uint32_t nArgs;
};

extern DEVICE omptarget_nvptx_SharedArgs
    EXTERN_SHARED(omptarget_nvptx_globalArgs);

// Worker slot type which is initialized with the default worker slot
// size of 4*32 bytes.
struct __kmpc_data_sharing_slot {
  __kmpc_data_sharing_slot *Next;
  __kmpc_data_sharing_slot *Prev;
  void *PrevSlotStackPtr;
  void *DataEnd;
  char Data[DS_Worker_Warp_Slot_Size];
};

// Data structure to keep in shared memory that traces the current slot, stack,
// and frame pointer as well as the active threads that didn't exit the current
// environment.
struct DataSharingStateTy {
  __kmpc_data_sharing_slot *SlotPtr[DS_Max_Warp_Number];
  void *StackPtr[DS_Max_Warp_Number];
  void * volatile FramePtr[DS_Max_Warp_Number];
  __kmpc_impl_lanemask_t ActiveThreads[DS_Max_Warp_Number];
};

extern DEVICE DataSharingStateTy EXTERN_SHARED(DataSharingState);

////////////////////////////////////////////////////////////////////////////////
// task ICV and (implicit & explicit) task state

// Per-task descriptor: packs the OpenMP ICVs (schedule, thread id, chunk
// size) plus flag bits, and links to the parent task via `prev`.
class omptarget_nvptx_TaskDescr {
public:
  // methods for flags
  INLINE omp_sched_t GetRuntimeSched() const;
  INLINE void SetRuntimeSched(omp_sched_t sched);
  INLINE int InParallelRegion() const { return items.flags & TaskDescr_InPar; }
  INLINE int InL2OrHigherParallelRegion() const {
    return items.flags & TaskDescr_InParL2P;
  }
  INLINE int IsParallelConstruct() const {
    return items.flags & TaskDescr_IsParConstr;
  }
  INLINE int IsTaskConstruct() const { return !IsParallelConstruct(); }
  // methods for other fields
  INLINE uint16_t &ThreadId() { return items.threadId; }
  INLINE uint64_t &RuntimeChunkSize() { return items.runtimeChunkSize; }
  INLINE omptarget_nvptx_TaskDescr *GetPrevTaskDescr() const { return prev; }
  INLINE void SetPrevTaskDescr(omptarget_nvptx_TaskDescr *taskDescr) {
    prev = taskDescr;
  }
  // init & copy
  INLINE void InitLevelZeroTaskDescr();
  INLINE void InitLevelOneTaskDescr(omptarget_nvptx_TaskDescr *parentTaskDescr);
  INLINE void Copy(omptarget_nvptx_TaskDescr *sourceTaskDescr);
  INLINE void CopyData(omptarget_nvptx_TaskDescr *sourceTaskDescr);
  INLINE void CopyParent(omptarget_nvptx_TaskDescr *parentTaskDescr);
  INLINE void CopyForExplicitTask(omptarget_nvptx_TaskDescr *parentTaskDescr);
  INLINE void CopyToWorkDescr(omptarget_nvptx_TaskDescr *masterTaskDescr);
  INLINE void CopyFromWorkDescr(omptarget_nvptx_TaskDescr *workTaskDescr);
  INLINE void CopyConvergentParent(omptarget_nvptx_TaskDescr *parentTaskDescr,
                                   uint16_t tid, uint16_t tnum);
  INLINE void SaveLoopData();
  INLINE void RestoreLoopData() const;

private:
  // bits for flags: (6 used, 2 free)
  //   3 bits (SchedMask) for runtime schedule
  //   1 bit (InPar) if this thread has encountered one or more parallel region
  //   1 bit (IsParConstr) if ICV for a parallel region (false = explicit task)
  //   1 bit (InParL2+) if this thread has encountered L2 or higher parallel
  //   region
  static const uint8_t TaskDescr_SchedMask = (0x1 | 0x2 | 0x4);
  static const uint8_t TaskDescr_InPar = 0x10;
  static const uint8_t TaskDescr_IsParConstr = 0x20;
  static const uint8_t TaskDescr_InParL2P = 0x40;

  // loop state saved/restored around nested worksharing (SaveLoopData /
  // RestoreLoopData)
  struct SavedLoopDescr_items {
    int64_t loopUpperBound;
    int64_t nextLowerBound;
    int64_t chunk;
    int64_t stride;
    kmp_sched_t schedule;
  } loopData;

  struct TaskDescr_items {
    uint8_t flags; // 6 bit used (see flag above)
    uint8_t unused;
    uint16_t threadId;         // thread id
    uint64_t runtimeChunkSize; // runtime chunk size
  } items;

  omptarget_nvptx_TaskDescr *prev;
};

// build on kmp
typedef struct omptarget_nvptx_ExplicitTaskDescr {
  omptarget_nvptx_TaskDescr
      taskDescr; // omptarget_nvptx task description (must be first)
  kmp_TaskDescr kmpTaskDescr; // kmp task description (must be last)
} omptarget_nvptx_ExplicitTaskDescr;

////////////////////////////////////////////////////////////////////////////////
// Descriptor of a parallel region (worksharing in general)

class omptarget_nvptx_WorkDescr {
public:
  // access to data
  INLINE omptarget_nvptx_TaskDescr *WorkTaskDescr() { return &masterTaskICV; }

private:
  omptarget_nvptx_TaskDescr masterTaskICV;
};

////////////////////////////////////////////////////////////////////////////////

// Per-team state: the team master's level-zero task ICVs, the descriptor of
// the active parallel region, and the per-warp preallocated data-sharing
// slots.
class omptarget_nvptx_TeamDescr {
public:
  // access to data
  INLINE omptarget_nvptx_TaskDescr *LevelZeroTaskDescr() {
    return &levelZeroTaskDescr;
  }
  INLINE omptarget_nvptx_WorkDescr &WorkDescr() {
    return workDescrForActiveParallel;
  }
  // init
  INLINE void InitTeamDescr();

  // Reset and return warp `wid`'s preallocated data-sharing slot.
  INLINE __kmpc_data_sharing_slot *GetPreallocatedSlotAddr(int wid) {
    worker_rootS[wid].DataEnd =
        &worker_rootS[wid].Data[0] + DS_Worker_Warp_Slot_Size;
    // We currently do not have a next slot.
    worker_rootS[wid].Next = 0;
    worker_rootS[wid].Prev = 0;
    worker_rootS[wid].PrevSlotStackPtr = 0;
    return (__kmpc_data_sharing_slot *)&worker_rootS[wid];
  }

private:
  omptarget_nvptx_TaskDescr
      levelZeroTaskDescr; // icv for team master initial thread
  omptarget_nvptx_WorkDescr
      workDescrForActiveParallel; // one, ONLY for the active par
  ALIGN(16)
  __kmpc_data_sharing_slot worker_rootS[DS_Max_Warp_Number];
};

////////////////////////////////////////////////////////////////////////////////
// thread private data (struct of arrays for better coalescing)
// tid refers here to the global thread id
// do not support multiple concurrent kernel a this time
class omptarget_nvptx_ThreadPrivateContext {
public:
  // task
  INLINE omptarget_nvptx_TaskDescr *Level1TaskDescr(int tid) {
    return &levelOneTaskDescr[tid];
  }
  INLINE void SetTopLevelTaskDescr(int tid,
                                   omptarget_nvptx_TaskDescr *taskICV) {
    topTaskDescr[tid] = taskICV;
  }
  INLINE omptarget_nvptx_TaskDescr *GetTopLevelTaskDescr(int tid) const;
  // parallel
  INLINE uint16_t &NumThreadsForNextParallel(int tid) {
    return nextRegion.tnum[tid];
  }
  // schedule (for dispatch)
  INLINE kmp_sched_t &ScheduleType(int tid) { return schedule[tid]; }
  INLINE int64_t &Chunk(int tid) { return chunk[tid]; }
  INLINE int64_t &LoopUpperBound(int tid) { return loopUpperBound[tid]; }
  INLINE int64_t &NextLowerBound(int tid) { return nextLowerBound[tid]; }
  INLINE int64_t &Stride(int tid) { return stride[tid]; }
  INLINE omptarget_nvptx_TeamDescr &TeamContext() { return teamContext; }

  INLINE void InitThreadPrivateContext(int tid);
  INLINE uint64_t &Cnt() { return cnt; }

private:
  // team context for this team
  omptarget_nvptx_TeamDescr teamContext;
  // task ICV for implicit threads in the only parallel region
  omptarget_nvptx_TaskDescr levelOneTaskDescr[MAX_THREADS_PER_TEAM];
  // pointer where to find the current task ICV (top of the stack)
  omptarget_nvptx_TaskDescr *topTaskDescr[MAX_THREADS_PER_TEAM];
  union {
    // Only one of the two is live at the same time.
    // parallel
    uint16_t tnum[MAX_THREADS_PER_TEAM];
  } nextRegion;
  // schedule (for dispatch)
  kmp_sched_t schedule[MAX_THREADS_PER_TEAM]; // remember schedule type for #for
  int64_t chunk[MAX_THREADS_PER_TEAM];
  int64_t loopUpperBound[MAX_THREADS_PER_TEAM];
  // state for dispatch with dyn/guided OR static (never use both at a time)
  int64_t nextLowerBound[MAX_THREADS_PER_TEAM];
  int64_t stride[MAX_THREADS_PER_TEAM];
  uint64_t cnt;
};

/// Memory manager for statically allocated memory.
class omptarget_nvptx_SimpleMemoryManager {
private:
  struct MemDataTy {
    volatile unsigned keys[OMP_STATE_COUNT];
  } MemData[MAX_SM] ALIGN(128);

  // key -> index into keys[]; requires OMP_STATE_COUNT to be a power of two
  // for the mask to be a valid modulo.
  INLINE static uint32_t hash(unsigned key) {
    return key & (OMP_STATE_COUNT - 1);
  }

public:
  INLINE void Release();
  INLINE const void *Acquire(const void *buf, size_t size);
};

////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////
// global data tables
////////////////////////////////////////////////////////////////////////////////

extern DEVICE omptarget_nvptx_SimpleMemoryManager
    omptarget_nvptx_simpleMemoryManager;
extern DEVICE uint32_t EXTERN_SHARED(usedMemIdx);
extern DEVICE uint32_t EXTERN_SHARED(usedSlotIdx);
#if _OPENMP
extern DEVICE uint8_t parallelLevel[MAX_THREADS_PER_TEAM / WARPSIZE];
#pragma omp allocate(parallelLevel) allocator(omp_pteam_mem_alloc)
#else
extern DEVICE uint8_t
    EXTERN_SHARED(parallelLevel)[MAX_THREADS_PER_TEAM / WARPSIZE];
#endif
extern DEVICE uint16_t EXTERN_SHARED(threadLimit);
extern DEVICE uint16_t EXTERN_SHARED(threadsInTeam);
extern DEVICE uint16_t EXTERN_SHARED(nThreads);
extern DEVICE omptarget_nvptx_ThreadPrivateContext *
    EXTERN_SHARED(omptarget_nvptx_threadPrivateContext);

extern DEVICE uint32_t EXTERN_SHARED(execution_param);
extern DEVICE void *EXTERN_SHARED(ReductionScratchpadPtr);

////////////////////////////////////////////////////////////////////////////////
// work function (outlined parallel/simd functions) and arguments.
// needed for L1 parallelism only.
////////////////////////////////////////////////////////////////////////////////

typedef void *omptarget_nvptx_WorkFn;
extern volatile DEVICE omptarget_nvptx_WorkFn
    EXTERN_SHARED(omptarget_nvptx_workFn);

////////////////////////////////////////////////////////////////////////////////
// get private data structures
////////////////////////////////////////////////////////////////////////////////

INLINE omptarget_nvptx_TeamDescr &getMyTeamDescriptor();
INLINE omptarget_nvptx_WorkDescr &getMyWorkDescriptor();
INLINE omptarget_nvptx_TaskDescr *
getMyTopTaskDescriptor(bool isSPMDExecutionMode);
INLINE omptarget_nvptx_TaskDescr *getMyTopTaskDescriptor(int globalThreadId);

////////////////////////////////////////////////////////////////////////////////
// inlined implementation
////////////////////////////////////////////////////////////////////////////////

// thin wrappers over compiler builtins (find-first-set / popcount), with
// 32- and 64-bit overloads
INLINE uint32_t __kmpc_impl_ffs(uint32_t x) { return __builtin_ffs(x); }
INLINE uint32_t __kmpc_impl_popc(uint32_t x) { return __builtin_popcount(x); }
INLINE uint32_t __kmpc_impl_ffs(uint64_t x) { return __builtin_ffsl(x); }
INLINE uint32_t __kmpc_impl_popc(uint64_t x) { return __builtin_popcountl(x); }

#include "common/omptargeti.h"

#endif
GrB_Matrix_exportHint.c
//------------------------------------------------------------------------------ // GrB_Matrix_exportHint: determine sizes of arrays for GrB_Matrix_export //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB_transpose.h" #define GB_FREE_ALL ; GrB_Info GrB_Matrix_exportHint // suggest the best export format ( GrB_Format *format, // export format GrB_Matrix A // matrix to export ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GrB_Matrix_exportHint (&format, A)") ; GB_BURBLE_START ("GrB_Matrix_exportHint") ; GB_RETURN_IF_NULL (format) ; GB_RETURN_IF_NULL_OR_FAULTY (A) ; // finish any pending work since this can change the sparsity of A GB_MATRIX_WAIT (A) ; int sparsity = GB_sparsity (A) ; bool is_csc = A->is_csc ; //-------------------------------------------------------------------------- // determine format that requires the least amount of modification //-------------------------------------------------------------------------- switch (sparsity) { default: case GxB_SPARSE : // CSR and CSC formats are supported by GraphBLAS, so if the matrix // is sparse by-row or sparse by-column, then suggest CSR or CSC. // The matrix can be exported with no change at all. case GxB_BITMAP : // Bitmap is not supported as a GrB_Format. It cannot be exported // as full, in general, so select CSR or CSC. (*format) = is_csc ? GrB_CSC_FORMAT : GrB_CSR_FORMAT ; break ; case GxB_HYPERSPARSE : // Hypersparse is not supported as a GrB_Format. Expanding a huge // hypersparse matrix to sparse can be costly, so suggest COO. 
(*format) = GrB_COO_FORMAT ; break ; case GxB_FULL : // Full is not supported by GraphBLAS (*format) = is_csc ? GrB_CSC_FORMAT : GrB_CSR_FORMAT ; // if full was supported by GraphBLAS; // (*format) = is_csc ? GrB_DENSE_COL_FORMAT : GrB_DENSE_ROW_FORMAT ; break ; } GB_BURBLE_END ; #pragma omp flush return (GrB_SUCCESS) ; }
mkldnn_quantize_v2-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file mkldnn_quantize_v2-inl.h
 * \brief MKL-DNN (v0.x API) implementation of the quantize_v2 operator:
 *        converts an fp32 NDArray to int8/uint8 via a scaled reorder.
 */

#ifndef MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
#if MXNET_USE_MKLDNN == 1
#include <algorithm>
#include <string>
#include <vector>
#include "../../nn/mkldnn/mkldnn_base-inl.h"
#include "../quantize_v2-inl.h"

namespace mxnet {
namespace op {

// Stateful quantize operator: caches the MKL-DNN reorder primitive (and the
// min/max it was built for) across calls so the primitive is only rebuilt
// when the observed data range changes.
class SgMKLDNNQuantizeOperator {
 public:
  explicit SgMKLDNNQuantizeOperator(const nnvm::NodeAttrs &attrs)
      : param_(nnvm::get<QuantizeV2Param>(attrs.parsed)) {}

  // Quantize inputs[0] into outputs[0]; outputs[1]/outputs[2] receive the
  // float min/max describing the quantized range.
  void Forward(const OpContext &ctx, const std::vector<NDArray> &inputs,
               const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs);

 private:
  bool initalized_{false};  // true once the reorder primitive has been built
                            // (sic: spelling kept as upstream)
  QuantizeV2Param param_;
  float cached_data_min_{0.f};  // data range the cached primitive was built for
  float cached_data_max_{0.f};
  std::shared_ptr<mkldnn::memory> i_mem_;   // input memory bound per call
  std::shared_ptr<mkldnn::memory> o_mem_;   // output memory bound per call
  std::shared_ptr<mkldnn::reorder> fwd_pd_; // cached scaled-reorder primitive
};

void SgMKLDNNQuantizeOperator::Forward(const OpContext &ctx, const std::vector<NDArray> &inputs,
                                       const std::vector<OpReqType> &req,
                                       const std::vector<NDArray> &outputs) {
  float quantized_range = 0.0;
  NDArray in_buffer = inputs[0];
  float data_min = mshadow::red::limits::MaxValue<float>();
  float data_max = mshadow::red::limits::MinValue<float>();

  // Pass through quantized data: input is already int8/uint8, so only the
  // min/max outputs (and possibly a copy of the payload) are produced.
  if (inputs[0].dtype() == mshadow::kUint8 || inputs[0].dtype() == mshadow::kInt8) {
    if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
      *outputs[1].data().dptr<float>() = param_.min_calib_range.value();
      *outputs[2].data().dptr<float>() = param_.max_calib_range.value();
    } else {
      // No calibration info: report the full representable range of the dtype.
      if (inputs[0].dtype() == mshadow::kUint8) {
        *outputs[1].data().dptr<float>() = 0;
        *outputs[2].data().dptr<float>() = 255;
      } else {
        *outputs[1].data().dptr<float>() = -127;
        *outputs[2].data().dptr<float>() = 127;
      }
    }
    if (req[0] != kWriteInplace) {
      const_cast<NDArray &>(outputs[0]).CopyFrom(*inputs[0].GetMKLDNNData());
      MKLDNNStream::Get()->Submit();
    }
  } else {
    // fp32 input: determine the data range, then run a scaled reorder.
    if (in_buffer.IsView() && in_buffer.IsMKLDNNData()) in_buffer = inputs[0].Reorder2Default();
    auto i_mem = in_buffer.GetMKLDNNData();
    if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
      data_min = param_.min_calib_range.value();
      data_max = param_.max_calib_range.value();
    } else {
      // no calib info: scan the tensor for its min/max, one partial
      // min/max per OMP thread, then reduce the per-thread results.
      in_buffer = inputs[0].Reorder2Default();
      auto in_ptr = in_buffer.data().dptr<float>();
      auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      std::vector<float> data_maxs(nthreads, data_max);
      std::vector<float> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
      for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
        int tid = omp_get_thread_num();
        if (in_ptr[i] > data_maxs[tid]) data_maxs[tid] = in_ptr[i];
        if (in_ptr[i] < data_mins[tid]) data_mins[tid] = in_ptr[i];
      }
      for (index_t i = 0; i < nthreads; i++) {
        if (data_maxs[i] > data_max) data_max = data_maxs[i];
        if (data_mins[i] < data_min) data_min = data_mins[i];
      }
      // Range changed since the cached primitive was built: force a rebuild.
      if (initalized_ && (cached_data_min_ != data_min || cached_data_max_ != data_max))
        initalized_ = false;
    }
    // Write output min/max (int8 output uses a symmetric range).
    auto out_type = GetQuantizeOutputType(param_);
    if (out_type == mshadow::kUint8) {
      quantized_range = kUint8Range;
      *outputs[1].data().dptr<float>() = data_min;
      *outputs[2].data().dptr<float>() = data_max;
    } else if (out_type == mshadow::kInt8) {
      float real_range = MaxAbs(data_min, data_max);
      quantized_range = kInt8Range;
      *outputs[1].data().dptr<float>() = -real_range;
      *outputs[2].data().dptr<float>() = real_range;
    } else {
      LOG(FATAL) << "mkldnn quantize op only supports int8 and uint8 as output type";
    }
    if (!initalized_) {
      // (Re)build the reorder primitive with an output scale of
      // quantized_range / real_range applied during the copy.
      cached_data_min_ = data_min;
      cached_data_max_ = data_max;
      float real_range = MaxAbs(data_min, data_max);
      float scale = quantized_range / real_range;
      primitive_attr attr;
      const int mask = 0;  // one common scale for the whole tensor
      std::vector<float> scales = {scale};
      attr.set_output_scales(mask, scales);
      attr.set_int_output_round_mode(round_nearest);
      mkldnn::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine();
      auto i_mpd = i_mem->get_primitive_desc();
      auto i_desc = i_mpd.desc();
      mkldnn::memory::format i_fmt = static_cast<mkldnn::memory::format>(i_desc.data.format);
      // Blocked/nchw fp32 layouts are emitted as plain nhwc int8.
      if (i_fmt == mkldnn::memory::format::nchw || i_fmt == mkldnn::memory::format::nChw8c ||
          i_fmt == mkldnn_nChw16c) {
        i_fmt = mkldnn::memory::format::nhwc;
      }
      size_t i_ndim = in_buffer.shape().ndim();
      mkldnn::memory::dims i_dims = mkldnn::memory::dims(i_ndim);
      for (size_t i = 0; i < i_ndim; i++) {
        i_dims[i] = static_cast<int>(in_buffer.shape()[i]);
      }
      auto o_desc = mkldnn::memory::desc(i_dims, get_mkldnn_type(out_type), i_fmt);
      auto o_mpd = memory::primitive_desc(o_desc, cpu_engine);
      auto reorder_pd = reorder::primitive_desc(i_mpd, o_mpd, attr);
      // Memories are created with null handles; real buffers are attached
      // per call via set_data_handle below.
      i_mem_ = std::make_shared<mkldnn::memory>(i_mpd, nullptr);
      o_mem_ = std::make_shared<mkldnn::memory>(o_mpd, nullptr);
      fwd_pd_ = std::make_shared<mkldnn::reorder>(reorder_pd, *i_mem_, *o_mem_);
      initalized_ = true;
    }
    // Bind this call's buffers to the cached primitive and execute it.
    auto o_mem = CreateMKLDNNMem(outputs[0], o_mem_->get_primitive_desc(), req[0]);
    i_mem_->set_data_handle(i_mem->get_data_handle());
    o_mem_->set_data_handle(o_mem.second->get_data_handle());
    MKLDNNStream::Get()->RegisterPrim(*fwd_pd_);
    CommitOutput(outputs[0], o_mem);
    MKLDNNStream::Get()->Submit();
  }
}

// Stateless dispatch shim: fetch the cached operator from the op state and
// forward to it.
static void SgMKLDNNQuantizeForward(const OpStatePtr &state_ptr, const OpContext &ctx,
                                    const std::vector<NDArray> &inputs,
                                    const std::vector<OpReqType> &req,
                                    const std::vector<NDArray> &outputs) {
  SgMKLDNNQuantizeOperator &op = state_ptr.get_state<SgMKLDNNQuantizeOperator>();
  op.Forward(ctx, inputs, req, outputs);
}

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_USE_MKLDNN == 1
#endif  // MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
DRB003-antidep2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* A two-level loop nest with loop carried anti-dependence on the outer
 * level. Data race pair: a[i][j]@67:7 vs. a[i+1][j]@67:18
 *
 * NOTE(review): the header documents a race between a[i][j] and a[i+1][j],
 * which arises when the OUTER i loop is parallelized.  In this variant the
 * `parallel for` sits on the inner j loop while i advances serially, so each
 * j iteration touches distinct elements.  Confirm against the upstream
 * DataRaceBench DRB003 source (pragma on the i loop) before relying on this
 * file to exhibit the documented race.  Do not "fix" the dependence itself:
 * this is a race-detection benchmark and the anti-dependence is intentional.
 */
#include <stdio.h>
#include <omp.h>

int main(int argc,char *argv[])
{
  int i;
  int j;
  int len = 20;
  double a[20][20];

  /* Initialization: every (i,j) element is written exactly once, so the
   * nested parallel loops here are race-free. */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= len - 1; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= len - 1; j += 1) {
      a[i][j] = 0.5;
    }
  }

  /* Update sweep carrying the anti-dependence across i: iteration i reads
   * a[i+1][j], which iteration i+1 overwrites. */
  for (i = 0; i <= len - 1 - 1; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= len - 1; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }

  printf("a[10][10]=%lf\n",a[10][10]);
  return 0;
}
[Gyan]-10_omp_producer_consumer_unbounded.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Unbounded producer/consumer demo on top of a singly linked list queue.
 * Thread 0 produces one item per keypress, thread 1 consumes one item per
 * keypress; all queue manipulation is serialized with `omp critical`.
 */

struct node {
  int dat;            /* payload */
  struct node *next;  /* next element towards the tail */
};
typedef struct node node;

/* Number of items produced so far; only updated inside producer()'s
 * critical section. */
int count = 0;

/* FIX: the original prototypes were declared with empty parameter lists
 * (`void producer();`), which disables argument type checking in C and is
 * an outright error in C++.  They now match the definitions below. */
int enqueue(node **head, int d);
int dequeue(node **head);
void disp(node *head);
void producer(node **head);
void consumer(node **head);

int main()
{
  node *head = NULL;

  omp_set_dynamic(0); /* insist on exactly the requested thread count */
#pragma omp parallel num_threads(2)
  {
    /* FIX: `id` used to be a single variable declared in main() and thus
     * shared by both threads; one thread could overwrite it before the
     * other had read it, so both threads could take the same branch.
     * Declaring it inside the parallel region makes it thread-private. */
    int id = omp_get_thread_num();
    if (id == 0) {
      while (1) {
        producer(&head);
        fgetc(stdin); /* wait for a keypress before producing again */
      }
    } else {
      while (1) {
        consumer(&head);
        fgetc(stdin); /* wait for a keypress before consuming again */
      }
    }
  }
  return 0;
}

/* Append one item (count+1) to the queue and bump the produced counter.
 * The whole operation is one critical section so the list and `count`
 * stay consistent between the two threads. */
void producer(node **head)
{
#pragma omp critical
  {
    int x = enqueue(head, count + 1);
    if (!x)
      printf("producer failed! memory full! \n");
    else {
      count++;
      printf("produced %d \n", count);
    }
  }
}

/* Remove one item from the head of the queue and report it.
 * NOTE(review): dequeue() returns 0 both for "queue empty" and for a stored
 * value of 0; values produced here start at 1, so the ambiguity is harmless
 * in this program. */
void consumer(node **head)
{
#pragma omp critical
  {
    int x = dequeue(head);
    if (!x)
      printf("nothing left to consume!! \n");
    else
      printf("consumed %d \n", x);
  }
}

/* Append value d at the tail of the list.
 * Returns 1 on success, 0 if the node allocation failed. */
int enqueue(node **head, int d)
{
  node *nn = (node *)malloc(sizeof(node));
  if (nn == NULL) /* allocation failed */
    return 0;
  nn->dat = d;
  nn->next = NULL;
  if (*head == NULL)
    *head = nn;
  else {
    node *ptr = *head;
    while (ptr->next != NULL)
      ptr = ptr->next;
    ptr->next = nn;
  }
  return 1;
}

/* Pop the value at the head of the list and free its node.
 * Returns the stored value, or 0 if the list is empty. */
int dequeue(node **head)
{
  int tmp;
  node *n;
  if (*head == NULL) /* empty */
    return 0;
  n = *head;
  tmp = (*head)->dat;
  *head = (*head)->next;
  free(n);
  return tmp;
}

/* Debug helper: print the whole list (not called from main). */
void disp(node *head)
{
  node *p = head;
  printf("List : ");
  while (p != NULL) {
    printf("%d ", p->dat);
    p = p->next;
  }
  printf("\n");
}
binary_tree.c
// The Computer Language Benchmarks Game // https://salsa.debian.org/benchmarksgame-team/benchmarksgame/ // // Contributed by Jeremy Zerfas // Based on the C++ program from Jon Harrop, Alex Mizrahi, and Bruno Coutinho. // *reset* // This controls the width of lines that are output by this program. #define MAXIMUM_LINE_WIDTH 60 #include <stdint.h> #include <stdlib.h> #include <stdio.h> typedef off_t off64_t; // This is needed to keep APR happy on 32 bit systems. #include <apr-1/apr_pools.h> // intptr_t should be the native integer type on most sane systems. typedef intptr_t intnative_t; typedef struct tree_node{ struct tree_node * left_Node, * right_Node; } tree_node; // Create a binary tree of depth tree_Depth in memory_Pool, set the root node's // value to root_Node_Value, and finally return a pointer to the created binary // tree. static inline tree_node * create_Tree(const intnative_t tree_Depth, apr_pool_t * const memory_Pool){ tree_node * const root_Node=apr_palloc(memory_Pool, sizeof(tree_node)); // If tree_Depth is one or more then recursively call create_Tree() in order // to create the left and right subtrees using 2*root_Node_Value-1 and // 2*root_Node_Value respectively as the root values for those subtrees. if(tree_Depth>0){ root_Node->left_Node=create_Tree(tree_Depth-1, memory_Pool); root_Node->right_Node=create_Tree(tree_Depth-1, memory_Pool); }else root_Node->left_Node=root_Node->right_Node=NULL; return root_Node; } // Compute and return the checksum for the binary tree that has root_Node as the // root node. static inline intnative_t compute_Tree_Checksum( const tree_node * const root_Node){ // If there are subtrees then recursively call compute_Tree_Checksum() on // them and factor their values into the checksum, otherwise just return // the value of root_Node. 
if(root_Node->left_Node) return compute_Tree_Checksum(root_Node->left_Node)+ compute_Tree_Checksum(root_Node->right_Node)+1; else return 1; } int main(int argc, char ** argv){ // Set minimum_Tree_Depth to 4 and maximum_Tree_Depth to the maximum of what // was specified as the argument to the program and minimum_Tree_Depth+2. const intnative_t minimum_Tree_Depth=4; intnative_t maximum_Tree_Depth=atoi(argv[1]); if(maximum_Tree_Depth < minimum_Tree_Depth+2) maximum_Tree_Depth=minimum_Tree_Depth+2; apr_initialize(); apr_pool_t * memory_Pool; // Create a memory pool, create a binary tree of depth maximum_Tree_Depth+1, // compute the checksum of the binary tree, print the statistics, and then // delete the memory pool. apr_pool_create_unmanaged(&memory_Pool); tree_node * stretch_Tree=create_Tree(maximum_Tree_Depth+1, memory_Pool); printf("stretch tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth+1, (intmax_t)compute_Tree_Checksum(stretch_Tree)); apr_pool_destroy(memory_Pool); // Create a memory pool and then create a long-lived binary tree of depth // maximum_Tree_Depth which will be left alone for a while while // more binary trees get allocated and deallocaited as required by the // rules. We'll finish working with this later. apr_pool_create_unmanaged(&memory_Pool); tree_node * long_Lived_Tree=create_Tree(maximum_Tree_Depth, memory_Pool); // Create a lot of binary trees in parallel of depths ranging from // minimum_Tree_Depth to maximum_Tree_Depth, compute and tally up all their // checksums, destroy the trees, and then record the statistics to // output_Buffer[] so they can be displayed in order later. 
char output_Buffer[maximum_Tree_Depth+1][MAXIMUM_LINE_WIDTH+1]; intnative_t current_Tree_Depth; #pragma omp parallel for for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2){ intnative_t iterations=1<<(maximum_Tree_Depth-current_Tree_Depth+ minimum_Tree_Depth); // Create a memory pool for this thread to use. apr_pool_t * thread_Memory_Pool; apr_pool_create_unmanaged(&thread_Memory_Pool); intnative_t i=1, total_Trees_Checksum=0; for(; i<=iterations; ++i){ // Create a binary tree of depth current_Tree_Depth tree_node * const tree_1=create_Tree(current_Tree_Depth, thread_Memory_Pool); total_Trees_Checksum+=compute_Tree_Checksum(tree_1); apr_pool_clear(thread_Memory_Pool); } apr_pool_destroy(thread_Memory_Pool); // Record the statistics for the trees of depth current_Tree_Depth. sprintf(output_Buffer[current_Tree_Depth], "%jd\t trees of depth %jd\t check: %jd\n", (intmax_t)iterations, (intmax_t)current_Tree_Depth, (intmax_t)total_Trees_Checksum); } // Print the statistics for all of the various tree depths. for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2) printf("%s", output_Buffer[current_Tree_Depth]); // Compute the checksum of the long-lived binary tree that we created // earlier, print the statistics, and then delete the memory pool. printf("long lived tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth, (intmax_t)compute_Tree_Checksum(long_Lived_Tree)); apr_pool_destroy(memory_Pool); apr_terminate(); return 0; }
bug_set_schedule_0.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <omp.h> #include "omp_testsuite.h" /* Test that the chunk size is set to default (1) when chunk size <= 0 is specified */ int a = 0; int test_set_schedule_0() { int i; a = 0; omp_set_schedule(omp_sched_dynamic,0); #pragma omp parallel { #pragma omp for schedule(runtime) for(i = 0; i < 10; i++) { #pragma omp atomic a++; if(a > 10) exit(1); } } return a==10; } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_set_schedule_0()) { num_failed++; } } return num_failed; }
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 16; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_binop__rdiv_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): hand edits here will be lost on regeneration; change the
// Generator/ sources instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rdiv_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__rdiv_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__rdiv_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__rdiv_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rdiv_fp64)
// A*D function (colscale):         GB (_AxD__rdiv_fp64)
// D*A function (rowscale):         GB (_DxB__rdiv_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__rdiv_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__rdiv_fp64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rdiv_fp64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rdiv_fp64)
// C=scalar+B                       GB (_bind1st__rdiv_fp64)
// C=scalar+B'                      GB (_bind1st_tran__rdiv_fp64)
// C=A+scalar                       GB (_bind2nd__rdiv_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__rdiv_fp64)

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = (bij / aij)
// (RDIV is "reverse divide": z = f(x,y) computes y/x.)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (note the reversed operands: RDIV)
#define GB_BINOP(z,x,y,i,j) \
    z = (y / x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_FP64 || GxB_NO_RDIV_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (No GB_DISABLE guard here: this variant is only reachable for those ops.)

void GB (_Cdense_ewise3_accum__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rdiv_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): this second return is unreachable (the block above
    // always returns first); harmless generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rdiv_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rdiv_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rdiv_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// For RDIV bind1st, z = f(x,bij) = bij / x, with x the bound scalar.

GrB_Info GB (_bind1st__rdiv_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = (bij / x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// For RDIV bind2nd, z = f(aij,y) = y / aij, with y the bound scalar.

GrB_Info GB (_bind2nd__rdiv_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = (y / aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    double aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (aij / x) ;               \
}

GrB_Info GB (_bind1st_tran__rdiv_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    double aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (y / aij) ;               \
}

GrB_Info GB (_bind2nd_tran__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ordering_op-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2016 by Contributors
 * \file ordering_op-inl.h
 * \brief Function definition of ordering operators
 */
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_

#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <string>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"
#include "../../api/operator/op_utils.h"

namespace mshadow {
// Reinterpret a contiguous tensor under a new shape WITHOUT copying:
// the returned tensor aliases src.dptr_ and src.stream_.  The caller is
// responsible for target_shape having the same total number of elements
// (not checked here; only contiguity is enforced).
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
                                                   Shape<dst_dim> target_shape) {
  CHECK_EQ(src.CheckContiguous(), true);
  return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
};  // namespace mshadow (note: the trailing ';' after the brace is redundant but harmless)


namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
}  // topk_enum

struct TopKParam : public dmlc::Parameter<TopKParam> {
  dmlc::optional<int> axis;  // axis to select along; unset/flattened handling in ParseTopKParam
  int k;                     // number of elements to keep (<= axis length; k < 1 => global sort)
  int ret_typ;               // one of topk_enum::TopKReturnType
  bool is_ascend;            // false => pick k largest
  int dtype;                 // output index dtype (see DMLC_DECLARE_FIELD below)
DMLC_DECLARE_PARAMETER(TopKParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to choose the top k indices." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(k).set_default(1) .describe("Number of top elements to select," " should be always smaller than or equal to the element number in the given axis." " A global sort is performed if set k < 1."); DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices) .add_enum("value", topk_enum::kReturnValue) .add_enum("indices", topk_enum::kReturnIndices) .add_enum("mask", topk_enum::kReturnMask) .add_enum("both", topk_enum::kReturnBoth) .describe("The return type.\n" " \"value\" means to return the top k values," " \"indices\" means to return the indices of the top k values," " \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values." " \"both\" means to return a list of both values and indices of top k elements."); DMLC_DECLARE_FIELD(is_ascend).set_default(false) .describe("Whether to choose k largest or k smallest elements." " Top K largest elements will be chosen if set to false."); DMLC_DECLARE_FIELD(dtype) // TODO(srivrohi): remove support for real data type in mxnet-2.0 .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("int64", mshadow::kInt64) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(mshadow::kFloat32) .describe("DType of the output indices when ret_typ is \"indices\" or \"both\". 
" "An error will be raised if the selected data type cannot precisely represent the " "indices."); } std::string ReturnType2String(int ret_typ) { switch (ret_typ) { case topk_enum::kReturnValue: return "value"; case topk_enum::kReturnIndices: return "indices"; case topk_enum::kReturnMask: return "mask"; case topk_enum::kReturnBoth: return "both"; default: LOG(FATAL) << "Unknown return type enum " << ret_typ; } LOG(FATAL) << "should not reach here "; return ""; } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, k_s, ret_typ_s, is_ascend_s, dtype_s; axis_s << axis; k_s << k; dtype_s << dtype; ret_typ_s << ret_typ; is_ascend_s << is_ascend; (*dict)["axis"] = axis_s.str(); (*dict)["k"] = k_s.str(); (*dict)["ret_typ"] = ReturnType2String(ret_typ); (*dict)["is_ascend"] = is_ascend_s.str(); (*dict)["dtype"] = MXNetTypeWithBool2String(dtype); } }; struct SortParam : public dmlc::Parameter<SortParam> { dmlc::optional<int> axis; bool is_ascend; DMLC_DECLARE_PARAMETER(SortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to choose sort the input tensor." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, is_ascend_s; axis_s << axis; is_ascend_s << is_ascend; (*dict)["axis"] = axis_s.str(); (*dict)["is_ascend_s"] = is_ascend_s.str(); } }; struct ArgSortParam : public dmlc::Parameter<ArgSortParam> { dmlc::optional<int> axis; bool is_ascend; int dtype; DMLC_DECLARE_PARAMETER(ArgSortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to sort the input tensor." " If not given, the flattened array is used. 
Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); DMLC_DECLARE_FIELD(dtype) // TODO(srivrohi): remove support for real data type in mxnet-2.0 .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("int64", mshadow::kInt64) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(mshadow::kFloat32) .describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or" " \"both\". An error will be raised if the selected data type cannot precisely " "represent the indices."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, is_ascend_s, dtype_s; axis_s << axis; is_ascend_s << is_ascend; dtype_s << dtype; (*dict)["axis"] = axis_s.str(); (*dict)["is_ascend_s"] = is_ascend_s.str(); (*dict)["dtype"] = MXNetTypeWithBool2String(dtype); } }; template<typename IDXType = index_t> inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape, size_t *batch_size, IDXType *element_num, int *axis, IDXType *k, bool *do_transpose, bool *is_ascend) { *do_transpose = false; *k = param.k; *is_ascend = param.is_ascend; // get batch_size, axis and element_num if (!static_cast<bool>(param.axis)) { // No axis given *axis = 0; *batch_size = 1; *element_num = src_shape.Size(); } else { *axis = param.axis.value(); if (*axis < 0) { *axis += src_shape.ndim(); } CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim())) << "Invalid axis! 
axis should be between 0 and " << src_shape.ndim() << ", found axis=" << *axis; if (src_shape[*axis] != 0) { *batch_size = src_shape.Size() / src_shape[*axis]; } *element_num = src_shape[*axis]; if (*axis != src_shape.ndim() - 1) { *do_transpose = true; } } // get k if (param.k <= 0) { *k = *element_num; } // get target_shape if (!static_cast<bool>(param.axis)) { if (param.ret_typ != topk_enum::kReturnMask) { *target_shape = mshadow::Shape1(*k); } else { *target_shape = src_shape; } } else { *target_shape = src_shape; if (param.ret_typ != topk_enum::kReturnMask) { (*target_shape)[*axis] = *k; } } CHECK(*k >= 0 && *k <= *element_num) << "k must be smaller than " << *element_num << ", get k = " << *k; } using namespace mshadow; struct fill_ind_to_one { template<typename DType, typename IDXType> MSHADOW_XINLINE static void Map(index_t i, const IDXType* indices, DType* out) { out[indices[i]] = static_cast<DType>(1); } }; struct fill_ind { template<typename DType, typename IDXType> MSHADOW_XINLINE static void Map(index_t i, const IDXType* indices, const DType* val, int req, DType* out) { KERNEL_ASSIGN(out[indices[i]], req, val[i]); } }; template<typename DType, typename IDXType> MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat, const Tensor<cpu, 1, IDXType>& ind, const Tensor<cpu, 1, char>& work, IDXType K, IDXType N, bool is_ascend, Stream<cpu> *s) { // Use full sort when K is relatively large. const bool full_sort(K*8 > N); // Batch size. const size_t M(work.size(0)/(sizeof(DType)*N)); const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(M); ++i) { // Tensor `work` stores the flattened source data, while `dat` stores the sorted result. 
    DType *vals = reinterpret_cast<DType*>(work.dptr_);
    DType *sorted_vals = dat.dptr_+i*N;
    IDXType *indices = ind.dptr_+i*N;
    if (is_ascend) {
      if (full_sort) {
        // full argsort of this batch's N indices by value
        std::sort(indices, indices+N,
                  [&](const IDXType& i1, const IDXType& i2){ return vals[i1] < vals[i2]; });
      } else {
        // only the first K positions need to end up sorted
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const IDXType& i1, const IDXType& i2){ return vals[i1] < vals[i2]; });
      }
    } else {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const IDXType& i1, const IDXType& i2){ return vals[i1] > vals[i2]; });
      } else {
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const IDXType& i1, const IDXType& i2){ return vals[i1] > vals[i2]; });
      }
    }
    // gather the top-K values in sorted order
    for (IDXType j = 0; j < K; ++j) {
      sorted_vals[j] = vals[indices[j]];
    }
  }
}

#ifdef __CUDACC__

// Ordering predicate for (value, index) pairs: true when (val1, ind1) ranks
// strictly ahead of (val2, ind2) for the requested direction.
template<typename DType, typename IDXType>
MSHADOW_XINLINE bool TopKCompare(DType val1, IDXType ind1, DType val2, IDXType ind2,
                                 bool is_ascend) {
  // Negative indices denote undefined values which are considered arbitrary small resp. large.
  return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2)));
}

template<typename DType, typename IDXType>
MSHADOW_XINLINE void MergeTopK(IDXType K, DType *val1, IDXType *ind1, DType *val2, IDXType *ind2,
                               bool is_ascend) {
  // In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
  // [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
  IDXType i1(K-1), i2(K-1);
  for (IDXType i = 0; i < K; ++i) {
    if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
      --i2;
    } else {
      --i1;
    }
  }
  // Now merge the lists from back to front.
  for (IDXType i = K; i--;) {
    if (i2 < 0 || i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend)) {
      val1[i] = val1[i1];
      ind1[i] = ind1[i1];
      --i1;
    } else {
      val1[i] = val2[i2];
      ind1[i] = ind2[i2];
      --i2;
    }
  }
}

// One thread block per batch item; each thread keeps its own sorted top-K
// candidate list in dynamically-sized shared memory, then the per-thread
// lists are pairwise-merged (tree reduction) into the final result.
template<typename DType, typename IDXType>
__global__ void PartialSortSmallK(IDXType K, IDXType N, DType *val, IDXType *ind, bool is_ascend) {
  // Buffer for pairwise reduction.
  extern __shared__ __align__(sizeof(IDXType)) unsigned char temp_smem[];
  IDXType *buff = reinterpret_cast<IDXType *>(temp_smem);
  // Start of buffer sections associated with this thread.
  const IDXType offset(threadIdx.x*K);
  IDXType *ind_buff = reinterpret_cast<IDXType*>(&buff[offset]);
  DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
  // Initialize top-K values for this thread.
  // (index -1 marks "undefined" slots; see TopKCompare)
  for (IDXType i = 0; i < K; ++i) {
    ind_buff[i] = -1;
  }
  // Range of values this thread cares about. Each thread block processes
  // a different batch item (i.e. a different set of ind/val where we
  // have to select the top-K elements). All threads within the same
  // block work on the same batch item.
  const IDXType first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
  // Select top-K from this range and store it sorted in the buffer.
  // We assume a small K, so linear insertion is o.k.
  for (IDXType i = first; i < last; i += blockDim.x) {
    DType cur_val(val[i]);
    IDXType cur_ind(ind[i]);
    for (IDXType j = K; j-- && TopKCompare(cur_val, cur_ind,
                                           val_buff[j], ind_buff[j], is_ascend); ) {
      if (j+1 < K) {
        val_buff[j+1] = val_buff[j];
        ind_buff[j+1] = ind_buff[j];
      }
      val_buff[j] = cur_val;
      ind_buff[j] = cur_ind;
    }
  }
  // Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
  // necessary a power of two, therefore the additional checks for last_s.
  for (IDXType s = (blockDim.x+1)/2, last_s = blockDim.x;
       last_s > 1; last_s = s, s = (s+1)/2) {
    __syncthreads();
    if (threadIdx.x < s && threadIdx.x+s < last_s) {
      MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
    }
  }
  // Final updates on master thread.
  if (threadIdx.x == 0) {
    for (IDXType i = 0; i < K; ++i) {
      ind[blockIdx.x*N+i] = ind_buff[i];
      val[blockIdx.x*N+i] = val_buff[i];
    }
  }
}

template<typename DType, typename IDXType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat,
                                   const Tensor<gpu, 1, IDXType>& ind,
                                   const Tensor<gpu, 1, char>& work,
                                   IDXType K, IDXType N, bool is_ascend,
                                   Stream<gpu> *s) {
  // Use full sort for all but very small K for which we
  // can do a partial sort entirely within shared memory.
  const bool full_sort(K > 5);
  // Batch size.
  const size_t M(dat.size(0)/N);
  if (full_sort) {
    // Divide workspace into two parts. The first one is needed to store batch ids.
    size_t alignment = std::max(sizeof(DType), sizeof(IDXType));
    size_t id_size = PadBytes(sizeof(IDXType) * ind.size(0), alignment);
    Tensor<gpu, 1, IDXType> batch_id(reinterpret_cast<IDXType*>(work.dptr_),
                                     Shape1(ind.size(0)), s);
    Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
    mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
    if (M > 1) {
      // Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
    }
  } else {
    const IDXType nthreads(mshadow::cuda::kBaseThreadNum);
    // shared memory: per-thread K indices plus K values
    PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(IDXType)+sizeof(DType)),
                        mshadow::Stream<gpu>::GetStream(s)>>>
      (K, N, dat.dptr_, ind.dptr_, is_ascend);
  }
}
#endif

/*!
 * \brief Implementation of the TopK operation
 *
 *
 * \param ctx the running context
 * \param resource temporary resource handler
 * \param src the Source blob
 * \param ret the destination blobs
 * \param param the topk parameters
 * \tparam xpu the device type.
 * \tparam DType type of the output value/mask.
 * \tparam IDType type of the output indices.
 * \tparam IDXType integer type used for the internal index workspace
 *         (int32_t for small inputs, index_t otherwise; see TopK()).
 */
template<typename xpu, typename DType, typename IDType, typename IDXType>
void TopKImpl(const RunContext &ctx,
              const Resource &resource,
              const std::vector<OpReqType>& req,
              const TBlob& src,
              const std::vector<TBlob>& ret,
              const TopKParam& param) {
  using namespace mshadow;
  using namespace mshadow::expr;
  // 0. If input shape is 0-shape, directly return
  if (src.Size() == 0) return;
  // 1. Parse and initialize information
  Stream<xpu> *s = ctx.get_stream<xpu>();
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, DType> sorted_dat;
  Tensor<xpu, 1, IDXType> indices, sel_indices;
  size_t batch_size = 0;
  IDXType element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  IDXType k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(IDXType));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDXType>())
    << "'index_t' does not have a sufficient precision to represent "
    << "the indices of the input array. The total element_num is " << element_num
    << ", but the selected index_t can only represent "
    << mxnet::common::MaxIntegerValue<IDXType>() << " elements";
  Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
  // Temp space needed by the full sorts.
  size_t temp_size = std::max(
      mxnet::op::SortByKeyWorkspaceSize<IDXType, DType, xpu>(src.Size()),
      mxnet::op::SortByKeyWorkspaceSize<DType, IDXType, xpu>(src.Size()));
  temp_size = std::max(temp_size,
      mxnet::op::SortByKeyWorkspaceSize<IDXType, IDXType, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += PadBytes(sizeof(IDXType) * src.Size(), alignment);
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, sizeof(DType) * src.Size());
  size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
                          + PadBytes(sizeof(IDXType) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += PadBytes(sizeof(IDXType) * batch_size * k, alignment);
  }
  workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
  // Carve the single workspace allocation into sorted_dat / indices /
  // (optional) sel_indices / temp buffers, advancing workspace_curr_ptr.
  char* workspace_curr_ptr = workspace.dptr_;
  sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                     Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
  indices = Tensor<xpu, 1, IDXType>(reinterpret_cast<IDXType*>(workspace_curr_ptr),
                                    Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += PadBytes(sizeof(IDXType) * src.Size(), alignment);

  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, IDXType>(reinterpret_cast<IDXType*>(workspace_curr_ptr),
                                          Shape1(batch_size * k), s);
    workspace_curr_ptr += PadBytes(sizeof(IDXType) * batch_size * k, alignment);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
  }

  if (std::is_same<xpu, cpu>::value) {
    Tensor<xpu, 1, DType> flattened_data;
    if (do_transpose) {
      flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                             Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(DType) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      flattened_data = src.FlatTo1D<xpu, DType>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(DType)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    if (do_transpose) {
      // bring the selection axis to the innermost position before sorting
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }

  // initialize indices with 0..(batch_size*element_num - 1)
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, IDXType{0}, IDXType{1},
                                           kWriteTo, reinterpret_cast<IDXType*>(indices.dptr_));
  CHECK_EQ(indices.CheckContiguous(), true);

  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);

  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
  // is large enough.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
    ret_mask = scalar<DType>(0);
    sel_indices = reshape(slice<1>(inplace_reshape(indices,
                                                   Shape2(batch_size, element_num)), 0, k),
                          Shape1(batch_size * k));
    if (do_transpose) {
      mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      sel_indices = transpose_indices(sel_indices,
                                      Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    if (req[0] == kNullOp) {
      return;
    } else if (req[0] == kWriteTo) {
      mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
                                                     sel_indices.dptr_, ret_mask.dptr_);
    } else {
      LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
    }
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    if (do_transpose) {
      Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
      // mod element_num maps flat indices back to per-batch positions
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)), 0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, IDType> ret_indices =
          ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  } else {
    if (do_transpose) {
      Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
      Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_value, req[0], transpose(
          slice<2>(inplace_reshape(sorted_dat,
                                   Shape3(ret_value.shape_[0],
                                          ret_value.shape_[2],
                                          element_num)), 0, k),
          Shape3(0, 2, 1)));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)), 0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, DType> ret_value =
          ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
      Tensor<xpu, 2, IDType> ret_indices =
          ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_value, req[0],
                      slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)),
                               0, k));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  }
}

// Compute the workspace size (in bytes) TopKImplwithWorkspace needs for a
// given input/param combination; also returns the internal temp-buffer size
// through temp_size_ptr.  Mirrors the layout carved out in TopKImpl.
template<typename xpu, typename DType>
size_t TopKWorkspaceSize(const TBlob& src,
                         const TopKParam& param,
                         size_t *temp_size_ptr) {
  using namespace mshadow;
  using namespace mshadow::expr;
  size_t batch_size = 0;
  size_t temp_size;
  index_t element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  index_t k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(index_t));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);

  // Temp space needed by the full sorts.
  temp_size = std::max(
      mxnet::op::SortByKeyWorkspaceSize<index_t, DType, xpu>(src.Size()),
      mxnet::op::SortByKeyWorkspaceSize<DType, index_t, xpu>(src.Size()));
  temp_size = std::max(temp_size,
      mxnet::op::SortByKeyWorkspaceSize<index_t, index_t, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += PadBytes(sizeof(index_t) * src.Size(), alignment);
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, sizeof(DType) * src.Size());
  *temp_size_ptr = temp_size;
  size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
                          + PadBytes(sizeof(index_t) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += PadBytes(sizeof(index_t) * batch_size * k, alignment);
  }
  return workspace_size;
}

// Same algorithm as TopKImpl, but the caller supplies the (pre-sized)
// workspace pointer instead of acquiring it from a Resource; pair with
// TopKWorkspaceSize above.  Note this variant always uses index_t for the
// internal index buffers (no IDXType template parameter).
template<typename xpu, typename DType, typename IDType>
void TopKImplwithWorkspace(const RunContext &ctx,
                           const std::vector<OpReqType>& req,
                           const TBlob& src,
                           const std::vector<TBlob>& ret,
                           const TopKParam& param,
                           char* workspace_curr_ptr,
                           const size_t &temp_size,
                           Stream<xpu>* s) {
  using namespace mshadow;
  using namespace mshadow::expr;
  // 0. If input shape is 0-shape, directly return
  if (src.Size() == 0) return;
  // 1. Parse and initialize information
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, DType> sorted_dat;
  Tensor<xpu, 1, index_t> indices, sel_indices;
  size_t batch_size = 0;
  index_t element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  index_t k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(index_t));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<index_t>())
    << "'index_t' does not have a sufficient precision to represent "
    << "the indices of the input array. The total element_num is " << element_num
    << ", but the selected index_t can only represent "
    << mxnet::common::MaxIntegerValue<index_t>() << " elements";
  Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
  // Carve the caller-provided workspace into sorted_dat / indices /
  // (optional) sel_indices / temp buffers.
  sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                     Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
  indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
                                    Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += PadBytes(sizeof(index_t) * src.Size(), alignment);

  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
                                          Shape1(batch_size * k), s);
    workspace_curr_ptr += PadBytes(sizeof(index_t) * batch_size * k, alignment);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
  }

  if (std::is_same<xpu, cpu>::value) {
    Tensor<xpu, 1, DType> flattened_data;
    if (do_transpose) {
      flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                             Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(DType) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      flattened_data = src.FlatTo1D<xpu, DType>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(DType)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    if (do_transpose) {
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }

  // initialize indices with 0..(batch_size*element_num - 1)
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1,
                                           index_t{0}, index_t{1}, kWriteTo, indices.dptr_);
  CHECK_EQ(indices.CheckContiguous(), true);

  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);

  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
  // is large enough.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
    ret_mask = scalar<DType>(0);
    sel_indices = reshape(slice<1>(inplace_reshape(indices,
                                                   Shape2(batch_size, element_num)), 0, k),
                          Shape1(batch_size * k));
    if (do_transpose) {
      mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      sel_indices = transpose_indices(sel_indices,
                                      Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    if (req[0] == kNullOp) {
      return;
    } else if (req[0] == kWriteTo) {
      mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
                                                     sel_indices.dptr_, ret_mask.dptr_);
    } else {
      LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
    }
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    if (do_transpose) {
      Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
      // mod element_num maps flat indices back to per-batch positions
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)), 0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, IDType> ret_indices =
          ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  } else {
    if (do_transpose) {
      Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
      Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_value, req[0], transpose(
          slice<2>(inplace_reshape(sorted_dat,
                                   Shape3(ret_value.shape_[0],
                                          ret_value.shape_[2],
                                          element_num)), 0, k),
          Shape3(0, 2, 1)));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)), 0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, DType> ret_value =
          ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
      Tensor<xpu, 2, IDType> ret_indices =
          ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_value, req[0],
                      slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)),
                               0, k));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  }
}

// Operator entry point for topk: dispatches on value dtype, requested index
// dtype, and input size (int32_t index workspace for inputs below INT_MAX).
template<typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnBoth) {
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_NO_BFLOAT16_TYPE_SWITCH(param.dtype, IDType, {
        if (inputs[0].Size() >= INT_MAX) {
          TopKImpl<xpu, DType, IDType, index_t>(ctx.run_ctx,
            ctx.requested[0], req, inputs[0], outputs, param);
        } else {
          TopKImpl<xpu, DType, IDType, int32_t>(ctx.run_ctx,
            ctx.requested[0], req, inputs[0], outputs, param);
        }
      });
    });
  } else {
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      if (inputs[0].Size() >= INT_MAX) {
        TopKImpl<xpu, DType, index_t, index_t>(ctx.run_ctx,
          ctx.requested[0], req, inputs[0], outputs, param);
      } else {
        TopKImpl<xpu, DType, index_t, int32_t>(ctx.run_ctx,
          ctx.requested[0], req, inputs[0], outputs, param);
      }
    });
  }
}

// sort is implemented as topk with k = 0 (i.e. k == element_num, full sort)
// returning values only.
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnValue;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    if (inputs[0].Size() >= INT_MAX) {
      TopKImpl<xpu, DType, index_t, index_t>(ctx.run_ctx,
        ctx.requested[0], req, inputs[0], outputs, topk_param);
    } else {
      TopKImpl<xpu, DType, index_t, int32_t>(ctx.run_ctx,
        ctx.requested[0], req, inputs[0], outputs, topk_param);
    }
  });
}

// argsort is implemented as topk with k = 0 (full sort) returning indices only.
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<TBlob>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<TBlob>& outputs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.dtype = param.dtype;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
      if (inputs[0].Size() >= INT_MAX) {
        TopKImpl<xpu, DType, IDType, index_t>(ctx.run_ctx,
          ctx.requested[0], req, inputs[0], outputs, topk_param);
      } else {
        TopKImpl<xpu, DType, IDType, int32_t>(ctx.run_ctx,
          ctx.requested[0], req, inputs[0], outputs, topk_param);
      }
    });
  });
}

// Backward pass: scatter the incoming output gradients back to the positions
// recorded in the saved topk indices (inputs[2]); everything else gets 0
// (kWriteTo) or is left accumulated (kAddTo).
template<typename xpu, typename DType, typename IDType, typename IDXType>
void TopKBackwardImpl(const OpContext &ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs,
                      const TopKParam& param) {
  CHECK_NE(req[0], kWriteInplace);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
  CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
  size_t batch_size = 0;
  IDXType element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  IDXType k = 0;
  mxnet::TShape target_shape;
  ParseTopKParam(outputs[0].shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDXType>())
    << "'IDType' does not have a sufficient precision to represent "
    << "the indices of the input array. The total element_num is " << element_num
    << ", but the selected index_t can only represent "
    << mxnet::common::MaxIntegerValue<IDXType>() << " elements";
  Tensor<xpu, 1, IDXType> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, IDXType>(Shape1(batch_size * k + batch_size), s);
  Tensor<xpu, 1, IDXType> sel_indices =
      Tensor<xpu, 1, IDXType>(workspace.dptr_, Shape1(batch_size * k), s);
  Tensor<xpu, 1, IDXType> batch_shift =
      Tensor<xpu, 1, IDXType>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);

  Tensor<xpu, 2, DType> out_grad =
      inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s);
  Tensor<xpu, 2, DType> in_grad =
      outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s);
  // batch_shift = [0, element_num, 2*element_num, ...] -- flat offset of each batch
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, IDXType{0}, element_num, kWriteTo,
                                           batch_shift.dptr_);
  if (do_transpose) {
    Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s);
    mxnet::TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
    sel_indices = reshape(transpose(
                            broadcast_to(inplace_reshape(batch_shift,
                                                         Shape3(src_shape[0], src_shape[2], 1)),
                                         mxnet::TShape(Shape3(src_shape[0], src_shape[2], k))),
                            Shape3(0, 2, 1)),
                          Shape1(batch_size * k));
    sel_indices += tcast<IDXType>(indices);
    sel_indices = transpose_indices(sel_indices,
                                    Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                    Shape3(0, 2, 1));
  } else {
    Tensor<xpu, 2, IDType> indices =
        inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
    sel_indices = reshape(tcast<IDXType>(indices) +
                          broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
                                       mxnet::TShape(Shape2(batch_size, k))),
                          Shape1(batch_size * k));
  }
  CHECK_EQ(sel_indices.CheckContiguous(), true);
  if (kWriteTo == req[0] || kAddTo == req[0]) {
    if (kWriteTo == req[0]) {
      in_grad = scalar<DType>(0);
    }
    mxnet_op::Kernel<fill_ind, xpu>::Launch(s, batch_size * k, sel_indices.dptr_, out_grad.dptr_,
                                            req[0], in_grad.dptr_);
  } else {
    LOG(FATAL) << "Not Implemented!";
  }
}

// Backward dispatcher: mirrors the dtype/size dispatch of TopK().
template<typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnBoth) {
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
        if (inputs[0].Size() >= INT_MAX) {
          TopKBackwardImpl<xpu, DType, IDType, index_t>(ctx, inputs, req, outputs, param);
        } else {
          TopKBackwardImpl<xpu, DType, IDType, int32_t>(ctx, inputs, req, outputs, param);
        }
      });
    });
  } else if (param.ret_typ == topk_enum::kReturnValue) {
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      if (inputs[0].Size() >= INT_MAX) {
        TopKBackwardImpl<xpu, DType, index_t, index_t>(ctx, inputs, req, outputs, param);
      } else {
        TopKBackwardImpl<xpu, DType, index_t, int32_t>(ctx, inputs, req, outputs, param);
      }
    });
  } else {
    LOG(FATAL) << "Not Implemented";
  }
}

inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ ==
topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { return static_cast<uint32_t>(1); } else { return static_cast<uint32_t>(2); } } inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnBoth) { return static_cast<uint32_t>(2); } else { return static_cast<uint32_t>(1); } } inline bool TopKType(const nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); size_t in_size = in_attrs->size(); size_t out_size = out_attrs->size(); CHECK_EQ(in_size, 1); CHECK(out_size == 1 || out_size == 2); // out_attr[0] -> stores value // out_attr[1] -> stores indices if (out_size > 1) { if (param.ret_typ == topk_enum::kReturnValue) { #if MXNET_USE_INT64_TENSOR_SIZE == 1 CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64)) #else CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32)) #endif << "Failed to set the type of ret_indices."; } else { CHECK(type_assign(&(*out_attrs)[1], param.dtype)) << "Failed to set the type of ret_indices."; } } if (param.ret_typ == topk_enum::kReturnIndices) { CHECK(type_assign(&(*out_attrs)[0], param.dtype)) << "Failed to set the type of ret_indices."; } else { TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); return out_attrs->at(0) != -1; } return true; } inline bool TopKShapeImpl(const TopKParam& param, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { CHECK_EQ(out_attrs->size(), 1U); } else { CHECK_EQ(out_attrs->size(), 2U); } mxnet::TShape& in_shape = (*in_attrs)[0]; size_t batch_size = 0; index_t element_num = 0; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; bool is_ascend = false; index_t k = 0; mxnet::TShape 
target_shape; ParseTopKParam(in_shape, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape); } else { SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape); SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape); } return true; } inline bool TopKShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); return TopKShapeImpl(param, in_attrs, out_attrs); } inline bool SortType(const nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { int data_type = -1; size_t in_size = in_attrs->size(); size_t out_size = out_attrs->size(); CHECK_EQ(in_size, 1); CHECK_EQ(out_size, 2); #if MXNET_USE_INT64_TENSOR_SIZE == 1 CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64)) #else CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32)) #endif << "Failed to set the type of ret_indices"; CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0]; CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0]; CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0]; CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0]; if (data_type == -1) return false; return true; } inline bool SortShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const SortParam& param = nnvm::get<SortParam>(attrs.parsed); TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnValue; return TopKShapeImpl(topk_param, in_attrs, out_attrs); } inline bool ArgSortType(const 
nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed); CHECK(type_assign(&(*out_attrs)[0], param.dtype)) << "Failed to set the type of ret_indices."; return true; } inline bool ArgSortShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed); TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnIndices; return TopKShapeImpl(topk_param, in_attrs, out_attrs); } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
mandatory_but_no_devices.c
// Check that mandatory offloading causes various offloading directives to fail // when omp_get_num_devices() == 0 even if the requested device is the initial // device. This behavior is proposed for OpenMP 5.2 in OpenMP spec github // issue 2669. // RUN: %libomptarget-compile-nvptx64-nvidia-cuda -DDIR=target // RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \ // RUN: %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \ // RUN: %fcheck-nvptx64-nvidia-cuda // RUN: %libomptarget-compile-nvptx64-nvidia-cuda -DDIR='target teams' // RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \ // RUN: %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \ // RUN: %fcheck-nvptx64-nvidia-cuda // RUN: %libomptarget-compile-nvptx64-nvidia-cuda -DDIR='target data map(X)' // RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \ // RUN: %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \ // RUN: %fcheck-nvptx64-nvidia-cuda // RUN: %libomptarget-compile-nvptx64-nvidia-cuda \ // RUN: -DDIR='target enter data map(to:X)' // RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \ // RUN: %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \ // RUN: %fcheck-nvptx64-nvidia-cuda // RUN: %libomptarget-compile-nvptx64-nvidia-cuda \ // RUN: -DDIR='target exit data map(from:X)' // RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \ // RUN: %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \ // RUN: %fcheck-nvptx64-nvidia-cuda // RUN: %libomptarget-compile-nvptx64-nvidia-cuda \ // RUN: -DDIR='target update to(X)' // RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \ // RUN: %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \ // RUN: %fcheck-nvptx64-nvidia-cuda // RUN: %libomptarget-compile-nvptx64-nvidia-cuda \ // RUN: -DDIR='target update from(X)' // RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \ // RUN: %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \ // RUN: %fcheck-nvptx64-nvidia-cuda #include <omp.h> #include <stdio.h> // 
CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory

// X exists so the map(...) variants of DIR (see RUN lines above) have a
// variable to map; it is otherwise unused.
int main(void) {
  int X;
  // DIR is supplied via -DDIR=... at compile time (one offloading directive
  // per RUN configuration).  Even though the device clause names the initial
  // device, mandatory offloading with zero visible devices must still make
  // the construct fail, producing the fatal error matched by CHECK above.
#pragma omp DIR device(omp_get_initial_device())
  ;
  return 0;
}
GB_AxB_dot2_compmask.c
//------------------------------------------------------------------------------
// GB_AxB_dot2_compmask: C<!M>=A'*B via dot products
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Template body, expanded twice by the including file: with GB_PHASE_1_OF_2
// defined (count the entries of each C(:,j)) and with GB_PHASE_2_OF_2 defined
// (fill Ci/Cx using the counts from phase 1).  A is sliced into naslice
// pieces and B into nbslice pieces; the collapsed 2-D loop yields one task
// per (A-slice, B-slice) pair.

{

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        collapse(2)
    for (int a_taskid = 0 ; a_taskid < naslice ; a_taskid++)
    for (int b_taskid = 0 ; b_taskid < nbslice ; b_taskid++)
    {

        //----------------------------------------------------------------------
        // get A
        //----------------------------------------------------------------------

        GrB_Matrix A = Aslice [a_taskid] ;
        const int64_t *restrict Ai = A->i ;
        #if defined ( GB_PHASE_1_OF_2 )
        // phase 1: this slice's per-vector entry counts for C
        int64_t *restrict C_count = C_counts [a_taskid] ;
        #else
        // phase 2: cumulative counts delimit the region of C(:,j) that this
        // A-slice writes; NULL means "start of C(:,j)" / "end of C(:,j)"
        const int64_t *restrict C_count_start =
            (a_taskid == 0) ? NULL : C_counts [a_taskid] ;
        const int64_t *restrict C_count_end =
            (a_taskid == naslice-1) ? NULL : C_counts [a_taskid+1] ;
        // Ax is not needed when only the pattern of A matters
        const GB_ATYPE *restrict Ax = A_is_pattern ? NULL : A->x ;
        #endif

        //----------------------------------------------------------------------
        // C<!M>=A'*B via dot products
        //----------------------------------------------------------------------

        for (int64_t Iter_k = B_slice [b_taskid] ;
                     Iter_k < B_slice [b_taskid+1] ;
                     Iter_k++)
        {

            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------

            GBI_jth_iteration_with_iter (Iter, j, pB_start, pB_end) ;
            int64_t bjnz = pB_end - pB_start ;
            // no work to do if B(:,j) is empty
            if (bjnz == 0) continue ;

            //------------------------------------------------------------------
            // phase 2 of 2: get the range of entries in C(:,j) to compute
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_2_OF_2 )
            // this thread computes Ci and Cx [cnz:cnz_last]
            int64_t cnz = Cp [Iter_k] +
                ((C_count_start == NULL) ? 0 : C_count_start [Iter_k]) ;
            int64_t cnz_last = (C_count_end == NULL) ?
                (Cp [Iter_k+1] - 1) :
                (Cp [Iter_k] + C_count_end [Iter_k] - 1) ;
            if (cnz > cnz_last) continue ;
            #endif

            //------------------------------------------------------------------
            // get M(:,j)
            //------------------------------------------------------------------

            // find vector j in M (handles both hypersparse and standard M)
            int64_t pM, pM_end ;
            int64_t mpleft = 0 ;
            GB_lookup (M_is_hyper, Mh, Mp, &mpleft, mnvec-1, j, &pM, &pM_end) ;

            //------------------------------------------------------------------
            // C(:,j)<!M(:,j)> = A'*B(:,j)
            //------------------------------------------------------------------

            // get the first and last index in B(:,j)
            int64_t ib_first = Bi [pB_start] ;
            int64_t ib_last  = Bi [pB_end-1] ;

            // for each vector A(:,i):
            GBI_for_each_vector_with_iter (Iter_A, A)
            {
                GBI_jth_iteration_with_iter (Iter_A, i, pA, pA_end) ;
                // A(:,i) and B(:,j) are both present.  Check M(i,j): the
                // mask is complemented, so the dot product is computed only
                // where M(i,j) is false or not present.
                // FUTURE:: skip binary search if mask is dense.
                bool mij = false ;
                bool found ;
                int64_t pright = pM_end - 1 ;
                GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
                if (found)
                {
                    // typecast the stored mask entry to boolean
                    cast_M (&mij, Mx +(pM*msize), 0) ;
                }
                if (!mij)
                {
                    // C(i,j) = A(:,i)'*B(:,j)
                    #include "GB_AxB_dot_cij.c"
                }
            }
        }
    }
}
int_array.c
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_utilities.h"
#include "_hypre_utilities.hpp"

/******************************************************************************
 *
 * Routines for hypre_IntArray struct for holding an array of integers
 *
 *****************************************************************************/

/*--------------------------------------------------------------------------
 * hypre_IntArrayCreate
 *
 * Allocates the hypre_IntArray header on the host and records the requested
 * size.  The data buffer itself is NOT allocated here; call
 * hypre_IntArrayInitialize[_v2] for that.  The memory location defaults to
 * the one carried by the global hypre handle.
 *--------------------------------------------------------------------------*/

hypre_IntArray *
hypre_IntArrayCreate( HYPRE_Int size )
{
   hypre_IntArray *array;

   array = hypre_CTAlloc(hypre_IntArray, 1, HYPRE_MEMORY_HOST);

   hypre_IntArrayData(array) = NULL;
   hypre_IntArraySize(array) = size;

   hypre_IntArrayMemoryLocation(array) = hypre_HandleMemoryLocation(hypre_handle());

   return array;
}

/*--------------------------------------------------------------------------
 * hypre_IntArrayDestroy
 *
 * Frees the data buffer (at the array's recorded memory location) and the
 * host-side header.  Safe to call with a NULL array.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_IntArrayDestroy( hypre_IntArray *array )
{
   HYPRE_Int ierr = 0;

   if (array)
   {
      HYPRE_MemoryLocation memory_location = hypre_IntArrayMemoryLocation(array);

      hypre_TFree(hypre_IntArrayData(array), memory_location);

      hypre_TFree(array, HYPRE_MEMORY_HOST);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_IntArrayInitialize
 *
 * Allocates the data buffer (zero-initialized) at `memory_location' unless
 * data already exists, and records that location in the header.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_IntArrayInitialize_v2( hypre_IntArray *array, HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int size = hypre_IntArraySize(array);
   HYPRE_Int ierr = 0;

   hypre_IntArrayMemoryLocation(array) = memory_location;

   /* Caveat: for pre-existing data, the memory location must be guaranteed
    * to be consistent with `memory_location'
    * Otherwise, mismatches will exist and problems will be encountered
    * when being used, and freed */
   if ( !hypre_IntArrayData(array) )
   {
      hypre_IntArrayData(array) = hypre_CTAlloc(HYPRE_Int, size, memory_location);
   }

   return ierr;
}

/* Convenience wrapper: initialize at the location already recorded in the
 * header (set by Create or a previous Initialize_v2). */
HYPRE_Int
hypre_IntArrayInitialize( hypre_IntArray *array )
{
   HYPRE_Int ierr;

   ierr = hypre_IntArrayInitialize_v2( array, hypre_IntArrayMemoryLocation(array) );

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_IntArrayCopy
 * copies data from x to y
 * if size of x is larger than y only the first size_y elements of x are
 * copied to y
 * (hypre_TMemcpy handles host<->device transfers based on the two
 * recorded memory locations)
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_IntArrayCopy( hypre_IntArray *x,
                    hypre_IntArray *y )
{
   HYPRE_Int ierr = 0;

   size_t size = hypre_min( hypre_IntArraySize(x), hypre_IntArraySize(y) );

   hypre_TMemcpy( hypre_IntArrayData(y),
                  hypre_IntArrayData(x),
                  HYPRE_Int,
                  size,
                  hypre_IntArrayMemoryLocation(y),
                  hypre_IntArrayMemoryLocation(x) );

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_IntArrayCloneDeep
 * Returns a complete copy of x - a deep copy, with its own copy of the data.
 *--------------------------------------------------------------------------*/

/* Deep copy into a freshly allocated array living at `memory_location'. */
hypre_IntArray *
hypre_IntArrayCloneDeep_v2( hypre_IntArray *x, HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int size = hypre_IntArraySize(x);

   hypre_IntArray *y = hypre_IntArrayCreate( size );

   hypre_IntArrayInitialize_v2(y, memory_location);
   hypre_IntArrayCopy( x, y );

   return y;
}

/* Deep copy at the same memory location as x. */
hypre_IntArray *
hypre_IntArrayCloneDeep( hypre_IntArray *x )
{
   return hypre_IntArrayCloneDeep_v2(x, hypre_IntArrayMemoryLocation(x));
}

/*--------------------------------------------------------------------------
 * hypre_IntArraySetConstantValues
 *
 * Fills the array with `value'.  Dispatches to thrust on CUDA/HIP builds,
 * to an (optionally device-)OpenMP loop otherwise.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_IntArraySetConstantValues( hypre_IntArray *v,
                                 HYPRE_Int       value )
{
   HYPRE_Int *array_data = hypre_IntArrayData(v);
   HYPRE_Int  size       = hypre_IntArraySize(v);
   HYPRE_Int  ierr       = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (size > 0)
   {
      HYPRE_THRUST_CALL( fill_n, array_data, size, value );
   }
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(array_data)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      array_data[i] = value;
   }
#endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */

#if defined(HYPRE_USING_GPU)
   /* make sure the fill has completed before returning */
   hypre_SyncComputeStream(hypre_handle());
#endif

   return ierr;
}
cf72ae4_prot.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"

/* Generic carrier for a Devito-generated array: `data` points at the raw
 * buffer, the remaining members describe its (padded) shape. */
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

/* Wall-clock seconds accumulated per generated code section. */
struct profiler
{
  double section0;
  double section1;
} ;

void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads);

/* Time-stepping driver.
 *
 * section0: wave-propagation stencil, delegated to bf0 over four x/y block
 *           regions (full blocks plus the x and y remainder strips).
 * section1: sparse source injection into u via the precomputed source masks.
 *
 * Returns 0; per-section timings are accumulated into `timers`. */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler * timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
  int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data;
  int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__ ((aligned (64))) = (int (*)[nnz_sp_source_mask_vec->size[1]]) nnz_sp_source_mask_vec->data;
  float (*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[save_src_u_vec->size[1]]) save_src_u_vec->data;
  int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]]) source_id_vec->data;
  int (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[source_mask_vec->size[1]][source_mask_vec->size[2]]) source_mask_vec->data;
  int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]]) sp_source_mask_vec->data;
  float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* BUG FIX: these block-size variables were originally declared AFTER the
   * first time loop that reads x0_blk0_size/y0_blk0_size, i.e. they were
   * used before declaration (a compile error in C).  Hoisted before first
   * use.  block_sizes layout: [xb, yb, x0_blk0, y0_blk0]. */
  int xb_size = block_sizes[0];      /* unused below; kept to document layout */
  int yb_size = block_sizes[1];      /* unused below; kept to document layout */
  int x0_blk0_size = block_sizes[2];
  int y0_blk0_size = block_sizes[3];
  (void)xb_size;
  (void)yb_size;

  for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3))
  {
    struct timeval start_section0, end_section0;
    gettimeofday(&start_section0, NULL);
    /* Begin section0 */
    /* Four calls cover: full x/full y blocks, full x/y remainder,
     * x remainder/full y, and x remainder/y remainder. */
    bf0(damp_vec,dt,u_vec,vp_vec,t0,t1,t2,x0_blk0_size,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,y0_blk0_size,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads);
    bf0(damp_vec,dt,u_vec,vp_vec,t0,t1,t2,x0_blk0_size,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,(y_M - y_m + 1)%(y0_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads);
    bf0(damp_vec,dt,u_vec,vp_vec,t0,t1,t2,(x_M - x_m + 1)%(x0_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,y0_blk0_size,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads);
    bf0(damp_vec,dt,u_vec,vp_vec,t0,t1,t2,(x_M - x_m + 1)%(x0_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,(y_M - y_m + 1)%(y0_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads);
    /* End section0 */
    gettimeofday(&end_section0, NULL);
    timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
  }

  for (int time = time_m, t2 = (time + 1)%(3); time <= time_M; time += 1, t2 = (time + 1)%(3))
  {
    struct timeval start_section1, end_section1;
    gettimeofday(&start_section1, NULL);
    /* Begin section1: scatter the saved source wavefield into u at the
     * sparse z positions recorded in sp_source_mask. */
    #pragma omp parallel num_threads(nthreads_nonaffine)
    {
      int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(x_M - x_m + 1)/nthreads_nonaffine));
      #pragma omp for collapse(1) schedule(dynamic,chunk_size)
      for (int x = x_m; x <= x_M; x += 1)
      {
        #pragma omp simd aligned(nnz_sp_source_mask,save_src_u,source_id,source_mask,sp_source_mask,u:32)
        for (int y = y_m; y <= y_M; y += 1)
        {
          int sp_zi_M = nnz_sp_source_mask[x][y] - 1;
          for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
          {
            int zind = sp_source_mask[x][y][sp_zi];
            float r0 = save_src_u[time][source_id[x][y][zind]]*source_mask[x][y][zind];
            u[t2][x + 4][y + 4][zind + 4] += r0;
          }
        }
      }
    }
    /* End section1 */
    gettimeofday(&end_section1, NULL);
    timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;
  }

  return 0;
}

/* Blocked second-order-in-time wave-equation stencil update over one
 * [x_m..x_M] x [y_m..y_M] tile region; t0/t1/t2 are the rotating time
 * indices (t-1, t, t+1 modulo 3). */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;

  /* Remainder strips may have zero extent; nothing to do then. */
  if (x0_blk0_size == 0 || y0_blk0_size == 0)
  {
    return;
  }
  #pragma omp parallel num_threads(nthreads)
  {
    #pragma omp for collapse(2) schedule(dynamic,1)
    for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size)
      {
        for (int x = x0_blk0; x <= x0_blk0 + x0_blk0_size - 1; x += 1)
        {
          for (int y = y0_blk0; y <= y0_blk0 + y0_blk0_size - 1; y += 1)
          {
            #pragma omp simd aligned(damp,u,vp:32)
            for (int z = z_M == z_m ? z_m : z_m; z <= z_M; z += 1)
            {
              float r8 = 1.0/dt;
              float r7 = 1.0/(dt*dt);
              float r6 = 1.0/(vp[x + 4][y + 4][z + 4]*vp[x + 4][y + 4][z + 4]);
              u[t2][x + 4][y + 4][z + 4] = (r6*(-r7*(u[t0][x + 4][y + 4][z + 4] - 2.0F*u[t1][x + 4][y + 4][z + 4])) + r8*(damp[x + 1][y + 1][z + 1]*u[t1][x + 4][y + 4][z + 4]) - 3.70370379e-4F*(u[t1][x + 2][y + 4][z + 4] + u[t1][x + 4][y + 2][z + 4] + u[t1][x + 4][y + 4][z + 2] + u[t1][x + 4][y + 4][z + 6] + u[t1][x + 4][y + 6][z + 4] + u[t1][x + 6][y + 4][z + 4]) + 5.92592607e-3F*(u[t1][x + 3][y + 4][z + 4] + u[t1][x + 4][y + 3][z + 4] + u[t1][x + 4][y + 4][z + 3] + u[t1][x + 4][y + 4][z + 5] + u[t1][x + 4][y + 5][z + 4] + u[t1][x + 5][y + 4][z + 4]) - 3.33333341e-2F*u[t1][x + 4][y + 4][z + 4])/(r6*r7 + r8*damp[x + 1][y + 1][z + 1]);
            }
          }
        }
      }
    }
  }
}
PReLU.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/PReLU.c"
#else

/* PReLU: out = in when in > 0, else w * in.  `weight` holds either one
 * shared slope (numel == 1) or one slope per output plane/channel. */

void THNN_(PReLU_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight)
{
  THTensor_(resizeAs)(output, input);
  int64_t nOutputPlane = THTensor_(numel)(weight);

  if (nOutputPlane == 1)
  {
    // handle shared parameter case
    real w = *THTensor_(data)(weight);
    TH_TENSOR_APPLY2(real, output, real, input,
      *output_data = (*input_data > 0) ? *input_data : w*(*input_data);
    );
    return;
  }

  input = THTensor_(newContiguous)(input);

  int64_t bs = 1, ks = 1;
  {
    int64_t input_ndim = THTensor_(nDimension)(input);
    /* the channel dimension is 0 for 1-D input, 1 otherwise (dim 0 = batch) */
    if (input->size[input_ndim > 1] != nOutputPlane)
      THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]);

    if (input_ndim > 1) {
      bs = input->size[0];                 /* batch size */
      for (int d = 2; d < input_ndim; d++) {
        ks *= input->size[d];              /* elements per channel slice */
      }
    }
  }

  real *output_data = THTensor_(data)(output);
  real *input_data = THTensor_(data)(input);
  real *weight_data = THTensor_(data)(weight);
  THIndex_t i, j, k;
  /* parallelize over the batch; j,k are per-thread inner loop counters */
#pragma omp parallel for private(j,k)
  for (i = 0; i < bs; ++i)
  {
    real* n_input_data = input_data + i*nOutputPlane*ks;
    real* n_output_data = output_data + i*nOutputPlane*ks;
    for (j = 0; j < nOutputPlane; ++j)
    {
      for (k = 0; k < ks; ++k)
        n_output_data[k] = (n_input_data[k] > 0) ? n_input_data[k] : weight_data[j] * n_input_data[k];
      n_input_data += ks;
      n_output_data += ks;
    }
  }

  THTensor_(free)(input);   /* release the newContiguous reference */
}

/* gradInput = gradOutput where input > 0, else weight * gradOutput. */
void THNN_(PReLU_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight)
{
  THNN_CHECK_NELEMENT(input, gradOutput);
  THTensor_(resizeAs)(gradInput, input);
  int64_t nOutputPlane = THTensor_(numel)(weight);

  if (nOutputPlane == 1)
  {
    /* shared-parameter case */
    real w = THTensor_(data)(weight)[0];
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
      if ((*input_data) > 0)
        *gradInput_data = *gradOutput_data;
      else
        *gradInput_data = w * (*gradOutput_data);
    );
    return;
  }

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  weight = THTensor_(newContiguous)(weight);

  const real *input_data = THTensor_(data)(input);
  const real *gradOutput_data = THTensor_(data)(gradOutput);
  const real *weight_data = THTensor_(data)(weight);
  real *gradInput_data = THTensor_(data)(gradInput);

  int64_t bs = 1, ks = 1;
  {
    int64_t input_ndim = THTensor_(nDimension)(input);
    /* same shape bookkeeping as in updateOutput */
    if (input->size[input_ndim > 1] != nOutputPlane)
      THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]);

    if (input_ndim > 1) {
      bs = input->size[0];
      for (int d = 2; d < input_ndim; d++) {
        ks *= input->size[d];
      }
    }
  }

  THIndex_t i, j, k;
#pragma omp parallel for private(j,k)
  for (i = 0; i < bs; ++i)
  {
    const real *n_input_data = input_data + i*nOutputPlane*ks;
    const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;
    real *n_gradInput_data = gradInput_data + i*nOutputPlane*ks;

    for (j = 0; j < nOutputPlane; ++j)
    {
      real w = weight_data[j];
      for (k = 0; k < ks; ++k)
      {
        if (n_input_data[k] > 0)
          n_gradInput_data[k] = n_gradOutput_data[k];
        else
          n_gradInput_data[k] = n_gradOutput_data[k] * w;
      }
      n_input_data += ks;
      n_gradInput_data += ks;
      n_gradOutput_data += ks;
    }
  }

  /* release the newContiguous references */
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(weight);
}

/* Accumulates dL/dw into gradWeight, scaled by `scale_`: for each slope,
 * the sum of input * gradOutput over positions where input <= 0. */
void THNN_(PReLU_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight,
          THTensor *gradWeight,
          accreal scale_)
{
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  THNN_CHECK_NELEMENT(input, gradOutput);
  int64_t nOutputPlane = THTensor_(numel)(weight);

  if (nOutputPlane == 1)
  {
    /* shared-parameter case: one scalar accumulation */
    real *gradWeight_data = THTensor_(data)(gradWeight);
    real sum = 0;
    TH_TENSOR_APPLY2(real, input, real, gradOutput,
      if ((*input_data) <= 0)
        sum += (*input_data) * (*gradOutput_data);
    );
    gradWeight_data[0] += scale * sum;
    return;
  }

  THArgCheck(THTensor_(isContiguous)(gradWeight), 6, "gradWeight needs to be contiguous");
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  weight = THTensor_(newContiguous)(weight);

  int64_t bs = 1, ks = 1;
  {
    int64_t input_ndim = THTensor_(nDimension)(input);
    /* same shape bookkeeping as in updateOutput */
    if (input->size[input_ndim > 1] != nOutputPlane)
      THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]);

    if (input_ndim > 1) {
      bs = input->size[0];
      for (int d = 2; d < input_ndim; d++) {
        ks *= input->size[d];
      }
    }
  }

  const real *input_data = THTensor_(data)(input);
  const real *gradOutput_data = THTensor_(data)(gradOutput);
  real *gradWeight_data = THTensor_(data)(gradWeight);

  THIndex_t i, j, k;
  /* serial over the batch: all batches accumulate into gradWeight_data[j] */
  for (i = 0; i < bs; ++i)
  {
    const real *n_input_data = input_data + i*nOutputPlane*ks;
    const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;

    for (j = 0; j < nOutputPlane; ++j)
    {
      real sum = 0;
      for (k = 0; k < ks; ++k)
        if (n_input_data[k] <= 0)
          sum += n_gradOutput_data[k] * n_input_data[k];
      gradWeight_data[j] += scale * sum;
      n_input_data += ks;
      n_gradOutput_data += ks;
    }
  }

  /* release the newContiguous references */
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(weight);
}

#endif
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional
%  vector space, and each pixel, Pi, is defined by an ordered triple of
%  red, green, and blue coordinates, (Ri, Gi, Bi).
%
%  Each primary color component (red, green, or blue) represents an
%  intensity which varies linearly from 0 to a maximum value, Cmax, which
%  corresponds to full saturation of that color.  Color allocation is
%  defined over a domain consisting of the cube in RGB space with opposite
%  vertices at (0,0,0) and (Cmax, Cmax, Cmax).  QUANTIZE requires Cmax =
%  255.
%
%  The algorithm maps this domain onto a tree in which each node
%  represents a cube within that domain.  In the following discussion
%  these cubes are defined by the coordinate of two opposite vertices (vertex
%  nearest the origin in RGB space and the vertex farthest from the origin).
%
%  The tree's root node represents the entire domain, (0,0,0) through
%  (Cmax,Cmax,Cmax).  Each lower level in the tree is generated by
%  subdividing one node's cube into eight smaller cubes of equal size.
%  This corresponds to bisecting the parent cube with planes passing
%  through the midpoints of each edge.
%
%  The basic algorithm operates in three phases: Classification,
%  Reduction, and Assignment.  Classification builds a color description
%  tree for the image.  Reduction collapses the tree until the number of
%  colors it represents is, at most, the number of colors desired in the
%  output image.  Assignment defines the output image's color map and sets
%  each pixel's color by reclassification in the reduced tree.  Our goal is
%  to minimize the numerical discrepancies between the original colors and
%  quantized colors (quantization error).
%
%  Classification begins by initializing a color description tree of
%  sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color description
%  tree in the classification phase for realistic values of Cmax.
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define ErrorRelativeWeight PerceptibleReciprocal(16) #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/ typedef struct _DoublePixelPacket { double red, green, blue, alpha; } DoublePixelPacket; typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; double quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; double distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; double diffusion, weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. */ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *,ExceptionInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *,ExceptionInfo *), SetGrayscaleImage(Image *,ExceptionInfo *), SetImageColormap(Image *,CubeInfo *,ExceptionInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DefineImageColormap(Image *,CubeInfo *,NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. 
% % The format of the AcquireQuantizeInfo method is: % % QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) { QuantizeInfo *quantize_info; quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info)); GetQuantizeInfo(quantize_info); if (image_info != (ImageInfo *) NULL) { const char *option; quantize_info->dither_method=image_info->dither == MagickFalse ? NoDitherMethod : RiemersmaDitherMethod; option=GetImageOption(image_info,"dither"); if (option != (const char *) NULL) quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,option); quantize_info->measure_error=image_info->verbose; } return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A s s i g n I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AssignImageColors() generates the output image from the pruned tree. The % output image consists of two parts: (1) A color map, which is an array % of color descriptions (RGB triples) for each color present in the % output image; (2) A pixel array, which represents each pixel as an % index into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. 
% % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static inline void AssociateAlphaPixel(const Image *image, const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (GetPixelAlpha(image,pixel) == OpaqueAlpha)) { alpha_pixel->red=(double) GetPixelRed(image,pixel); alpha_pixel->green=(double) GetPixelGreen(image,pixel); alpha_pixel->blue=(double) GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); return; } alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel)); alpha_pixel->red=alpha*GetPixelRed(image,pixel); alpha_pixel->green=alpha*GetPixelGreen(image,pixel); alpha_pixel->blue=alpha*GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); } static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info, const PixelInfo *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (pixel->alpha == OpaqueAlpha)) { alpha_pixel->red=(double) pixel->red; alpha_pixel->green=(double) pixel->green; alpha_pixel->blue=(double) pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; return; } alpha=(double) (QuantumScale*pixel->alpha); alpha_pixel->red=alpha*pixel->red; alpha_pixel->green=alpha*pixel->green; alpha_pixel->blue=alpha*pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const DoublePixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) | ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) 
id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3; return(id); } static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define AssignImageTag "Assign/Image" ColorspaceType colorspace; ssize_t y; /* Allocate image colormap. */ colorspace=image->colorspace; if (cube_info->quantize_info->colorspace != UndefinedColorspace) (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace, exception); cube_info->transparent_pixels=0; cube_info->transparent_index=(-1); if (SetImageColormap(image,cube_info,exception) == MagickFalse) return(MagickFalse); /* Create a reduced color image. */ if (cube_info->quantize_info->dither_method != NoDitherMethod) (void) DitherImage(image,cube_info,exception); else { CacheView *image_view; MagickBooleanType status; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CubeInfo cube; Quantum *magick_restrict q; ssize_t count, x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); for (x=0; x < (ssize_t) image->columns; x+=count) { DoublePixelPacket pixel; const NodeInfo *node_info; ssize_t i; size_t id, index; /* Identify the deepest node containing the pixel's color. 
*/ for (count=1; (x+count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,q,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,&cube,q,&pixel); node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); index=cube.color_number; for (i=0; i < (ssize_t) count; i++) { if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum( image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum( image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum( image->colormap[index].blue),q); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum( image->colormap[index].alpha),q); } q+=GetPixelChannels(image); } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } if (cube_info->quantize_info->measure_error != MagickFalse) (void) GetImageQuantizeError(image,exception); if ((cube_info->quantize_info->number_colors == 2) && (IsGrayColorspace(cube_info->quantize_info->colorspace))) { double intensity; /* Monochrome image. */ intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 
0.0 : QuantumRange; if (image->colors > 1) { intensity=0.0; if (GetPixelInfoLuma(image->colormap+0) > GetPixelInfoLuma(image->colormap+1)) intensity=(double) QuantumRange; } image->colormap[0].red=intensity; image->colormap[0].green=intensity; image->colormap[0].blue=intensity; if (image->colors > 1) { image->colormap[1].red=(double) QuantumRange-intensity; image->colormap[1].green=(double) QuantumRange-intensity; image->colormap[1].blue=(double) QuantumRange-intensity; } } (void) SyncImage(image,exception); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (IssRGBCompatibleColorspace(colorspace) == MagickFalse)) (void) TransformImageColorspace(image,colorspace,exception); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClassifyImageColors() begins by initializing a color description tree % of sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color % description tree in the storage_class phase for realistic values of % Cmax. If colors components in the input image are quantized to k-bit % precision, so that Cmax= 2k-1, the tree would need k levels below the % root node to allow representing each possible input color in a leaf. % This becomes prohibitive because the tree's total number of nodes is % 1 + sum(i=1,k,8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). 
% % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing It updates the following data for each such node: % % n1 : Number of pixels whose color is contained in the RGB cube % which this node represents; % % n2 : Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb : Sums of the red, green, and blue component values for % all pixels not classified at a lower depth. The combination of % these sums and n2 will ultimately characterize the mean color of a % set of pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the quantization % error for a node. % % The format of the ClassifyImageColors() method is: % % MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, % const Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o image: the image. % */ static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info) { MagickBooleanType associate_alpha; associate_alpha=image->alpha_trait != UndefinedPixelTrait ? 
MagickTrue : MagickFalse; if ((cube_info->quantize_info->number_colors == 2) && ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) || (cube_info->quantize_info->colorspace == GRAYColorspace))) associate_alpha=MagickFalse; cube_info->associate_alpha=associate_alpha; } static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, const Image *image,ExceptionInfo *exception) { #define ClassifyImageTag "Classify/Image" CacheView *image_view; double bisect; DoublePixelPacket error, mid, midpoint, pixel; MagickBooleanType proceed; NodeInfo *node_info; size_t count, id, index, level; ssize_t y; /* Classify the first cube_info->maximum_colors colors to a tree depth of 8. */ SetAssociatedAlpha(image,cube_info); if (cube_info->quantize_info->colorspace != image->colorspace) { if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace,exception); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace, exception); } midpoint.red=(double) QuantumRange/2.0; midpoint.green=(double) QuantumRange/2.0; midpoint.blue=(double) QuantumRange/2.0; midpoint.alpha=(double) QuantumRange/2.0; error.alpha=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. 
*/ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != 0) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. 
*/ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. 
*/ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); continue; } if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != 0) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } image_view=DestroyCacheView(image_view); if (cube_info->quantize_info->colorspace != image->colorspace) if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(y < (ssize_t) image->rows ? 
MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. % */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void ClosestColor(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) ClosestColor(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { double alpha, beta, distance, pixel; DoublePixelPacket *magick_restrict q; PixelInfo *magick_restrict p; /* Determine if this color is "closest". */ p=image->colormap+node_info->color_number; q=(&cube_info->target); alpha=1.0; beta=1.0; if (cube_info->associate_alpha != MagickFalse) { alpha=(MagickRealType) (QuantumScale*p->alpha); beta=(MagickRealType) (QuantumScale*q->alpha); } pixel=alpha*p->red-beta*q->red; distance=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->green-beta*q->green; distance+=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->blue-beta*q->blue; distance+=pixel*pixel; if (distance <= cube_info->distance) { if (cube_info->associate_alpha != MagickFalse) { pixel=p->alpha-q->alpha; distance+=pixel*pixel; } if (distance <= cube_info->distance) { cube_info->distance=distance; cube_info->color_number=node_info->color_number; } } } } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p r e s s I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompressImageColormap() compresses an image colormap by removing any % duplicate or unused color entries. % % The format of the CompressImageColormap method is: % % MagickBooleanType CompressImageColormap(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Only palette (fully colormapped) images can be compressed this way.
  */
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize to the image's current color count at maximum tree depth;
    this collapses duplicate and unused colormap entries.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
%  The format of the DefineImageColormap method is:
%
%      void DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children (8 child slots for RGB, 16 when alpha is
    associated).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha;

      PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        total_color accumulators divided by the unique pixel count.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Un-premultiply the mean color by the mean alpha (gamma) for
                semi-transparent entries.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /*
                Track the most-populated transparent entry.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with the color cube.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *nodes;

  /*
    Release color cube tree storage: walk the singly-linked list of node
    queues, freeing each queue's node array and then the queue record
    itself.  NOTE(review): assumes node_queue is non-NULL on entry (the
    root node allocation in GetCubeInfo guarantees at least one queue).
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  /*
    Release the dither cache (virtual memory), the cloned quantize info,
    and finally the cube structure itself.
  */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /*
    Invalidate the signature before freeing so a stale pointer fails the
    signature assert on any later (erroneous) use.
  */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
% */ static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels) { ssize_t i; assert(pixels != (DoublePixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (DoublePixelPacket *) NULL) pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static DoublePixelPacket **AcquirePixelThreadSet(const size_t count) { DoublePixelPacket **pixels; size_t number_threads; ssize_t i; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (DoublePixelPacket **) NULL) return((DoublePixelPacket **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2* sizeof(**pixels)); if (pixels[i] == (DoublePixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const DoublePixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; DoublePixelPacket **pixels; MagickBooleanType status; 
ssize_t y; /* Distribute quantization error using Floyd-Steinberg. */ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; Quantum *magick_restrict q; size_t index; ssize_t x, v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; ssize_t i; ssize_t u; u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel); if (x > 0) { pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16; pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16; pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=cube_info->diffusion*previous[u+v].red/16; pixel.green+=cube_info->diffusion*previous[u+v].green/16; pixel.blue+=cube_info->diffusion*previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16; } pixel.red+=5.0*cube_info->diffusion*previous[u].red/16; pixel.green+=5.0*cube_info->diffusion*previous[u].green/16; pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16; if (x > 0) { pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16; 
pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16; pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16; } } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { NodeInfo *node_info; size_t node_id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { node_id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[node_id] == (NodeInfo *) NULL) break; node_info=node_info->child[node_id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image)); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red), q+u*GetPixelChannels(image)); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green), q+u*GetPixelChannels(image)); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue), q+u*GetPixelChannels(image)); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha), q+u*GetPixelChannels(image)); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. 
*/ AssociateAlphaPixelInfo(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].alpha=pixel.alpha-color.alpha; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CubeInfo *p; DoublePixelPacket color, pixel; MagickBooleanType proceed; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { Quantum *magick_restrict q; ssize_t i; /* Distribute error. 
*/ q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); AssociateAlphaPixel(image,cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].red; pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].green; pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].alpha; } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { NodeInfo *node_info; size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(double) (4.0*(QuantumRange+1.0)*((double) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) p->cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q); if (cube_info->associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. */ (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType Riemersma(Image *image,CacheView *image_view, CubeInfo *cube_info,const size_t level,const unsigned int direction, ExceptionInfo *exception) { MagickBooleanType status; status=MagickTrue; if (level == 1) switch (direction) { case WestGravity: { status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); break; } case EastGravity: { 
status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); break; } case NorthGravity: { status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); break; } case SouthGravity: { status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); break; } default: break; } else switch (direction) { case WestGravity: { status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); break; } case EastGravity: { status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != 
MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); break; } case NorthGravity: { status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); break; } case SouthGravity: { status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); break; } default: break; } return(status); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) 
{ CacheView *image_view; const char *artifact; MagickBooleanType status; size_t extent, level; artifact=GetImageArtifact(image,"dither:diffusion-amount"); if (artifact != (const char *) NULL) cube_info->diffusion=StringToDoubleInterval(artifact,1.0); if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info,exception)); /* Distribute quantization error along a Hilbert curve. */ (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; extent=MagickMax(image->columns,image->rows); level=(size_t) log2((double) extent); if (((size_t) 1UL << level) < extent) level++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); status=MagickTrue; if (level > 0) status=Riemersma(image,image_view,cube_info,level,NorthGravity,exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initialize the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose a optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. 
In some cases, such as an image with low color
%      dispersion (a few number of colors), a value other than
%      Log4(number_colors) is required.  To expand the color tree completely,
%      use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    weight;

  size_t
    length;

  ssize_t
    i;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  /* Clamp the requested tree depth to the supported [2, MaxTreeDepth]. */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.  The root is its own parent so upward walks
    (e.g. ClosestColor on node_info->parent) terminate safely.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  /* NOTE(review): cube_info leaks on the failure path above — confirm
     whether callers treat NULL as fatal and exit. */
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);
  /*
    Initialize dither resources: one cache slot for each possible
    (8-CacheShift)-bit RGBA key produced by CacheOffset().
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache: all-bits-one bytes yield -1 in each ssize_t
    slot, the "not yet resolved" sentinel tested as cache[i] < 0.
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]=PerceptibleReciprocal(weight);
    weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
  }
  cube_info->diffusion=1.0;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes; nodes are handed out from these
        NodesInAList-sized pools and all freed at once by DestroyCubeInfo.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      /* NOTE(review): `nodes` leaks on the failure path just above, as it
         is not yet linked into node_queue — confirm acceptable. */
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t i z e E r r o r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /*
    Error is only meaningful for colormapped (PseudoClass) images.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /* 3 channels per pixel; normalizes the per-channel error sums below. */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Compare each pixel against its colormap entry, alpha-weighting
        both sides when the image carries alpha.
      */
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t Q u a n t i z e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /*
    Defaults: 256 colors, Riemersma (Hilbert-curve) dithering, undefined
    colorspace, no error measurement.
  */
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K m e a n s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KmeansImage() applies k-means color reduction to an image.  This is a
%  colorspace clustering or segmentation technique.
% % The format of the KmeansImage method is: % % MagickBooleanType KmeansImage(Image *image,const size_t number_colors, % const size_t max_iterations,const double tolerance, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_colors: number of colors to use as seeds. % % o max_iterations: maximum number of iterations while converging. % % o tolerance: the maximum tolerance. % % o exception: return any errors or warnings in this structure. % */ typedef struct _KmeansInfo { double red, green, blue, alpha, black, count, distortion; } KmeansInfo; static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info) { ssize_t i; assert(kmeans_info != (KmeansInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (kmeans_info[i] != (KmeansInfo *) NULL) kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]); kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info); return(kmeans_info); } static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors) { KmeansInfo **kmeans_info; ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads, sizeof(*kmeans_info)); if (kmeans_info == (KmeansInfo **) NULL) return((KmeansInfo **) NULL); (void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info)); for (i=0; i < (ssize_t) number_threads; i++) { kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors, sizeof(**kmeans_info)); if (kmeans_info[i] == (KmeansInfo *) NULL) return(DestroyKmeansThreadSet(kmeans_info)); } return(kmeans_info); } static inline double KmeansMetric(const Image *magick_restrict image, const Quantum *magick_restrict p,const PixelInfo *magick_restrict q) { double gamma, metric, pixel; gamma=1.0; metric=0.0; if ((image->alpha_trait != UndefinedPixelTrait) || (q->alpha_trait != UndefinedPixelTrait)) { 
pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ? q->alpha : OpaqueAlpha); metric+=pixel*pixel; if (image->alpha_trait != UndefinedPixelTrait) gamma*=QuantumScale*GetPixelAlpha(image,p); if (q->alpha_trait != UndefinedPixelTrait) gamma*=QuantumScale*q->alpha; } if (image->colorspace == CMYKColorspace) { pixel=QuantumScale*(GetPixelBlack(image,p)-q->black); metric+=gamma*pixel*pixel; gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p)); gamma*=QuantumScale*(QuantumRange-q->black); } metric*=3.0; pixel=QuantumScale*(GetPixelRed(image,p)-q->red); if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs((double) pixel) > 0.5) pixel-=0.5; pixel*=2.0; } metric+=gamma*pixel*pixel; pixel=QuantumScale*(GetPixelGreen(image,p)-q->green); metric+=gamma*pixel*pixel; pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue); metric+=gamma*pixel*pixel; return(metric); } MagickExport MagickBooleanType KmeansImage(Image *image, const size_t number_colors,const size_t max_iterations,const double tolerance, ExceptionInfo *exception) { #define KmeansImageTag "Kmeans/Image" #define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info)) CacheView *image_view; const char *colors; double previous_tolerance; KmeansInfo **kmeans_pixels; MagickBooleanType verbose, status; ssize_t n; size_t number_threads; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colors=GetImageArtifact(image,"kmeans:seed-colors"); if (colors == (const char *) NULL) { CubeInfo *cube_info; QuantizeInfo *quantize_info; size_t depth; /* Seed clusters from color quantization. 
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      /* depth of the color cube tree is Log4(number_colors)+2 */
      n=number_colors;
      for (depth=1; n != 0; depth++)
        n>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        const char
          *q;

        /* each seed color is delimited by ';' or the end of the string */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            if (image->alpha_trait != UndefinedPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    ssize_t
      j,
      y;

    /* reset the per-thread accumulators for this iteration */
    for (j=0; j < (ssize_t) number_threads; j++)
      (void) memset(kmeans_pixels[j],0,image->colors*sizeof(*kmeans_pixels[j]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic) shared(status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        ssize_t
          i,
          k;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        k=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          /* early exit: cannot do better than a (near) exact match */
          if (min_distance <= MagickEpsilon)
            break;
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              k=i;
            }
        }
        kmeans_pixels[id][k].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][k].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][k].blue+=QuantumScale*GetPixelBlue(image,q);
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[id][k].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][k].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][k].count++;
        kmeans_pixels[id][k].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) k,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (j=1; j < (ssize_t) number_threads; j++)
    {
      ssize_t
        k;

      for (k=0; k < (ssize_t) image->colors; k++)
      {
        kmeans_pixels[0][k].red+=kmeans_pixels[j][k].red;
        kmeans_pixels[0][k].green+=kmeans_pixels[j][k].green;
        kmeans_pixels[0][k].blue+=kmeans_pixels[j][k].blue;
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[0][k].alpha+=kmeans_pixels[j][k].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][k].black+=kmeans_pixels[j][k].black;
        kmeans_pixels[0][k].count+=kmeans_pixels[j][k].count;
        kmeans_pixels[0][k].distortion+=kmeans_pixels[j][k].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (j=0; j < (ssize_t) image->colors; j++)
    {
      double
        gamma;

      /* PerceptibleReciprocal guards against empty clusters (count == 0) */
      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][j].count);
      image->colormap[j].red=gamma*QuantumRange*kmeans_pixels[0][j].red;
      image->colormap[j].green=gamma*QuantumRange*kmeans_pixels[0][j].green;
      image->colormap[j].blue=gamma*QuantumRange*kmeans_pixels[0][j].blue;
      if (image->alpha_trait != UndefinedPixelTrait)
        image->colormap[j].alpha=gamma*QuantumRange*kmeans_pixels[0][j].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[j].black=gamma*QuantumRange*kmeans_pixels[0][j].black;
      distortion+=kmeans_pixels[0][j].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    /* converged once the change in total distortion is within tolerance */
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  return(SyncImage(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P o s t e r i z e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const DitherMethod dither_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low values
%      (2, 3, or 4) have the most visible effect.
%
%    o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
%      RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag  "Posterize/Image"
/* snap a channel value to the nearest of `levels' evenly spaced levels */
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Reduce the posterized image to at most levels^3 colors via quantization.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e C h i l d                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneChild method is:
%
%      PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  if (cube_info->nodes > cube_info->maximum_colors)
    {
      /*
        Merge color statistics into parent.
      */
      parent=node_info->parent;
      parent->number_unique+=node_info->number_unique;
      parent->total_color.red+=node_info->total_color.red;
      parent->total_color.green+=node_info->total_color.green;
      parent->total_color.blue+=node_info->total_color.blue;
      parent->total_color.alpha+=node_info->total_color.alpha;
      parent->child[node_info->id]=(NodeInfo *) NULL;
      cube_info->nodes--;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  P r u n e L e v e l                                                        %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[i]);
  /* only leaves at the maximum tree depth are pruned */
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  P r u n e T o C u b e D e p t h                                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[i]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  Q u a n t i z e I m a g e                                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image while
%  minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  ImageType
    type;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* clamp the requested colors to [1,MaxColormapSize]; 0 means "maximum" */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  type=IdentifyImageGray(image,exception);
  if (IsGrayImageType(type) != MagickFalse)
    (void) SetGrayscaleImage(image,exception);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5))
        depth--;
      if (IsGrayImageType(type) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u a n t i z e I m a g e s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImages() analyzes the colors within a set of reference images and
%  chooses a fixed number of colors to represent the set.  The goal of the
%  algorithm is to minimize the color difference between the input and output
%  images while minimizing the processing time.
%
%  The format of the QuantizeImages method is:
%
%      MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
%        Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: Specifies a pointer to a list of Image structures.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  size_t
    depth,
    maximum_colors,
    number_images;

  ssize_t
    i;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /* clamp the requested colors to [1,MaxColormapSize]; 0 means "maximum" */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
        NOTE(review): unlike QuantizeImage(), the dither decrement here is
        not guarded by (depth > 2) -- confirm this asymmetry is intended.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  image=images;
  /* first pass: classify the colors of every image into one shared cube */
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      /* second pass: map every image onto the shared, reduced colormap */
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u a n t i z e E r r o r F l a t t e n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeErrorFlatten() traverses the color cube and flattens the
%  quantization error into a sorted 1D array.  This accelerates the color
%  reduction process.
%
%  Contributed by Yoya.
%
%  The format of the QuantizeErrorFlatten method is:
%
%      size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
%        const NodeInfo *node_info,const ssize_t offset,
%        double *quantize_error)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is current pointer.
%
%    o offset: quantize error offset.
%
%    o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  size_t
    n,
    number_children;

  ssize_t
    i;

  /* stop if the write position would run past the node count */
  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  n=1;
  number_children=cube_info->associate_alpha == MagickFalse ?
    8UL : 16UL;
  for (i=0; i < (ssize_t) number_children ; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
        quantize_error);
  /* returns the number of entries written for this subtree */
  return(n);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e                                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Reduce() traverses the color cube tree and prunes any node whose
%  quantization error falls below a particular threshold.
%
%  The format of the Reduce method is:
%
%      Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[i]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(cube_info,node_info);
  else
    {
      /*
        Find minimum pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReduceImageColors() repeatedly prunes the tree until the number of nodes
%  with n2 > 0 is less than or equal to the maximum number of colors allowed
%  in the output image.  On any given iteration over the tree, it selects
%  those nodes whose E value is minimal for pruning and merges their
%  color statistics upward.  It uses a pruning threshold, Ep, to govern
%  node selection as follows:
%
%      Ep = 0
%      while number of nodes with (n2 > 0) > required maximum number of colors
%        prune all nodes such that E <= Ep
%        Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2, Sr, Sg, and Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixel's colors.  When n2
%  > 0 the node will uniquely define a color in the output image.  At the
%  beginning of reduction, n2 = 0 for all nodes except the leaves of
%  the tree which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors
%  within the cubic volume which the node represents.  This includes n1 -
%  n2 pixels whose colors should be defined by nodes at a lower level in
%  the tree.
%
%  The format of the ReduceImageColors method is:
%
%      ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  qsort() comparator: ascending order of quantization error, with an
  epsilon band treated as equal.
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  double
    *p,
    *q;

  p=(double *) error_p;
  q=(double *) error_q;
  if (*p > *q)
    return(1);
  if (fabs(*q-*p) <= MagickEpsilon)
    return(0);
  return(-1);
}

static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors: seed the
        first pruning threshold from the sorted error distribution so the
        loop below starts near its fixed point.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
  }
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImage() replaces the colors of an image with the closest of the colors
%  from the reference image.
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* classify the REFERENCE image's colors, then assign them to image */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
%
%  The format of the RemapImages method is:
%
%      MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
%        Image *images,Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: the image sequence.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t G r a y s c a l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
%  The format of the SetGrayscaleImage method is:
%
%      MagickBooleanType SetGrayscaleImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator: order colormap entries by grayscale intensity; the
  difference is clamped to the int range before the narrowing cast.
*/
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity < (double) INT_MIN)
    intensity=(double) INT_MIN;
  if (intensity > (double) INT_MAX)
    intensity=(double) INT_MAX;
  return((int) intensity);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  size_t
    extent;

  ssize_t
    *colormap_index,
    i,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /* map table must cover every possible quantum-scaled intensity */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } (void) memset(colormap_index,0,extent*sizeof(*colormap_index)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) 
AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].alpha]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap( GetPixelIndex(image,q))],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,exception) != MagickFalse) image->type=BilevelType; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColormap() traverses the color cube tree and sets the colormap of % the image. 
A colormap entry is any node in the color cube tree where the % of unique colors is not zero. % % The format of the SetImageColormap method is: % % MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info, % ExceptionInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o exception: return any errors or warnings in this structure. % */ MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { size_t number_colors; number_colors=MagickMax(cube_info->maximum_colors,cube_info->colors); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; DefineImageColormap(image,cube_info,cube_info->root); if (image->colors != number_colors) { image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap, image->colors+1,sizeof(*image->colormap)); if (image->colormap == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } return(MagickTrue); }
contact_utilities.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_CONTACT_UTILITIES) #define KRATOS_CONTACT_UTILITIES // System includes // External includes // Project includes #include "utilities/openmp_utils.h" #include "utilities/math_utils.h" #include "contact_structural_mechanics_application_variables.h" #include "includes/model_part.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ /** * @class ContactUtilities * @ingroup ContactStructuralMechanicsApplication * @brief This class includes some utilities used for contact computations * @author Vicente Mataix Ferrandiz */ class ContactUtilities { public: ///@name Type Definitions ///@{ /// Pointer definition of MortarUtilities KRATOS_CLASS_POINTER_DEFINITION( ContactUtilities ); // Some geometrical definitions typedef Node<3> NodeType; typedef Point::CoordinatesArrayType CoordinatesArrayType; /// Definition of geometries typedef Geometry<NodeType> GeometryType; /// The containers of the components of the model parts typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; /// Index type definition typedef std::size_t IndexType; /// Size type definition typedef std::size_t SizeType; ///@} ///@name Life Cycle ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ ///@} ///@name Operations ///@{ /** * @brief This function computes the relative size of the mesh * @param rModelPart The modelpart to compute */ static inline double CalculateRelativeSizeMesh(ModelPart& rModelPart) { return 
CalculateMaxNodalH(rModelPart)/CalculateMinimalNodalH(rModelPart); } /** * @brief This method computes the maximal nodal H * @param rModelPart The modelpart to compute */ static inline double CalculateMaxNodalH(ModelPart& rModelPart) { // We iterate over the nodes NodesArrayType& r_nodes_array = rModelPart.Nodes(); const auto it_node_begin = r_nodes_array.begin(); // // Creating the max auxiliar value // double max_value = 0.0; // // #pragma omp parallel for reduction(max:max_value) // for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) { // auto it_node = it_node_begin + i; // KRATOS_DEBUG_ERROR_IF_NOT(it_node->SolutionStepsDataHas(NODAL_H)) << "ERROR:: NODAL_H not added" << std::endl; // max_value = std::max(max_value, it_node->FastGetSolutionStepValue(NODAL_H)); // } // // return max_value; // Creating a buffer for parallel vector fill const int num_threads = OpenMPUtils::GetNumThreads(); std::vector<double> max_vector(num_threads, 0.0); double nodal_h; #pragma omp parallel for private(nodal_h) for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) { auto it_node = it_node_begin + i; KRATOS_DEBUG_ERROR_IF_NOT(it_node->SolutionStepsDataHas(NODAL_H)) << "ERROR:: NODAL_H not added" << std::endl; nodal_h = it_node->FastGetSolutionStepValue(NODAL_H); const int id = OpenMPUtils::ThisThread(); if (nodal_h > max_vector[id]) max_vector[id] = nodal_h; } return *std::max_element(max_vector.begin(), max_vector.end()); } /** * @brief This method computes the mean nodal H * @param rModelPart The modelpart to compute */ static inline double CalculateMeanNodalH(ModelPart& rModelPart) { // We iterate over the nodes NodesArrayType& r_nodes_array = rModelPart.Nodes(); const auto it_node_begin = r_nodes_array.begin(); // Creating the sum auxiliar value double sum_nodal_h = 0.0; #pragma omp parallel for reduction(+:sum_nodal_h) for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) { auto it_node = it_node_begin + i; 
KRATOS_DEBUG_ERROR_IF_NOT(it_node->SolutionStepsDataHas(NODAL_H)) << "ERROR:: NODAL_H not added" << std::endl; sum_nodal_h += it_node->FastGetSolutionStepValue(NODAL_H);; } return sum_nodal_h/static_cast<double>(r_nodes_array.size()); } /** * @brief This method computes the minimal nodal H * @param rModelPart The modelpart to compute */ static inline double CalculateMinimalNodalH(ModelPart& rModelPart) { // We iterate over the nodes NodesArrayType& r_nodes_array = rModelPart.Nodes(); const auto it_node_begin = r_nodes_array.begin(); // // Creating the min auxiliar value // double min_value = 0.0; // // #pragma omp parallel for reduction(min:min_value) // for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) { // auto it_node = it_node_begin + i; // KRATOS_DEBUG_ERROR_IF_NOT(it_node->SolutionStepsDataHas(NODAL_H)) << "ERROR:: NODAL_H not added" << std::endl; // min_value = std::min(min_value, it_node->FastGetSolutionStepValue(NODAL_H)); // } // // return min_value; // Creating a buffer for parallel vector fill const int num_threads = OpenMPUtils::GetNumThreads(); std::vector<double> min_vector(num_threads, 0.0); double nodal_h; #pragma omp parallel for private(nodal_h) for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) { auto it_node = it_node_begin + i; KRATOS_DEBUG_ERROR_IF_NOT(it_node->SolutionStepsDataHas(NODAL_H)) << "ERROR:: NODAL_H not added" << std::endl; nodal_h = it_node->FastGetSolutionStepValue(NODAL_H); const int id = OpenMPUtils::ThisThread(); if (nodal_h > min_vector[id]) min_vector[id] = nodal_h; } return *std::min_element(min_vector.begin(), min_vector.end()); } /** * @brief This function scales the points according to a factor (to increase the bounding box) * @param rPointToScale The point to scale * @param rNormal The normal of the point * @param LengthSearch The factor considered to "grow" the node */ template<class TPointType> static inline void ScaleNode( TPointType& rPointToScale, const array_1d<double, 3>& rNormal, 
        const double LengthSearch
        )
    {
        // Shift the point along the given normal by LengthSearch
        noalias(rPointToScale.Coordinates()) = rPointToScale.Coordinates() + rNormal * LengthSearch;
    }

    /**
     * @brief Calculates the distance between nodes
     * @param rPointOrigin The first node
     * @param rPointDestiny The second node
     * @return The Euclidean (3D) distance between the two coordinate arrays
     */
    static inline double DistancePoints(
        const GeometryType::CoordinatesArrayType& rPointOrigin,
        const GeometryType::CoordinatesArrayType& rPointDestiny
        )
    {
        return std::sqrt((rPointOrigin[0] - rPointDestiny[0]) * (rPointOrigin[0] - rPointDestiny[0])
                       + (rPointOrigin[1] - rPointDestiny[1]) * (rPointOrigin[1] - rPointDestiny[1])
                       + (rPointOrigin[2] - rPointDestiny[2]) * (rPointOrigin[2] - rPointDestiny[2]));
    }

    /**
     * @brief It calculates the center updated in u_n+1 or u_n+1/2
     * @details Stores the per-node displacement increment in DELTA_COORDINATES
     * @param rModelPart The modelpart to update
     * @param DeltaTime The increment of time considered
     * @param HalfJump If the jump is just half dt
     */
    static inline void ComputeStepJump(
        ModelPart& rModelPart,
        const double DeltaTime,
        const bool HalfJump = true
        )
    {
        // Time constants: 0.25*dt*(v_n + v_n+1) + 0.125*dt^2*a_n for the
        // half jump, 0.5*dt*(v_n + v_n+1) + 0.5*dt^2*a_n for the full jump
        const double velocity_constant = HalfJump ? 0.25 : 0.5;
        const double acceleration_constant = HalfJump ? 0.125 : 0.5;

        // Iterate over the nodes
        NodesArrayType& r_nodes_array = rModelPart.Nodes();

        // Node iterator
        const auto it_node_begin = r_nodes_array.begin();

        // We compute the jump (new_delta_disp is firstprivate: one scratch copy per thread)
        array_1d<double, 3> new_delta_disp;
        #pragma omp parallel for firstprivate(new_delta_disp)
        for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
            auto it_node = it_node_begin + i;
            const array_1d<double, 3>& r_current_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
            const array_1d<double, 3>& r_previous_velocity = it_node->FastGetSolutionStepValue(VELOCITY, 1);
            const array_1d<double, 3>& r_previous_acceleration = it_node->FastGetSolutionStepValue(ACCELERATION, 1);
            noalias(new_delta_disp) = velocity_constant * DeltaTime * (r_current_velocity + r_previous_velocity) + acceleration_constant * std::pow(DeltaTime, 2) * r_previous_acceleration;
            // Fixed displacement DOFs cannot move: zero the corresponding component
            if (it_node->IsFixed(DISPLACEMENT_X)) new_delta_disp[0] = 0.0;
            if (it_node->IsFixed(DISPLACEMENT_Y)) new_delta_disp[1] = 0.0;
            if (it_node->IsFixed(DISPLACEMENT_Z)) new_delta_disp[2] = 0.0;
            it_node->SetValue(DELTA_COORDINATES, new_delta_disp);
        }
    }

    /**
     * @brief It checks the activity of the current contact simulation
     * @param rModelPart The modelpart to check the activity
     * @param ThrowError If an error is thrown
     * @return True if at least one SLAVE node is flagged ACTIVE
     */
    static inline bool CheckActivity(
        ModelPart& rModelPart,
        const bool ThrowError = true
        )
    {
        // Iterate over the nodes
        NodesArrayType& r_nodes_array = rModelPart.Nodes();

        // Node iterator
        const auto it_node_begin = r_nodes_array.begin();

        // Count the slave nodes currently flagged ACTIVE
        IndexType aux_check = 0;
        #pragma omp parallel for reduction(+:aux_check)
        for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
            auto it_node = it_node_begin + i;
            if (it_node->Is(SLAVE)) {
                if (it_node->Is(ACTIVE)) {
                    aux_check += 1;
                }
            }
        }
        const bool is_active = aux_check == 0 ? false : true;
        // Optionally abort when contact has been completely lost
        KRATOS_ERROR_IF(ThrowError && !is_active) << "CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;
        return is_active;
    }

    /**
     * @brief This method removes the conditions that have geometry parts (paired contact conditions)
     * @details So for example we can remove potential errors in remeshing processes
     * @param rModelPart The modelpart to clean up
     */
    static inline void CleanContactModelParts(ModelPart& rModelPart)
    {
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
        KRATOS_TRACE_IF("Empty model part", r_conditions_array.size() == 0) << "YOUR CONTACT MODEL PART IS EMPTY" << std::endl;
        const auto it_cond_begin = r_conditions_array.begin();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
            auto it_cond = it_cond_begin + i;
            const auto& r_geometry = it_cond->GetGeometry();
            // Conditions composed of geometry parts are flagged for erasure
            if (r_geometry.NumberOfGeometryParts() > 0) {
                it_cond->Set(TO_ERASE);
            }
        }
        rModelPart.RemoveConditionsFromAllLevels(TO_ERASE);
    }

    /**
     * @brief It computes the explicit contributions of the conditions
     * @param rModelPart The modelpart to update
     */
    static inline void ComputeExplicitContributionConditions(ModelPart& rModelPart)
    {
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
        KRATOS_TRACE_IF("Empty model part", r_conditions_array.size() == 0) << "YOUR COMPUTING CONTACT MODEL PART IS EMPTY" << std::endl;
        const auto it_cond_begin = r_conditions_array.begin();
        const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
            auto it_cond = it_cond_begin + i;
            it_cond->AddExplicitContribution(r_process_info);
        }
    }

    /**
     * @brief It activates the conditions with active nodes
     * @param rModelPart The modelpart to check
     */
    static inline void ActivateConditionWithActiveNodes(ModelPart& rModelPart)
    {
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
        KRATOS_TRACE_IF("Empty model part", r_conditions_array.size() == 0) << "YOUR COMPUTING CONTACT MODEL PART IS EMPTY" << std::endl;
        const auto it_cond_begin = r_conditions_array.begin();
        bool is_active = false;
        #pragma omp parallel for firstprivate(is_active)
        for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
            auto it_cond = it_cond_begin + i;
            const GeometryType& r_geometry = it_cond->GetGeometry();
            if (r_geometry.NumberOfGeometryParts() > 0) {
                // A condition is active if any node of geometry part 0
                // (presumably the slave side -- confirm) is flagged ACTIVE
                const GeometryType& r_parent_geometry = r_geometry.GetGeometryPart(0);
                is_active = false;
                for ( IndexType i_node = 0; i_node < r_parent_geometry.size(); ++i_node ) {
                    if (r_parent_geometry[i_node].Is(ACTIVE)) {
                        is_active = true;
                        break;
                    }
                }
                it_cond->Set(ACTIVE, is_active);
            }
        }
    }

    /**
     * @brief It calculates the center updated in u_n+1/2
     * @param rThisGeometry The geometry to calculate
     * @return point: The center in u_n+1/2 (Newmark)
     */
    static inline array_1d<double, 3> GetHalfJumpCenter(GeometryType& rThisGeometry)
    {
        array_1d<double, 3> center = (rThisGeometry.Center()).Coordinates();

        // Initialize variables
        Vector N;
        GeometryType::CoordinatesArrayType local_point;

        // Get shape functions at the geometric center
        rThisGeometry.PointLocalCoordinates( local_point, center );
        rThisGeometry.ShapeFunctionsValues( N, local_point );

        // DELTA_COORDINATES must have been set by ComputeStepJump() beforehand
        KRATOS_DEBUG_ERROR_IF_NOT(rThisGeometry[0].Has(DELTA_COORDINATES)) << "Please call ComputeStepJump() first" << std::endl;
        // Interpolate the nodal jumps at the center and shift it accordingly
        const Vector new_delta_disp_center = prod(trans(GetVariableMatrix(rThisGeometry, DELTA_COORDINATES)), N);
        for (IndexType i = 0; i < new_delta_disp_center.size(); ++i)
            center[i] += new_delta_disp_center[i];

        return center;
    }

    /**
     * @brief It calculates the matrix containing the tangent vector of the r_gt (for frictional contact)
     * @param rGeometry The geometry to calculate
     * @param StepSlip The considered step slip
     * @return tangent_matrix The matrix containing the tangent vectors of the r_gt
     */
    template< std::size_t TDim, std::size_t TNumNodes>
    static inline BoundedMatrix<double, TNumNodes, TDim> ComputeTangentMatrixSlip(
        const GeometryType& rGeometry,
        const std::size_t StepSlip = 1
        )
    {
        /* DEFINITIONS */
        // Zero tolerance
        const double zero_tolerance = std::numeric_limits<double>::epsilon();
        // Tangent matrix
        BoundedMatrix<double, TNumNodes, TDim> tangent_matrix;

        for (IndexType i_node = 0; i_node < TNumNodes; ++i_node) {
            const array_1d<double, 3>& r_gt = rGeometry[i_node].FastGetSolutionStepValue(WEIGHTED_SLIP, StepSlip);
            const double norm_slip = norm_2(r_gt);
            if (norm_slip > zero_tolerance) { // Non zero r_gt: use the normalized slip direction
                const array_1d<double, 3> tangent_slip = r_gt/norm_slip;
                for (std::size_t i_dof = 0; i_dof < TDim; ++i_dof)
                    tangent_matrix(i_node, i_dof) = tangent_slip[i_dof];
            } else { // We consider the tangent direction as auxiliar (derived from the nodal normal)
                const array_1d<double, 3>& r_normal = rGeometry[i_node].FastGetSolutionStepValue(NORMAL);
                array_1d<double, 3> tangent_xi, tangent_eta;
                MathUtils<double>::OrthonormalBasis(r_normal, tangent_xi, tangent_eta);
                if (TDim == 3) {
                    for (std::size_t i_dof = 0; i_dof < 3; ++i_dof)
                        tangent_matrix(i_node, i_dof) = tangent_xi[i_dof];
                } else  {
                    // 2D: pick whichever basis vector lies in the XY plane
                    if (std::abs(tangent_xi[2]) > std::numeric_limits<double>::epsilon()) {
                        for (std::size_t i_dof = 0; i_dof < 2; ++i_dof)
                            tangent_matrix(i_node, i_dof) = tangent_eta[i_dof];
                    } else {
                        for (std::size_t i_dof = 0; i_dof < 2; ++i_dof)
                            tangent_matrix(i_node, i_dof) = tangent_xi[i_dof];
                    }
                }
            }
        }

        return tangent_matrix;
    }

private:

    /**
     * @brief It calculates the matrix of a variable of a geometry
     * @param rNodes The geometry to calculate
     * @param rVarName The name of the variable to calculate
     * @return var_matrix: The matrix containing the variables of the geometry
     */
    static inline Matrix GetVariableMatrix(
        const GeometryType& rNodes,
        const Variable<array_1d<double,3> >& rVarName
        )
    {
        /* DEFINITIONS */
        const SizeType num_nodes = rNodes.size();
        const SizeType dim = rNodes.WorkingSpaceDimension();
        Matrix var_matrix(num_nodes, dim);

        // One row per node, one column per spatial dimension
        for (IndexType i_node = 0; i_node < num_nodes; i_node++) {
            const array_1d<double, 3> value = rNodes[i_node].GetValue(rVarName);
            for (IndexType i_dof = 0; i_dof < dim; i_dof++)
                var_matrix(i_node, i_dof) = value[i_dof];
        }

        return var_matrix;
    }

}; // class ContactUtilities
}  // namespace Kratos
#endif /* KRATOS_CONTACT_UTILITIES defined */
openmp.c
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <math.h>
#include "omp.h"

#define N 21

/* Deterministic primality test by trial division over odd candidates only.
 * Runs in O(sqrt(n)).  Returns true iff n is prime. */
bool is_prime_optimized(unsigned long long n) {
	unsigned long long i;
	if (n % 2 == 0)
		return n == 2;
	for (i = 3; i * i <= n; i += 2) {
		if (n % i == 0)
			return false;
	}
	return n > 1;
}

/* Naive O(n) trial division, kept for benchmark comparison.
 * BUGFIX: the loop counter must be unsigned long long; the original plain
 * int overflows (undefined behaviour) for n > INT_MAX. */
bool is_prime(unsigned long long n) {
	unsigned long long i;
	for (i = 2; i < n; i++) {
		if (n % i == 0)
			return false;
	}
	return n > 1;
}

/* Counts primes in successive ranges (2^(i-1), 2^i] for i = 0..N-1 in
 * parallel, printing the cumulative count after each range, then reports
 * the elapsed wall-clock time.
 * Usage: ./openmp <num_threads> */
int main(int argc, char * argv[]) {
	unsigned long long i, j, primes = 0, number, prevNumber;
	double t1, t2;

	/* BUGFIX: the original dereferenced argv[1] without checking argc */
	if (argc < 2) {
		fprintf(stderr, "Usage: %s <num_threads>\n", argv[0]);
		return EXIT_FAILURE;
	}
	int num_threads = atoi(argv[1]);
	if (num_threads <= 0) {
		fprintf(stderr, "Invalid thread count: %s\n", argv[1]);
		return EXIT_FAILURE;
	}
	omp_set_num_threads(num_threads);

	t1 = omp_get_wtime();
	for (i = 0; i < N; i++) {
		/* Powers of two as exact integer shifts instead of the original
		 * powl() float round-trip (which relied on truncation of 2^-1
		 * for i == 0).  The i == 0 range is empty by definition. */
		prevNumber = (i == 0) ? 0 : (1ULL << (i - 1));
		number = 1ULL << i;
#pragma omp parallel for reduction(+:primes) schedule(dynamic)
		for (j = prevNumber + 1; j <= number; j++) {
			if (is_prime_optimized(j)) {
				primes++;
			}
		}
		printf("%llu: %llu\n", number, primes);
	}
	t2 = omp_get_wtime();
	printf("Done in %lf s\n", t2 - t1);
	return EXIT_SUCCESS;
}
smecy.c
//#pragma smecy remap arg(2, in, <some mapping matrix>) void SMECY_remap_int1D_to_int2D(int size_in_0, int in[size_in_0], int size_out_0, int size_out_1, int offset_out_0, int offset_out_1, int window_out_0, int window_out_1, int out[size_out_0][size_out_1]) { #pragma omp parallel for for(int i_0 = 0; i_0 < window_out_0; i_0++) for(int i_1 = 0; i_1 < window_out_1; i_1++) out[offset_out_0 + i_0][offset_out_1 + i_1] = in[window_out_1*i_0 + i_1]; } void SMECY_remap_int2D_to_int1D(int size_out_0, int size_out_1, int offset_out_0, int offset_out_1, int window_out_0, int window_out_1, int out[size_out_0][size_out_1], int size_in_0, int in[size_in_0]) { #pragma omp parallel for for(int i_0 = 0; i_0 < window_out_0; i_0++) for(int i_1 = 0; i_1 < window_out_1; i_1++) in[window_out_1*i_0 + i_1] = out[offset_out_0 + i_0][offset_out_1 + i_1]; }
struct-enter-exit-data-1.c
/* Check 'GOMP_MAP_STRUCT' mapping, and in particular that it gets removed from
   OpenACC 'exit data' directives. */

/* { dg-additional-options "-fdump-tree-gimple" } */

struct str {
  int a;
  int *b;
  int *c;
  int d;
  int *e;
  int f;
};

#define N 1024

/* Maps only a subset of the struct members (d and e are deliberately left
   out, see the commented-out clauses) so the gimplified 'struct:s' clause
   covers a partial struct of 4 members. */
void test (int *b, int *c, int *e)
{
  struct str s = { .a = 0, .b = b, .c = c, .d = 0, .e = e, .f = 0 };

#pragma acc enter data copyin(s.a, s.b[0:N], s.c[0:N] /* , s.d */ /* , s.e[0:N] */, s.f)
  /* { dg-final { scan-tree-dump {(?n)#pragma omp target oacc_enter_exit_data map\(struct:s \[len: 4\]\) map\(to:s.a \[len: [0-9]+\]\) map\(alloc:s.b \[len: [0-9]+\]\) map\(alloc:s.c \[len: [0-9]+\]\) map\(to:s.f \[len: [0-9]+\]\) map\(to:\*[_0-9]+ \[len: [0-9]+\]\) map\(attach:s.b \[bias: 0\]\) map\(to:\*[_0-9]+ \[len: [0-9]+\]\) map\(attach:s.c \[bias: 0\]\)$} gimple } } */

#pragma acc exit data copyout(s.a, s.b[0:N], s.c[0:N] /* , s.d */ /* , s.e[0:N] */, s.f)
  /* { dg-final { scan-tree-dump {(?n)#pragma omp target oacc_enter_exit_data map\(from:s.a \[len: [0-9]+\]\) map\(release:s.b \[len: [0-9]+\]\) map\(release:s.c \[len: [0-9]+\]\) map\(from:s.f \[len: [0-9]+\]\) map\(from:\*[_0-9]+ \[len: [0-9]+\]\) map\(detach:s.b \[bias: 0\]\) map\(from:\*[_0-9]+ \[len: [0-9]+\]\) map\(detach:s.c \[bias: 0\]\)$} gimple } } */
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
solver_cvodes.c
/**
 * \file
 * \brief The integration driver for the CVODE solver
 *
 * \author Nicholas Curtis
 * \date 03/10/2015
 */

#include "header.h"
#include "solver.h"

/* CVODES INCLUDES */
#include "sundials/sundials_types.h"
#include "sundials/sundials_math.h"
#include "sundials/sundials_nvector.h"
#include "nvector/nvector_serial.h"
#include "cvodes/cvodes.h"
#include "cvodes/cvodes_lapack.h"

/* Per-thread scratch state allocated elsewhere (indexed by OpenMP thread id) */
extern N_Vector *y_locals;
extern double* y_local_vectors;
extern void** integrators;

#ifdef GENERATE_DOCS
namespace cvode {
#endif

/**
 * \brief Integration driver for the CPU integrators
 * \param[in] NUM the number of IVPs to solve
 * \param[in] t the current IVP time
 * \param[in] t_end the time to integrate the IVP to
 * \param[in] pr_global the pressure value for the IVPs
 * \param[in, out] y_global the state vectors
 *
 * The integration driver for the CVODEs solver.  Each OpenMP thread reuses
 * its own pre-allocated CVODES instance and work vector (indexed by thread
 * id, not by IVP index), re-initializing it for every IVP it picks up.
 */
void intDriver (const int NUM, const double t, const double t_end,
                const double *pr_global, double *y_global)
{
    int tid;
    double t_next;
    #pragma omp parallel for shared(y_global, pr_global, integrators, y_locals) private(tid, t_next)
    for (tid = 0; tid < NUM; ++tid) {
        int index = omp_get_thread_num();

        // local array with initial values
        N_Vector fill = y_locals[index];
        /* NOTE(review): pr_local is a stack variable whose address is handed
           to CVodeSetUserData; this is only safe because CVode() is invoked
           within the same loop iteration. */
        double pr_local = pr_global[tid];

        // load local array with initial values from global array
        // (y_global is laid out variable-major: element i of IVP tid lives
        // at y_global[tid + i * NUM])
        double* y_local = NV_DATA_S(fill);
        for (int i = 0; i < NSP; i++) {
            y_local[i] = y_global[tid + i * NUM];
        }

        // reinit this integrator for time t, w/ updated state
        int flag = CVodeReInit(integrators[index], t, fill);
        if (flag != CV_SUCCESS) {
            printf("Error reinitializing integrator for thread %d, code: %d\n", tid, flag);
            exit(flag);
        }

        // set user data to Pr
        flag = CVodeSetUserData(integrators[index], &pr_local);
        if (flag != CV_SUCCESS) {
            printf("Error setting user data for thread %d, code: %d\n", tid, flag);
            exit(flag);
        }

        // set end time
        flag = CVodeSetStopTime(integrators[index], t_end);
        if (flag != CV_SUCCESS) {
            printf("Error setting end time for thread %d, code: %d\n", tid, flag);
            exit(flag);
        }

        // call integrator for one time step
        /* NOTE(review): t_next != t_end is an exact floating-point compare;
           CVode in CV_NORMAL mode with a stop time is expected to land
           exactly on t_end -- confirm against the CVODES documentation. */
        flag = CVode(integrators[index], t_end, fill, &t_next, CV_NORMAL);
        if ((flag != CV_SUCCESS && flag != CV_TSTOP_RETURN) || t_next != t_end) {
            printf("Error on integration step for thread %d, code %d\n", tid, flag);
            exit(flag);
        }

        // update global array with integrated values
        for (int i = 0; i < NSP; i++) {
            y_global[tid + i * NUM] = y_local[i];
        }

    } // end tid loop

} // end intDriver

#ifdef GENERATE_DOCS
}
#endif
GB_emult_02.c
//------------------------------------------------------------------------------
// GB_emult_02: C = A.*B where A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// C = A.*B where A is sparse/hyper and B is bitmap/full constructs C with
// the same sparsity structure as A.  This method can also be called with
// the two input matrices swapped, with flipxy true, to handle the case
// where A is bitmap/full and B is sparse/hyper.

// When no mask is present, or the mask is applied later, this method handles
// the following cases:

//      ------------------------------------------
//      C       =           A       .*      B
//      ------------------------------------------
//      sparse  .           sparse          bitmap
//      sparse  .           sparse          full
//      sparse  .           bitmap          sparse
//      sparse  .           full            sparse

// If M is sparse/hyper and complemented, it is not passed here:

//      ------------------------------------------
//      C       <!M>=       A       .*      B
//      ------------------------------------------
//      sparse  sparse      sparse          bitmap  (mask later)
//      sparse  sparse      sparse          full    (mask later)
//      sparse  sparse      bitmap          sparse  (mask later)
//      sparse  sparse      full            sparse  (mask later)

// If M is present, it is bitmap/full:

//      ------------------------------------------
//      C       <M>=        A       .*      B
//      ------------------------------------------
//      sparse  bitmap      sparse          bitmap
//      sparse  bitmap      sparse          full
//      sparse  bitmap      bitmap          sparse
//      sparse  bitmap      full            sparse

//      ------------------------------------------
//      C       <M>=        A       .*      B
//      ------------------------------------------
//      sparse  full        sparse          bitmap
//      sparse  full        sparse          full
//      sparse  full        bitmap          sparse
//      sparse  full        full            sparse

//      ------------------------------------------
//      C       <!M>=       A       .*      B
//      ------------------------------------------
//      sparse  bitmap      sparse          bitmap
//      sparse  bitmap      sparse          full
//      sparse  bitmap      bitmap          sparse
//      sparse  bitmap      full            sparse

//      ------------------------------------------
//      C       <!M>=       A       .*      B
//      ------------------------------------------
//      sparse  full        sparse          bitmap
//      sparse  full        sparse          full
//      sparse  full        bitmap          sparse
//      sparse  full        full            sparse

#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif

// free the ek-slice and counting workspace (always done on exit)
#define GB_FREE_WORKSPACE                   \
{                                           \
    GB_WERK_POP (Work, int64_t) ;           \
    GB_WERK_POP (A_ek_slicing, int64_t) ;   \
}

// on error: free workspace and the partially-built output matrix C
#define GB_FREE_ALL             \
{                               \
    GB_FREE_WORKSPACE ;         \
    GB_phbix_free (C) ;         \
}

GrB_Info GB_emult_02        // C=A.*B when A is sparse/hyper, B bitmap/full
(
    GrB_Matrix C,           // output matrix, static header
    const GrB_Type ctype,   // type of output matrix C
    const bool C_is_csc,    // format of output matrix C
    const GrB_Matrix M,     // optional mask, unused if NULL
    const bool Mask_struct, // if true, use only the structure of M
    const bool Mask_comp,   // if true, use !M
    const GrB_Matrix A,     // input A matrix (sparse/hyper)
    const GrB_Matrix B,     // input B matrix (bitmap/full)
    GrB_BinaryOp op,        // op to perform C = op (A,B)
    bool flipxy,            // if true use fmult(y,x) else fmult(x,y)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL && C->static_header) ;

    ASSERT_MATRIX_OK_OR_NULL (M, "M for emult_02", GB0) ;
    ASSERT_MATRIX_OK (A, "A for emult_02", GB0) ;
    ASSERT_MATRIX_OK (B, "B for emult_02", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for emult_02", GB0) ;
    ASSERT_TYPE_OK (ctype, "ctype for emult_02", GB0) ;

    ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;

    ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B)) ;
    // NOTE(review): this assertion tests B twice; per the tables above, when
    // M is present it must be bitmap/full, so this likely should assert
    // GB_IS_BITMAP (M) || GB_IS_FULL (M) -- confirm against upstream.
    ASSERT (M == NULL || GB_IS_BITMAP (B) || GB_IS_FULL (B)) ;

    // C inherits the sparsity structure of A
    int C_sparsity = GB_sparsity (A) ;

    if (M == NULL)
    {
        GBURBLE ("emult_02:(%s=%s.*%s)",
            GB_sparsity_char (C_sparsity),
            GB_sparsity_char_matrix (A),
            GB_sparsity_char_matrix (B)) ;
    }
    else
    {
        GBURBLE ("emult_02:(%s<%s%s%s>=%s.*%s) ",
            GB_sparsity_char (C_sparsity),
            Mask_comp ? "!" : "",
            GB_sparsity_char_matrix (M),
            Mask_struct ? ",struct" : "",
            GB_sparsity_char_matrix (A),
            GB_sparsity_char_matrix (B)) ;
    }

    //--------------------------------------------------------------------------
    // revise the operator to handle flipxy
    //--------------------------------------------------------------------------

    // Replace the ANY operator with SECOND.  ANY and SECOND give the same
    // result if flipxy is false.  However, SECOND is changed to FIRST if
    // flipxy is true.  This ensures that the results do not depend on the
    // sparsity structures of A and B.

    if (op->opcode == GB_ANY_binop_code)
    {
        switch (op->xtype->code)
        {
            case GB_BOOL_code   : op = GrB_SECOND_BOOL   ; break ;
            case GB_INT8_code   : op = GrB_SECOND_INT8   ; break ;
            case GB_INT16_code  : op = GrB_SECOND_INT16  ; break ;
            case GB_INT32_code  : op = GrB_SECOND_INT32  ; break ;
            case GB_INT64_code  : op = GrB_SECOND_INT64  ; break ;
            case GB_UINT8_code  : op = GrB_SECOND_UINT8  ; break ;
            case GB_UINT16_code : op = GrB_SECOND_UINT16 ; break ;
            case GB_UINT32_code : op = GrB_SECOND_UINT32 ; break ;
            case GB_UINT64_code : op = GrB_SECOND_UINT64 ; break ;
            case GB_FP32_code   : op = GrB_SECOND_FP32   ; break ;
            case GB_FP64_code   : op = GrB_SECOND_FP64   ; break ;
            case GB_FC32_code   : op = GxB_SECOND_FC32   ; break ;
            case GB_FC64_code   : op = GxB_SECOND_FC64   ; break ;
            default: ;
        }
    }

    // if possible, replace op with its flipped counterpart so that the
    // remainder of the method can treat flipxy as false
    if (flipxy)
    {
        bool handled ;
        op = GB_flip_op (op, &handled) ;
        if (handled) flipxy = false ;
    }
    ASSERT_BINARYOP_OK (op, "final op for emult_02", GB0) ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    // Work is split into three arrays of size A_ntasks each (Wfirst, Wlast,
    // Cp_kfirst), allocated below only when the counting phase is needed
    GB_WERK_DECLARE (Work, int64_t) ;
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;
    int64_t *restrict Cp_kfirst = NULL ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;

    //--------------------------------------------------------------------------
    // get M, A, and B
    //--------------------------------------------------------------------------

    const int8_t *restrict Mb = (M == NULL) ? NULL : M->b ;
    // Mx is NULL for a structural mask: only M's pattern is consulted
    const GB_void *restrict Mx = (M == NULL || Mask_struct) ? NULL :
        (const GB_void *) M->x ;
    const size_t msize = (M == NULL) ? 0 : M->type->size ;

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const int64_t vlen = A->vlen ;
    const int64_t vdim = A->vdim ;
    const int64_t nvec = A->nvec ;
    const int64_t anz = GB_nnz (A) ;

    const int8_t *restrict Bb = B->b ;
    const bool B_is_bitmap = GB_IS_BITMAP (B) ;

    //--------------------------------------------------------------------------
    // check if C is iso and compute its iso value if it is
    //--------------------------------------------------------------------------

    const size_t csize = ctype->size ;
    GB_void cscalar [GB_VLA(csize)] ;
    bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;

    //--------------------------------------------------------------------------
    // allocate C->p and C->h
    //--------------------------------------------------------------------------

    GB_OK (GB_new (&C, true, // sparse or hyper (same as A), static header
        ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
        C_sparsity, A->hyper_switch, nvec, Context)) ;
    int64_t *restrict Cp = C->p ;

    //--------------------------------------------------------------------------
    // slice the input matrix A
    //--------------------------------------------------------------------------

    // defines A_nthreads, A_ntasks, and the kfirst/klast/pstart slice arrays
    int A_nthreads, A_ntasks ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    GB_SLICE_MATRIX (A, 8, chunk) ;

    //--------------------------------------------------------------------------
    // count entries in C
    //--------------------------------------------------------------------------

    C->nvec_nonempty = A->nvec_nonempty ;
    C->nvec = nvec ;

    // If B is full and there is no mask, every entry of A survives, so C has
    // exactly the pattern of A and the counting phase can be skipped.
    const bool C_has_pattern_of_A = !B_is_bitmap && (M == NULL) ;

    if (!C_has_pattern_of_A)
    {

        //----------------------------------------------------------------------
        // allocate workspace
        //----------------------------------------------------------------------

        GB_WERK_PUSH (Work, 3*A_ntasks, int64_t) ;
        if (Work == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        Wfirst    = Work ;
        Wlast     = Work + A_ntasks ;
        Cp_kfirst = Work + A_ntasks * 2 ;

        //----------------------------------------------------------------------
        // count entries in C
        //----------------------------------------------------------------------

        // This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).
        // A task's first and last vectors may be shared with neighboring
        // tasks, so their partial counts go into Wfirst/Wlast and are merged
        // into Cp afterwards by GB_ek_slice_merge1.

        if (M == NULL)
        {

            //------------------------------------------------------------------
            // Method2(a): C = A.*B where A is sparse/hyper and B is bitmap
            //------------------------------------------------------------------

            ASSERT (B_is_bitmap) ;

            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                Wfirst [tid] = 0 ;
                Wlast  [tid] = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // count the entries in C(:,j)
                    int64_t j = GBH (Ah, k) ;
                    int64_t pB_start = j * vlen ;
                    int64_t pA, pA_end ;
                    GB_get_pA (&pA, &pA_end, tid, k,
                        kfirst, klast, pstart_Aslice, Ap, vlen) ;
                    int64_t cjnz = 0 ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        // an entry of A survives iff B's bitmap is set there
                        cjnz += Bb [pB_start + Ai [pA]] ;
                    }
                    if (k == kfirst)
                    {
                        Wfirst [tid] = cjnz ;
                    }
                    else if (k == klast)
                    {
                        Wlast [tid] = cjnz ;
                    }
                    else
                    {
                        Cp [k] = cjnz ;
                    }
                }
            }

        }
        else
        {

            //------------------------------------------------------------------
            // Method2(c): C<#M> = A.*B; M, B bitmap/full, A is sparse/hyper
            //------------------------------------------------------------------

            ASSERT (M != NULL) ;

            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                Wfirst [tid] = 0 ;
                Wlast  [tid] = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // count the entries in C(:,j)
                    int64_t j = GBH (Ah, k) ;
                    int64_t pB_start = j * vlen ;
                    int64_t pA, pA_end ;
                    GB_get_pA (&pA, &pA_end, tid, k,
                        kfirst, klast, pstart_Aslice, Ap, vlen) ;
                    int64_t cjnz = 0 ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        int64_t pB = pB_start + i ;
                        // mij = M(i,j), cast to boolean; structural masks
                        // have Mx == NULL and GB_mcast then yields true
                        bool mij = GBB (Mb, pB) && GB_mcast (Mx, pB, msize) ;
                        mij = mij ^ Mask_comp ;
                        cjnz += (mij && GBB (Bb, pB)) ;
                    }
                    if (k == kfirst)
                    {
                        Wfirst [tid] = cjnz ;
                    }
                    else if (k == klast)
                    {
                        Wlast [tid] = cjnz ;
                    }
                    else
                    {
                        Cp [k] = cjnz ;
                    }
                }
            }
        }

        //----------------------------------------------------------------------
        // finalize Cp, cumulative sum of Cp and compute Cp_kfirst
        //----------------------------------------------------------------------

        GB_ek_slice_merge1 (Cp, Wfirst, Wlast, A_ek_slicing, A_ntasks) ;
        GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
            Wfirst, Wlast, A_ek_slicing, A_ntasks, A_nthreads, Context) ;

    }

    //--------------------------------------------------------------------------
    // allocate C->i and C->x
    //--------------------------------------------------------------------------

    int64_t cnz = (C_has_pattern_of_A) ? anz : Cp [nvec] ;
    // set C->iso = C_iso   OK
    GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;

    //--------------------------------------------------------------------------
    // copy pattern into C
    //--------------------------------------------------------------------------

    // TODO: could make these components of C shallow instead of memcpy

    if (GB_IS_HYPERSPARSE (A))
    {
        // copy A->h into C->h
        GB_memcpy (C->h, Ah, nvec * sizeof (int64_t), A_nthreads) ;
    }

    if (C_has_pattern_of_A)
    {
        // Method2(b): B is full and no mask present, so the pattern of C is
        // the same as the pattern of A
        GB_memcpy (Cp, Ap, (nvec+1) * sizeof (int64_t), A_nthreads) ;
        GB_memcpy (C->i, Ai, cnz * sizeof (int64_t), A_nthreads) ;
    }

    C->jumbled = A->jumbled ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // get the opcode
    //--------------------------------------------------------------------------

    // if flipxy was true on input and the op is positional, FIRST, SECOND, or
    // PAIR, the op has already been flipped, so these tests do not have to
    // consider that case.

    GB_Opcode opcode = op->opcode ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    bool op_is_first  = (opcode == GB_FIRST_binop_code) ;
    bool op_is_second = (opcode == GB_SECOND_binop_code) ;
    bool op_is_pair   = (opcode == GB_PAIR_binop_code) ;
    GB_Type_code ccode = ctype->code ;

    //--------------------------------------------------------------------------
    // check if the values of A and/or B are ignored
    //--------------------------------------------------------------------------

    // With C = ewisemult (A,B), only the intersection of A and B is used.
    // If op is SECOND or PAIR, the values of A are never accessed.
    // If op is FIRST  or PAIR, the values of B are never accessed.
    // If op is PAIR, the values of A and B are never accessed.
    // Contrast with ewiseadd.

    // A is passed as x, and B as y, in z = op(x,y)
    bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
    bool B_is_pattern = op_is_first  || op_is_pair || op_is_positional ;

    //--------------------------------------------------------------------------
    // using a built-in binary operator (except for positional operators)
    //--------------------------------------------------------------------------

    #define GB_PHASE_2_OF_2

    bool done = false ;

    if (C_iso)
    {

        //----------------------------------------------------------------------
        // C is iso
        //----------------------------------------------------------------------

        // Cx [0] = cscalar = op (A,B)
        GB_BURBLE_MATRIX (C, "(iso emult) ") ;
        memcpy (C->x, cscalar, csize) ;

        // pattern of C = set intersection of pattern of A and B
        // flipxy is ignored since the operator is not applied
        #define GB_ISO_EMULT
        #include "GB_emult_02_template.c"
        done = true ;

    }
    else
    {

        #ifndef GBCOMPACT

            //------------------------------------------------------------------
            // define the worker for the switch factory
            //------------------------------------------------------------------

            #define GB_AemultB_02(mult,xname) GB (_AemultB_02_ ## mult ## xname)

            #define GB_BINOP_WORKER(mult,xname)                             \
            {                                                               \
                info = GB_AemultB_02(mult,xname) (C,                        \
                    M, Mask_struct, Mask_comp, A, B, flipxy,                \
                    Cp_kfirst, A_ek_slicing, A_ntasks, A_nthreads) ;        \
                done = (info != GrB_NO_VALUE) ;                             \
            }                                                               \
            break ;

            //------------------------------------------------------------------
            // launch the switch factory
            //------------------------------------------------------------------

            GB_Type_code xcode, ycode, zcode ;
            if (!op_is_positional &&
                GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
                    op, false, &opcode, &xcode, &ycode, &zcode) &&
                ccode == zcode)
            {
                // the PAIR operator is excluded here; the iso path above
                // already handles it
                #define GB_NO_PAIR
                #include "GB_binop_factory.c"
            }

        #endif
    }

    //--------------------------------------------------------------------------
    // generic worker
    //--------------------------------------------------------------------------

    if (!done)
    {
        GB_BURBLE_MATRIX (C, "(generic emult_02: %s) ", op->name) ;
        int ewise_method = flipxy ? GB_EMULT_METHOD3 : GB_EMULT_METHOD2 ;
        GB_ewise_generic (C, op, NULL, 0, 0,
            NULL, NULL, NULL, C_sparsity, ewise_method, Cp_kfirst,
            NULL, 0, 0, A_ek_slicing, A_ntasks, A_nthreads, NULL, 0, 0,
            M, Mask_struct, Mask_comp, A, B, Context) ;
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    GB_OK (GB_hypermatrix_prune (C, Context)) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    ASSERT_MATRIX_OK (C, "C output for emult_02", GB0) ;
    return (GrB_SUCCESS) ;
}
GB_unaryop__one_int8_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_int8_int8 // op(A') function: GB_tran__one_int8_int8 // C type: int8_t // A type: int8_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_int8_int8 ( int8_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_int8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_nest_testlock.c
#include <stdio.h> #include <omp.h> #include "omp_testsuite.h" int check_omp_nest_testlock (FILE * logFile) { omp_nest_lock_t lck; int nr_threads_in_single = 0; int result = 0; int nr_iterations = 0; int i; omp_init_nest_lock (&lck); #pragma omp parallel shared(lck) { #pragma omp for for (i = 0; i < LOOPCOUNT; i++) { /*omp_set_lock(&lck); */ while (!omp_test_nest_lock (&lck)) { }; #pragma omp flush nr_threads_in_single++; #pragma omp flush nr_iterations++; nr_threads_in_single--; result = result + nr_threads_in_single; omp_unset_nest_lock (&lck); } } omp_destroy_nest_lock (&lck); return ((result == 0) && (nr_iterations == LOOPCOUNT)); } int crosscheck_omp_nest_testlock (FILE * logFile) { omp_nest_lock_t lck; int nr_threads_in_single = 0; int result = 0; int nr_iterations = 0; int i; omp_init_nest_lock (&lck); #pragma omp parallel shared(lck) { #pragma omp for for (i = 0; i < LOOPCOUNT; i++) { /*omp_set_lock(&lck); */ /*while(!omp_test_nest_lock(&lck)) {}; */ #pragma omp flush nr_threads_in_single++; #pragma omp flush nr_iterations++; nr_threads_in_single--; result = result + nr_threads_in_single; /*omp_unset_nest_lock(&lck); */ } } /*omp_destroy_nest_lock(&lck); */ return ((result == 0) && (nr_iterations == LOOPCOUNT)); }
coloring_jones_v4.h
#include "gms/third_party/gapbs/benchmark.h"
#include "gms/third_party/gapbs/builder.h"
#include "gms/third_party/gapbs/command_line.h"
#include "gms/third_party/gapbs/graph.h"
#include "coloring_common.h"

#include <vector>
#include <unordered_map>
#include <random>

// We need to know the cache line size at compile-time for alignas, but we can only determine it at runtime.
// TODO: What do? Just leave this hack with CACHE_LINE_SIZE = 64?
#define CACHE_LINE_SIZE 64

// Jones-Plassmann style distributed graph coloring: each OpenMP thread owns a
// contiguous vertex partition; threads exchange "vertex is ready to color"
// messages through per-thread lock-free queues.
namespace GMS::Coloring::JonesV4 {

// Communication between threads using a lock-free queue.
// Single-consumer (the owning thread), multi-producer (any thread may
// enqueue).  Slots are published with value+1 so that 0 means "not yet
// written"; the consumer spins until a slot becomes nonzero.
// alignas keeps each queue on its own cache line to avoid false sharing.
class alignas(CACHE_LINE_SIZE) ready_queue {
private:
    NodeId* data;       // shared: slot array, written by producers
    size_t write_pos;   // shared: next free slot, claimed via atomic capture
    size_t read_pos;    // private: only the owner thread advances this

public:
    ready_queue() : data(nullptr), write_pos(0), read_pos(0) {}

    ~ready_queue() {
        // a non-empty queue at destruction means the algorithm terminated
        // before all ready notifications were consumed -- treat as fatal
        if (write_pos != read_pos) {
            std::cout << "Tried to destroy a non-empty ready_queue" << std::endl;
            exit(EXIT_FAILURE);
        }
        delete[] data;
    }

    // max_size must bound the total number of enqueues over the queue's
    // lifetime (slots are never reused)
    void init(size_t max_size) {
        if (data != nullptr) {
            std::cout << "Tried to re-init a ready_queue" << std::endl;
            exit(EXIT_FAILURE);
        }
        data = new NodeId[max_size](); // Zero-initialized
    }

    // Used by multiple threads to notify a thread that one of its vertices is now ready.
    // Two-step publish: atomically claim a slot, then atomically write the
    // (value+1) payload; the consumer tolerates the gap by spinning on 0.
    void enqueue(NodeId ready_vertex) {
        size_t my_write_pos;
        #pragma omp atomic capture
        my_write_pos = write_pos++;
        #pragma omp atomic write
        data[my_write_pos] = (ready_vertex + 1);
    }

    // Only called by the owner thread, spins until a ready vertex is available.
    // Drains every published slot into color_queue (decoding the +1 offset).
    void dequeue(std::vector<NodeId> &color_queue) {
        // Wait until at least one ready vertex is available
        size_t cur_write_pos;
        do {
            #pragma omp atomic read
            cur_write_pos = write_pos;
        } while (cur_write_pos == read_pos);
        int64_t ready_vertex;
        for (; read_pos < cur_write_pos; ++read_pos) {
            // Wait until a thread writes to data[read_pos], i.e. data[read_pos] != 0
            do {
                #pragma omp atomic read
                ready_vertex = data[read_pos];
            } while (ready_vertex == 0);
            color_queue.push_back(ready_vertex - 1);
        }
    }
};

// Book-keeping of which vertices to notify if a vertex gets colored.
// A non-owning view over a contiguous run of NodeIds inside a
// node_queue_list; valid only while the owning list is alive.
class node_queue {
private:
    const NodeId* beginPtr;
    const NodeId* endPtr;

public:
    node_queue(const NodeId* begin, const NodeId* end) : beginPtr(begin), endPtr(end) {}
    const NodeId* begin() const { return beginPtr; }
    const NodeId* end() const { return endPtr; }
};

// Per-partition CSR-like storage: for each owned vertex (offset by
// part_start) a run of neighbor NodeIds to notify once it is colored.
// Built incrementally via insert()/next_node() during partition_graph().
// NOTE(review): owns raw arrays but does not delete/define copy or move
// operations -- copying would double-delete.  Confirm it is never copied.
class node_queue_list {
private:
    int64_t part_start;      // first vertex id of the owning partition
    size_t cur_node_idx;     // next free slot in nodes[]
    size_t cur_offset_idx;   // next offset slot (one per finished vertex)
    NodeId *nodes;
    size_t *node_offsets;    // node_offsets[v - part_start .. +1] bound v's run

public:
    node_queue_list(int64_t part_start, int64_t part_end, size_t max_size) :
        part_start(part_start), cur_node_idx(0), cur_offset_idx(1),
        nodes(new NodeId[max_size]), node_offsets(new size_t[part_end - part_start + 1]) {
        node_offsets[0] = 0;
    }

    ~node_queue_list() {
        delete[] nodes;
        delete[] node_offsets;
    }

    // append a neighbor to the current (in-progress) vertex's run
    void insert(NodeId node) {
        nodes[cur_node_idx++] = node;
    }

    // close the current vertex's run; must be called once per owned vertex,
    // in ascending vertex order
    void next_node() {
        node_offsets[cur_offset_idx++] = cur_node_idx;
    }

    const node_queue operator[](const NodeId v) const {
        const int64_t v_idx = v - part_start;
        size_t begin = node_offsets[v_idx];
        size_t end = node_offsets[v_idx+1];
        return node_queue(&nodes[begin], &nodes[end]);
    }
};

// After a vertex is colored, atomically decrement n_wait for every neighbor
// in its send queue; a neighbor that reaches 0 is ready, so enqueue it on
// its owner thread's ready_queue (owner derived from the uniform partition
// size part_max_size).
// NOTE(review): non-inline, non-template free function in a header -- ODR
// violation if this header is included from more than one TU; consider
// marking it (and rho below) inline.
void notify_threads(const node_queue send_queue, std::vector<int32_t> &n_wait,
                    std::vector<ready_queue> &ready_queues, const int64_t part_max_size) {
    for (NodeId w : send_queue) {
        int32_t num_waiting;
        #pragma omp atomic capture
        num_waiting = --n_wait[w];
        if (num_waiting == 0) {
            // w now isn't waiting for any other vertices anymore, so tell the w's thread that w is ready
            size_t w_thread_id = w / part_max_size;
            ready_queues[w_thread_id].enqueue(w);
        }
    }
}

// Sequential coloring algorithms

// Greedy color choice for v: mark the colors (1-based) of already-colored
// neighbors in color_palette, pick the smallest unmarked color, publish it
// atomically, then reset the palette slots that were touched.
// Uncolored neighbors read as color 0, which harmlessly marks palette[0]
// (never a candidate, since candidate colors start at 1).
template <class CGraph>
int32_t pick_lowest_consistent_color(const CGraph& g, std::vector<int32_t> &coloring,
                                     const NodeId v, std::vector<bool> &color_palette) {
    // If all deg(v) neighbors have distinct colors 1..deg(v), then deg(v) + 1 will be a consistent color
    // Else, there will be a color i with 1 <= i <= deg(v) which was not selected for any neighbor
    const int32_t deg = g.out_degree(v);
    for (NodeId w : g.out_neigh(v)) {
        int32_t w_color;
        #pragma omp atomic read
        w_color = coloring[w];
        if (w_color <= deg) {
            color_palette[w_color] = true;
        }
    }
    int32_t color;
    for (color = 1; color <= deg; ++color) {
        if (!color_palette[color]) break;
    }
    // falls through with color == deg + 1 if all of 1..deg are taken
    #pragma omp atomic write
    coloring[v] = color;
    // Reset the color palette to false - only [0..deg] were used
    std::fill(color_palette.begin(), color_palette.begin() + (deg + 1), false);
    return color;
}

// Signature shared by the pluggable sequential coloring strategies below.
template <class CGraph>
using seq_coloring_func = int32_t (*)(const CGraph& g, std::vector<int32_t> &coloring,
                                      const std::vector<NodeId> &color_queue,
                                      std::vector<int32_t> &n_wait, const node_queue_list &send_queues,
                                      std::vector<ready_queue> &ready_queues, const int64_t part_max_size,
                                      std::vector<bool> &color_palette, std::vector<NodeId>& order);

// int32_t sequential_coloring_unordered(const CGraph& g, std::vector<int32_t> &coloring, const std::vector<NodeId> &color_queue,
//                                       std::vector<int32_t> &n_wait, const node_queue_list &send_queues,
//                                       std::vector<ready_queue> &ready_queues, const int64_t part_max_size,
//                                       std::vector<bool> &color_palette) {
//     int32_t max_color = 0;
//     for (NodeId v : color_queue) {
//         int32_t color = pick_lowest_consistent_color(g, coloring, v, color_palette);
//         max_color = std::max(max_color, color);
//         notify_threads(send_queues[v], n_wait, ready_queues, part_max_size);
//     }
//     return max_color;
// }

// int32_t sequential_coloring_ldo(const CGraph& g, std::vector<int32_t> &coloring, const std::vector<NodeId> &color_queue,
//                                 std::vector<int32_t> &n_wait, const node_queue_list &send_queues,
//                                 std::vector<ready_queue> &ready_queues, const int64_t part_max_size,
//                                 std::vector<bool> &color_palette, std::vector<NodeId>& order) {
//     struct idx_and_degree {
//         size_t index;
//         int64_t degree;
//         idx_and_degree(size_t i, int64_t d) : index(i), degree(d) {}
//         bool operator <(const idx_and_degree &other) {
//             return degree > other.degree; // Sort descending by degree
//         }
//     };
//     int32_t max_color = 0;
//     size_t n_to_color = color_queue.size();
//     std::vector<idx_and_degree> work_list;
//     work_list.reserve(n_to_color);
//     for (size_t i = 0; i < n_to_color; ++i) {
//         NodeId v = color_queue[i];
//         work_list.emplace_back(i, g.out_degree(v));
//     }
//     std::sort(work_list.begin(), work_list.end());
//     for (idx_and_degree item : work_list) {
//         NodeId v = color_queue[item.index];
//         int32_t color = pick_lowest_consistent_color(g, coloring, v, color_palette);
//         max_color = std::max(max_color, color);
//         notify_threads(send_queues[v], n_wait, ready_queues, part_max_size);
//     }
//     return max_color;
// }

// Colors the vertices of color_queue in an order driven by the caller's
// `order` vector, re-heapifying whenever a colored vertex had an uncolored
// neighbor still in the queue.
// NOTE(review): several suspicious spots to confirm against the original
// intent:
//  - the make_heap comparator reads order[a] where a/b are *indices into
//    color_queue*, not NodeIds -- likely meant order[color_queue[a]];
//  - pop_heap is called WITHOUT the custom comparator used by make_heap, so
//    the heap invariant it restores differs from the one established;
//  - in the neighbor loop, `NodeId v = map_it->second;` shadows the outer v
//    and is unused -- probably meant to update/erase rev_map or the heap.
template <class CGraph>
int32_t sequential_custom_order_coloring(const CGraph& g, std::vector<int32_t> &coloring,
                                         const std::vector<NodeId> &color_queue,
                                         std::vector<int32_t> &n_wait, const node_queue_list &send_queues,
                                         std::vector<ready_queue> &ready_queues, const int64_t part_max_size,
                                         std::vector<bool> &color_palette, std::vector<NodeId>& order) {
    int32_t max_color = 0;
    size_t n_to_color = color_queue.size();
    std::vector<size_t> max_deg_heap(n_to_color);
    // rev_map: NodeId -> its index in color_queue, used to test whether a
    // neighbor is still awaiting a color in this batch
    std::unordered_map<NodeId, size_t> rev_map;
    for (size_t i = 0; i < n_to_color; ++i) {
        NodeId v = color_queue[i];
        rev_map[v] = i;
        max_deg_heap[i] = i;
    }
    bool modified = true;
    for (size_t i = 0; i < n_to_color; ++i) {
        if (modified) {
            // lazily rebuild the heap only when the previous iteration
            // touched a still-queued neighbor
            std::make_heap(max_deg_heap.begin(), max_deg_heap.end(),
                           [&](const size_t a, const size_t b) -> bool {
                return order[a] < order[b];
            });
            modified = false;
        }
        NodeId v = color_queue[max_deg_heap[0]];
        std::pop_heap(max_deg_heap.begin(), max_deg_heap.end());
        max_deg_heap.pop_back();
        int32_t color = pick_lowest_consistent_color(g, coloring, v, color_palette);
        max_color = std::max(max_color, color);
        notify_threads(send_queues[v], n_wait, ready_queues, part_max_size);
        for (NodeId w : g.out_neigh(v)) {
            auto map_it = rev_map.find(w);
            if (map_it == rev_map.end()) continue;
            NodeId v = map_it->second;   // NOTE(review): shadows outer v, unused
            modified = true;
        }
    }
    return max_color;
}

// Graph partitioning

// Pseudo-random vertex priority used to break ties between neighbors in
// different partitions.  Each step (multiply by an odd constant, rotate,
// add, multiply) is a bijection on 64-bit integers, so distinct vertices
// never collide for a fixed seed -- the > / <= split below is therefore a
// strict total order.
// NOTE(review): like notify_threads, this is a non-inline definition in a
// header (potential ODR violation across TUs).
uint64_t rho(const uint32_t seed, uint64_t v) {
    const uint64_t rnd_prime_64 = 0xE57EACE69B044FE7ULL;
    v = (v * rnd_prime_64) + seed;
    v = (v >> 17) | (v << 47);
    v = (v + seed) * rnd_prime_64;
    return v;
}

// Classify this partition's vertices [part_start, part_end):
//  - "local" (no neighbor outside the partition) -> local_vertices;
//  - shared with n_wait == 0 (higher priority than every external neighbor)
//    -> color_queue, colorable immediately;
//  - otherwise n_wait[v] counts the external neighbors v must wait for,
//    and send_queue records the external neighbors v must notify.
// Returns the maximum out-degree seen (to size the color palette).
template <class CGraph>
size_t partition_graph(const CGraph &g, const int64_t part_start, const int64_t part_end,
                       const uint32_t rho_seed, std::vector<int32_t> &n_wait,
                       node_queue_list &send_queue, std::vector<NodeId> &color_queue,
                       std::vector<NodeId> &local_vertices) {
    int64_t max_degree = 0;
    for (NodeId v = part_start; v < part_end; ++v) {
        bool is_local = true; // A vertex is local iff none of its neighbors are part of another partition
        int32_t n_wait_v = 0;
        uint64_t rho_v = rho(rho_seed, (uint64_t) v);
        for (NodeId w : g.out_neigh(v)) {
            if (part_start <= w && w < part_end) {
                continue; // Skip local neighbors
            }
            is_local = false; // Has a shared edge, no longer a local vertex
            uint64_t rho_w = rho(rho_seed, (uint64_t) w);
            if (rho_w > rho_v) {
                // w outranks v: v must wait for w to be colored first
                ++n_wait_v;
            } else {
                // v outranks w: notify w's thread once v is colored
                send_queue.insert(w);
            }
        }
        if (is_local) {
            local_vertices.push_back(v);
        } else if (n_wait_v == 0) {
            // Shared vertex doesn't have to wait for any other vertices to be colored, so we can immediately color it
            color_queue.push_back(v);
        }
        max_degree = std::max(max_degree, g.out_degree(v));
        n_wait[v] = n_wait_v;
        send_queue.next_node();
    }
    return max_degree;
}

// Actual parallel algorithm

// Entry point: partitions the vertex set evenly over the OpenMP threads,
// colors shared vertices in priority order (message-passing via the
// ready_queues), then colors each partition's purely-local vertices.
// Returns the number of colors used; coloring[v] receives v's color.
// Note: num_colors uses reduction(max:) and shared_vertices_count uses
// reduction(+:), so inside the parallel region each thread operates on its
// private copy.
template <class CGraph>
int32_t graph_coloring_jones(const CGraph &g, std::vector<int32_t> &coloring, std::vector<NodeId>& order) {
    // DetailTimer timer;
    const seq_coloring_func<CGraph> seq_color = &sequential_custom_order_coloring;
    const int64_t n = g.num_nodes();
    std::vector<ready_queue> ready_queues(omp_get_max_threads());
    std::vector<int32_t> n_wait(n);
    int32_t num_colors = 0;
    size_t shared_vertices_count = 0;
    // fresh random seed per run for the rho() priority function
    std::random_device rd;
    const uint32_t rho_seed = rd();
    std::cout << omp_get_max_threads() << " " << omp_get_num_threads() << std::endl;
    // timer.endPhase("init");

    #pragma omp parallel shared(g, coloring, ready_queues, n_wait) reduction(max: num_colors) reduction(+: shared_vertices_count)
    {
        const int tcount = omp_get_num_threads();
        const int tid = omp_get_thread_num();
        // contiguous, equally-sized vertex partitions (last may be short)
        const int64_t part_max_size = (n + (tcount - 1)) / tcount; // part_max_size = ceil(n / tcount);
        const int64_t part_start = std::min(n, tid * part_max_size);
        const int64_t part_end = std::min(n, part_start + part_max_size);
        const int64_t part_size = part_end - part_start;

        std::vector<NodeId> color_queue;
        color_queue.reserve(part_size);
        std::vector<NodeId> local_vertices;
        local_vertices.reserve(part_size);
        // total neighbor count of the partition -- upper bound on the
        // send-queue storage (assumes the CSR adjacency of the partition's
        // vertices is contiguous from part_start's first to part_end-1's
        // last neighbor)
        const size_t num_partition_neighbors = std::distance(g.out_neigh(part_start).begin(),
                                                             g.out_neigh(part_end - 1).end());
        node_queue_list send_queues(part_start, part_end, num_partition_neighbors);

        size_t max_degree = partition_graph(g, part_start, part_end, rho_seed,
                                            n_wait, send_queues, color_queue, local_vertices);
        shared_vertices_count = part_size - local_vertices.size();
        std::vector<bool> color_palette(max_degree + 1, false);
        ready_queue &in_queue = ready_queues[tid];
        // at most one enqueue per shared vertex of this partition
        in_queue.init(shared_vertices_count);

        #pragma omp barrier // All n_wait and ready_queues are now initialized
        #pragma omp master
        {
            // timer.endPhase("par_partition");
        }

        // color the initially-ready shared vertices, then drain the ready
        // queue until every shared vertex of this partition is colored
        num_colors = seq_color(g, coloring, color_queue, n_wait, send_queues,
                               ready_queues, part_max_size, color_palette, order);
        size_t n_colored = color_queue.size();
        color_queue.clear();
        while (n_colored < shared_vertices_count) {
            in_queue.dequeue(color_queue);
            int32_t cur_max_color = seq_color(g, coloring, color_queue, n_wait, send_queues,
                                              ready_queues, part_max_size, color_palette, order);
            num_colors = std::max(num_colors, cur_max_color);
            n_colored += color_queue.size();
            color_queue.clear();
        }

        // Color local vertices last
        int32_t cur_max_color = seq_color(g, coloring, local_vertices, n_wait, send_queues,
                                          ready_queues, part_max_size, color_palette, order);
        num_colors = std::max(num_colors, cur_max_color);
    }
    // timer.endPhase("par_coloring");
    // timer.print();

    double local_ratio = 1.0 - (double) shared_vertices_count / n;
    std::cout << "Local vertex ratio: " << local_ratio << std::endl;
    return num_colors;
}

} // namespace GMS::Coloring::JonesV4
bli_axpyv_bgq_int.c
/*

   BLIS
   An object-based framework for developing high-performance BLAS-like
   libraries.

   Copyright (C) 2014, The University of Texas at Austin

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:
    - Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    - Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    - Neither the name of The University of Texas at Austin nor the names
      of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/

#include "blis.h"

/*
 * Double-precision AXPY kernel for Blue Gene/Q:  y := y + alpha * x,
 * vectorized with 4-wide QPX intrinsics and parallelized with OpenMP.
 *
 * conjx is ignored (real domain).  Falls back to the reference kernel
 * whenever either vector is strided (incx/incy != 1) or not 32-byte
 * aligned, since QPX aligned loads/stores (vec_lda/vec_sta) require
 * 32-byte alignment.  The final n % 4 elements are handled scalar.
 */
void bli_daxpyv_bgq_int
     (
       conj_t           conjx,
       dim_t            n,
       double* restrict alpha,
       double* restrict x, inc_t incx,
       double* restrict y, inc_t incy,
       cntx_t*          cntx
     )
{
	if ( bli_zero_dim1( n ) ) return;

	// If there is anything that would interfere with our use of aligned
	// vector loads/stores, call the reference implementation.
	bool_t use_ref = FALSE;
	if ( incx != 1 || incy != 1 ||
	     bli_is_unaligned_to( x, 32 ) || bli_is_unaligned_to( y, 32 ) )
	{
		use_ref = TRUE;
	}

	// Call the reference implementation if needed.
	if ( use_ref == TRUE )
	{
		BLIS_DAXPYV_KERNEL_REF( conjx, n, alpha, x, incx, y, incy, cntx );
		return;
	}

	dim_t n_run  = n / 4;
	dim_t n_left = n % 4;

	// alpha is loop-invariant and read-only, so sharing the splatted
	// vector across threads is safe.
	vector4double alphav = vec_lds( 0 * sizeof(double), (double*)alpha );

	#pragma omp parallel for
	for ( dim_t i = 0; i < n_run; i++ )
	{
		// BUGFIX: xv/yv/zv were previously declared before the parallel
		// loop.  Variables declared outside an OpenMP parallel region are
		// shared by default, so without a private() clause all threads
		// raced on the same three vector temporaries, potentially
		// corrupting y.  Declaring them inside the loop body makes them
		// private to each iteration/thread.
		vector4double xv = vec_lda( 0 * sizeof(double), &x[i*4] );
		vector4double yv = vec_lda( 0 * sizeof(double), &y[i*4] );
		vector4double zv = vec_madd( alphav, xv, yv );
		vec_sta( zv, 0 * sizeof(double), &y[i*4] );
	}

	// Scalar cleanup for the trailing n % 4 elements.
	for ( dim_t i = 0; i < n_left; i++ )
	{
		y[4*n_run + i] += *alpha * x[4*n_run + i];
	}
}
LinkedCellService.h
// Copyright (C) 2015 Technische Universitaet Muenchen
// This file is part of the Mamico project. For conditions of distribution
// and use, please see the copyright notice in Mamico's main folder, or at
// www5.in.tum.de/mamico
#ifndef _MOLECULARDYNAMICS_SERVICES_LINKEDCELLSERVICE_H_
#define _MOLECULARDYNAMICS_SERVICES_LINKEDCELLSERVICE_H_

#include <iostream>
#include "simplemd/LinkedCell.h"
#include "simplemd/Molecule.h"
#include "simplemd/molecule-mappings/UpdateLinkedCellListsMapping.h"
#include "simplemd/services/ParallelTopologyService.h"
#include "tarch/la/Vector.h"

namespace simplemd {
  namespace services {
    class LinkedCellService;
  }
}

/** manages the linked cell data structure of the simulation.
 *  The local domain is stored as a flat array of LinkedCell objects
 *  (_cells) surrounded by a one-cell ghost layer; (vector) cell indices
 *  are linearised via getLocalIndexFromLocalVector().
 *  @author Philipp Neumann
 */
class simplemd::services::LinkedCellService {
  public:
    /** initialises the linked cell service:
     *  domainSize   - size of local domain
     *  domainOffset - starting coordinate of local domain (lowerLeftFront point)
     *  numberOfCells - local number of cells
     */
    LinkedCellService(
      const tarch::la::Vector<MD_DIM,double>& domainSize,
      const tarch::la::Vector<MD_DIM,double>& domainOffset,
      const simplemd::services::ParallelTopologyService& parallelTopologyService,
      simplemd::services::MoleculeService& moleculeService
    );

    /** shuts down the service, frees memory and resets all variables */
    void shutdown();

    /** puts the molecule into the cell defined by the local index (vector) coordinates localCellIndex */
    void addMoleculeToLinkedCell(Molecule &molecule, const tarch::la::Vector<MD_DIM,unsigned int> &localCellIndex);

    /** puts the molecule into the cell defined by the local index (scalar) coordinates localCellIndex */
    void addMoleculeToLinkedCell(Molecule &molecule, const unsigned int &localCellIndex);

    /** removes the molecule from the cell at localCellIndex */
    void deleteMoleculeFromLinkedCell(Molecule& molecule, const tarch::la::Vector<MD_DIM,unsigned int> &localCellIndex);

    /** returns the linked cell at the respective coordinates */
    LinkedCell& getLinkedCell(const tarch::la::Vector<MD_DIM,unsigned int> &localCellIndex);

    /** iterates over all cells in the range defined by the lower left front corner cell lowerLeftFrontCell
     *  and the size of the domain cellRange. cellRange defines a number of cells in each spatial direction that
     *  the class A shall be applied to. lowerLeftFrontCell needs to be given in local coordinates.
     */
    template<class A> void iterateCells(
      A &a,
      const tarch::la::Vector<MD_DIM,unsigned int>& lowerLeftFrontCell,
      const tarch::la::Vector<MD_DIM,unsigned int>& cellRange,
      const bool& useOpenMP
    );

    /** iterates over all cells in the inner part (i.e. does not consider the ghost layer) */
    template<class A> void iterateCells(A &a,const bool& useOpenMP);

    /** iterates over all cell pairs for the cells in the inner region of each local process */
    template<class A> void iterateCellPairs( A &a, const bool& useOpenMP ) const;

    /** iterates over all cell pairs cell1 and cell2 with cell1 in the range described by lowerLeftFrontCell and cellRange;
     *  cell2 does not need to lie within the range (example: iterate only over lowerLeftFrontCell=(1,1,1) and cellRange=(1,1,1).
     *  Then, we will consider amongst others the pair (0,0,0),(1,1,1)).
     */
    template<class A> void iterateCellPairs(
      A &a,
      const tarch::la::Vector<MD_DIM,unsigned int>& lowerLeftFrontCell,
      const tarch::la::Vector<MD_DIM,unsigned int>& cellRange,
      const bool& useOpenMP
    ) const;

    /** returns the index of the first (non-ghost) cell */
    const tarch::la::Vector<MD_DIM,unsigned int>& getLocalIndexOfFirstCell() const;

    /** returns the number of (non-ghost) cells */
    const tarch::la::Vector<MD_DIM,unsigned int>& getLocalNumberOfCells() const;

    /** returns the mesh width */
    const tarch::la::Vector<MD_DIM,double>& getMeshWidth() const;

    /** returns the local domain offset (for the domain of this process) */
    const tarch::la::Vector<MD_DIM,double>& getLocalDomainOffset() const;

    /** returns the local domain size (for the domain of this process) */
    const tarch::la::Vector<MD_DIM,double>& getLocalDomainSize() const;

    /** returns true if the local cell index cellIndex describes a linked cell within the ghost layer */
    bool isGhostCell(const unsigned int &cellIndex) const;

    /** returns the local cell index vector for the local cell index cellIndex */
    tarch::la::Vector<MD_DIM,unsigned int> getLocalCellIndexVector(const unsigned int cellIndex) const;

    /** returns the local cell index from the local cell index vector */
    unsigned int getLocalCellIndex(const tarch::la::Vector<MD_DIM,unsigned int> &cellIndexVector) const;

    /** destructor: releases the flat cell array (idempotent — _cells is
     *  nulled after deletion). */
    ~LinkedCellService(){
      if (_cells != NULL){
        delete [] _cells;
        _cells = NULL;
      }
    }

  private:
    /** initialise linked-cell structure for local process.
     *  indexOffset denotes the integer coordinates of the first cell
     *  within the local cell structure; the grid has a total of numberOfCells cells.
     *  globalIndexFirstCell denotes the global index of the lower left cell of the simulation.
     *  This is important for parallel computations, only.
     */
    void initCellStructure();

    /** returns local index from (local) coordinate vector */
    unsigned int getLocalIndexFromLocalVector(const tarch::la::Vector<MD_DIM,unsigned int>& coords) const;

    /** computes the mesh width from domain size and local number of grid cells */
    tarch::la::Vector<MD_DIM,double> getMeshwidth(const tarch::la::Vector<MD_DIM,double>& domainSize,
      const tarch::la::Vector<MD_DIM,unsigned int>& localNumberCells) const{
      tarch::la::Vector<MD_DIM,double> meshWidth(0.0);
      for (unsigned int d = 0; d < MD_DIM; d++){
        meshWidth[d] = domainSize[d]/localNumberCells[d];
      }
      return meshWidth;
    }

    /** contains all (local) linked cells */
    LinkedCell *_cells;

    /** size of global domain */
    const tarch::la::Vector<MD_DIM,double> _domainSize;
    /** offset of local domain */
    const tarch::la::Vector<MD_DIM,double> _domainOffset;
    /** mesh width of linked cells */
    const tarch::la::Vector<MD_DIM,double> _meshWidth;
    /** number of cells of local domain, without ghost layer */
    const tarch::la::Vector<MD_DIM,unsigned int> _numberOfCells;
    /** index of first cell under consideration. It is 1,1,1, due to a ghost cell layer
     *  around the domain.
     */
    const tarch::la::Vector<MD_DIM,unsigned int> _indexOffset;
    /** number of cells of local domain, including ghost layer */
    const tarch::la::Vector<MD_DIM,unsigned int> _totalNumberOfCells;
    /** _totalNumberOfCells(0)*_totalNumberOfCells(1); only stored for performance reasons */
#if (MD_DIM>2)
    const unsigned int _totalNumberOfCells_X_By_totalNumberOfCells_Y;
#endif
};


// Applies a.handleCell() to every cell in the cuboid
// [lowerLeftFrontCell, lowerLeftFrontCell+cellRange). With OpenMP the
// cuboid is flattened into a single parallel loop and the linear cell
// index is reconstructed from the loop counter; otherwise a plain
// nested loop over local coordinates is used.
template<class A>
void simplemd::services::LinkedCellService::iterateCells(
  A &a,
  const tarch::la::Vector<MD_DIM,unsigned int>& lowerLeftFrontCell,
  const tarch::la::Vector<MD_DIM,unsigned int>& cellRange,
  const bool& useOpenMP
){
  unsigned int index = 0;
#if (MD_DEBUG==MD_YES)
  // sanity checks: non-empty range that fits into the local sub-domain
  for (unsigned int d = 0; d < MD_DIM; d++){
    if (cellRange[d]==0){std::cout << "ERROR simplemd::services::LinkedCellService::iterateCells: cellRange(" << d << ")==0!" << std::endl;
      exit(EXIT_FAILURE);
    }
    if (lowerLeftFrontCell[d]+cellRange[d] > 2*_indexOffset[d]+_numberOfCells[d]){
      std::cout << "ERROR simplemd::services::LinkedCellService::iterateCells(): defined Range does not fit into local sub-domain!" << std::endl;
      exit(EXIT_FAILURE);
    }
  }
#endif

  // start iteration();
  a.beginCellIteration();

#if (MD_OPENMP==MD_YES)
  if (useOpenMP){
    // NOTE(review): getInstance() is not declared in the class above —
    // presumably provided elsewhere; verify this OpenMP path compiles.
    const tarch::la::Vector<MD_DIM,unsigned int> size(
      simplemd::services::LinkedCellService::getInstance().getLocalNumberOfCells()
      + 2*simplemd::services::LinkedCellService::getInstance().getLocalIndexOfFirstCell()
    );
    const int length = cellRange(0)
#if (MD_DIM>1)
      * cellRange(1)
#endif
#if (MD_DIM>2)
      * cellRange(2)
#endif
    ;

    // loop over domain, but with a single loop
    #pragma omp parallel for
    for (int i = 0; i < length; i++){
      // compute index of the current cell
#if (MD_DIM>1)
      int helpIndex1 = i;
      int helpIndex2 = 0;
#endif
      unsigned int index = 0;   // shadows the outer index on purpose (thread-private)
#if (MD_DIM>2)
      // determine plane within traversed block
      helpIndex2 = helpIndex1/(cellRange(0)*cellRange(1));
      // save rest of index in helpIndex1
      helpIndex1 = helpIndex1-helpIndex2*(cellRange(0)*cellRange(1));
      // compute contribution to index
      index += (lowerLeftFrontCell(2) + helpIndex2)*size(0)*size(1);
#endif
#if (MD_DIM>1)
      // determine plane within traversed block
      helpIndex2 = helpIndex1/cellRange(0);
      // save rest of index in helpIndex1
      helpIndex1 = helpIndex1-helpIndex2*cellRange(0);
      // compute contribution to index
      index += (lowerLeftFrontCell(1) + helpIndex2)*size(0);
      // compute contribution for last dimension
      index += (lowerLeftFrontCell(0) + helpIndex1);
#else
      index = lowerLeftFrontCell(0)+i;
#endif
#if (MD_DEBUG==MD_YES)
      std::cout << "Handle cell " << index << std::endl;
#endif
      // handle cell
      a.handleCell(_cells[index],index);
    }
  } else {
#endif
  tarch::la::Vector<MD_DIM,unsigned int> coords(0);
  // loop over domain
#if (MD_DIM > 2)
  for (coords[2] = lowerLeftFrontCell[2] ; coords[2] < lowerLeftFrontCell[2]+cellRange[2]; coords[2]++){
#endif
#if (MD_DIM > 1)
  for (coords[1] = lowerLeftFrontCell[1] ; coords[1] < lowerLeftFrontCell[1]+cellRange[1]; coords[1]++){
#endif
  for (coords[0] = lowerLeftFrontCell[0] ; coords[0] < lowerLeftFrontCell[0]+cellRange[0]; coords[0]++){
#if (MD_DEBUG==MD_YES)
    std::cout << "Handle cell " << coords << std::endl;
#endif
    index = getLocalIndexFromLocalVector(coords);
    a.handleCell(_cells[index],index);
  }
#if (MD_DIM > 1)
  }
#endif
#if (MD_DIM > 2)
  }
#endif
#if (MD_OPENMP==MD_YES)
  }
#endif

  // end iteration();
  a.endCellIteration();
}


// Convenience overload: iterate over the inner (non-ghost) cells only.
template<class A>
void simplemd::services::LinkedCellService::iterateCells(
  A &a,const bool& useOpenMP
){
  iterateCells(a,_indexOffset,_numberOfCells,useOpenMP);
}


// Applies a.handleCell() to every cell in the range and
// a.handleCellPair() to each of the MD_LINKED_CELL_NEIGHBOURS/2 forward
// (lower/left/back-oriented) neighbour pairs, so each unordered pair is
// visited exactly once. indexOffset/neighbourOffset encode the two
// linearised cell offsets of each pair relative to the current cell.
template<class A>
void simplemd::services::LinkedCellService::iterateCellPairs(
  A &a,
  const tarch::la::Vector<MD_DIM,unsigned int>& lowerLeftFrontCell,
  const tarch::la::Vector<MD_DIM,unsigned int>& cellRange,
  const bool& useOpenMP
) const{
  tarch::la::Vector<MD_LINKED_CELL_NEIGHBOURS/2,unsigned int> neighbourOffset;
  tarch::la::Vector<MD_LINKED_CELL_NEIGHBOURS/2,unsigned int> indexOffset;
  // the (+2)/(+3) terms account for the one-cell ghost layer on each side:
  // a row is _numberOfCells[0]+2 cells wide, a plane (_numberOfCells[0]+2)*(_numberOfCells[1]+2).
#if (MD_DIM==1)
  neighbourOffset[0] = 1; indexOffset[0] = 0;
#elif (MD_DIM==2)
  indexOffset[0] = 0; neighbourOffset[0] = 1;
  indexOffset[1] = 0; neighbourOffset[1] = _numberOfCells[0] + 2;
  indexOffset[2] = 0; neighbourOffset[2] = _numberOfCells[0] + 3;
  indexOffset[3] = 1; neighbourOffset[3] = _numberOfCells[0] + 2;
#elif (MD_DIM==3)
  indexOffset[0] = 0; neighbourOffset[0] = 1;
  indexOffset[1] = 0; neighbourOffset[1] = _numberOfCells[0] + 2;
  indexOffset[2] = 0; neighbourOffset[2] = _numberOfCells[0] + 3;
  indexOffset[3] = 0; neighbourOffset[3] = (_numberOfCells[0] + 2)*(_numberOfCells[1] + 2);
  indexOffset[4] = 0; neighbourOffset[4] = (_numberOfCells[0] + 2)*(_numberOfCells[1] + 2) + 1;
  indexOffset[5] = 0; neighbourOffset[5] = (_numberOfCells[0] + 2)*(_numberOfCells[1] + 2) + (_numberOfCells[0] + 2);
  indexOffset[6] = 0; neighbourOffset[6] = (_numberOfCells[0] + 2)*(_numberOfCells[1] + 2) + (_numberOfCells[0] + 2) + 1;
  indexOffset[7] = 1; neighbourOffset[7] = _numberOfCells[0] + 2;
  indexOffset[8] = 1; neighbourOffset[8] = (_numberOfCells[0] + 2)*(_numberOfCells[1] + 2);
  indexOffset[9] = 1; neighbourOffset[9] = (_numberOfCells[0] + 2)*(_numberOfCells[1] + 2) + (_numberOfCells[0] + 2);
  indexOffset[10] = _numberOfCells[0]+2; neighbourOffset[10] = (_numberOfCells[0] + 2)*(_numberOfCells[1] + 2);
  indexOffset[11] = _numberOfCells[0]+2; neighbourOffset[11] = (_numberOfCells[0] + 2)*(_numberOfCells[1] + 2) + 1;
  indexOffset[12] = (_numberOfCells[0] + 2) + 1; neighbourOffset[12] = (_numberOfCells[0] + 2)*(_numberOfCells[1] + 2);
#endif

#if (MD_DEBUG==MD_YES)
  for (unsigned int d = 0; d < MD_DIM; d++){
    if (cellRange[d] > 2*_indexOffset[d]+_numberOfCells[d]-1){
      std::cout << "ERROR simplemd::services::LinkedCellService::iterateCellPairs(): defined Range does not fit into local sub-domain!" << std::endl;
      exit(EXIT_FAILURE);
    }
  }
#endif

  // start iteration();
  a.beginCellIteration();

#if (MD_OPENMP==MD_YES)
  if (useOpenMP){
    // NOTE(review): getInstance() is not declared in the class above —
    // presumably provided elsewhere; verify this OpenMP path compiles.
    const tarch::la::Vector<MD_DIM,unsigned int> size(
      simplemd::services::LinkedCellService::getInstance().getLocalNumberOfCells()
      + 2*simplemd::services::LinkedCellService::getInstance().getLocalIndexOfFirstCell()
    );
    // iterate over the domain in a red-black manner
#if (MD_DIM>2)
    for (unsigned int z=0; z < 2; z++){
#endif
#if (MD_DIM>1)
    for (unsigned int y=0; y < 2; y++){
#endif
    for (unsigned int x=0; x < 2; x++){
      // determine range/ length of blocks for red-black traversal.
      // For odd block sizes, we need to do some more work in the x/y/z==0-traversals.
      // The second x/y/z==1-traversals are reduced by the normal integer-rounding in this case.
      const tarch::la::Vector<MD_DIM,unsigned int> lengthVector(
        (cellRange[0] + (cellRange[0]%2)*(x==0))/2
#if (MD_DIM>1)
        ,(cellRange[1] + (cellRange[1]%2)*(y==0))/2
#endif
#if (MD_DIM>2)
        ,(cellRange[2] + (cellRange[2]%2)*(z==0))/2
#endif
      );
      const int length = lengthVector[0]
#if (MD_DIM>1)
        * lengthVector[1]
#endif
#if (MD_DIM>2)
        * lengthVector[2]
#endif
      ;

      // parallelise loop for all cells that are to be traversed in this way
      #pragma omp parallel for
      for (int j = 0; j < length; j++){
        // compute index of the current cell
        unsigned int index = 0;
#if (MD_DIM>1)
        int helpIndex1 = j;
        int helpIndex2 = 0;
#endif
        unsigned int coordsCell1Buffer;
        unsigned int coordsCell2Buffer;
#if (MD_DIM>2)
        // determine plane within traversed block
        helpIndex2 = helpIndex1/(lengthVector[0]*lengthVector[1]);
        // save rest of index in helpIndex1
        helpIndex1 = helpIndex1-helpIndex2*(lengthVector[0]*lengthVector[1]);
        // compute contribution to index (stride 2 plus colour offset z)
        index += (lowerLeftFrontCell[2] + 2*helpIndex2 + z)*size[0]*size[1];
#endif
#if (MD_DIM>1)
        // determine plane within traversed block
        helpIndex2 = helpIndex1/lengthVector[0];
        // save rest of index in helpIndex1
        helpIndex1 = helpIndex1-helpIndex2*lengthVector[0];
        // compute contribution to index
        index += (lowerLeftFrontCell[1] + 2*helpIndex2 + y)*size[0];
        // compute contribution for last dimension
        index += (lowerLeftFrontCell[0] + 2*helpIndex1 + x);
#else
        index = lowerLeftFrontCell[0]+2*j + x;
#endif
#if (MD_DEBUG==MD_YES)
        std::cout << "Handle cell " << index << std::endl;
#endif
        a.handleCell(_cells[index],index);
        // handle pairs (lower,left,back-oriented cells)
        for (unsigned int i = 0; i < MD_LINKED_CELL_NEIGHBOURS/2; i++){
#if (MD_DEBUG == MD_YES)
          std::cout << "iterateCellPairs: Pair index " << index+indexOffset[i] << "," << index+neighbourOffset[i] << std::endl;
#endif
          coordsCell1Buffer = index+indexOffset[i];
          coordsCell2Buffer = index+neighbourOffset[i];
          a.handleCellPair(_cells[coordsCell1Buffer],_cells[coordsCell2Buffer],coordsCell1Buffer,coordsCell2Buffer);
        }
      } // j
    } // x
#if (MD_DIM > 1)
    } // y
#endif
#if (MD_DIM > 2)
    } // z
#endif
  // now: no open mp
  } else {
#endif
  tarch::la::Vector<MD_DIM,unsigned int> coords(0);
  unsigned int coordsCell1Buffer(0);
  unsigned int coordsCell2Buffer(0);
  unsigned int index;
  // loop over domain
#if (MD_DIM > 2)
  for (coords[2] = lowerLeftFrontCell[2]; coords[2] < lowerLeftFrontCell[2]+cellRange[2]; coords[2]++){
#endif
#if (MD_DIM > 1)
  for (coords[1] = lowerLeftFrontCell[1]; coords[1] < lowerLeftFrontCell[1]+cellRange[1]; coords[1]++){
#endif
  for (coords[0] = lowerLeftFrontCell[0]; coords[0] < lowerLeftFrontCell[0]+cellRange[0]; coords[0]++){
    // handle cell itself
    index = getLocalIndexFromLocalVector(coords);
#if (MD_DEBUG == MD_YES)
    std::cout <<"iterateCellPairs: Single index " << index << std::endl;
#endif
    a.handleCell(_cells[index],index);
    // handle pairs (lower,left,back-oriented cells)
    for (unsigned int i = 0; i < MD_LINKED_CELL_NEIGHBOURS/2; i++){
#if (MD_DEBUG == MD_YES)
      std::cout <<"iterateCellPairs: Pair index " << index+indexOffset[i] << "," << index+neighbourOffset[i] << std::endl;
#endif
      coordsCell1Buffer = index+indexOffset[i];
      coordsCell2Buffer = index+neighbourOffset[i];
      a.handleCellPair(_cells[coordsCell1Buffer],_cells[coordsCell2Buffer],coordsCell1Buffer,coordsCell2Buffer);
    }
  } // coords(0)
#if (MD_DIM > 1)
  }
#endif
#if (MD_DIM > 2)
  }
#endif
#if (MD_OPENMP==MD_YES)
  }
#endif

  // end iteration();
  a.endCellIteration();
}


// Convenience overload: pair iteration over the inner region plus the
// first ghost offset (start at cell 0, length = inner cells + offset).
template<class A>
void simplemd::services::LinkedCellService::iterateCellPairs(A &a,const bool& useOpenMP) const{
  const tarch::la::Vector<MD_DIM,unsigned int> pairIterationStart(0);
  const tarch::la::Vector<MD_DIM,unsigned int> pairIterationLength( getLocalNumberOfCells() + getLocalIndexOfFirstCell() );
  iterateCellPairs(a,pairIterationStart,pairIterationLength,useOpenMP);
}

#endif // _MOLECULARDYNAMICS_SERVICES_LINKEDCELLSERVICE_H_
VolumetricAdaptiveAveragePooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/VolumetricAdaptiveAveragePooling.c"
#else

// START_IND/END_IND map output index a (of b outputs) onto the input
// range [START_IND, END_IND) of c inputs, so adjacent pooling windows
// tile the input exactly (adaptive pooling).
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0

// 5d tensor B x D x T x H x W

// Forward pass over one "frame" (one batch element): for every output
// element, average the corresponding adaptive window of the input.
// input is accessed via explicit strides (may be non-contiguous);
// output is assumed contiguous in D x T x H x W.
static void THNN_(VolumetricAdaptiveAveragePooling_updateOutput_frame)(
          scalar_t *input_p,
          scalar_t *output_p,
          int64_t sizeD,
          int64_t isizeT,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeT,
          int64_t osizeH,
          int64_t osizeW,
          int64_t istrideD,
          int64_t istrideT,
          int64_t istrideH,
          int64_t istrideW)
{
  int64_t d;
  // parallelise over feature planes; each d writes a disjoint output slice
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    /* loop over output */
    int64_t ot, oh, ow;
    for(ot = 0; ot < osizeT; ot++)
    {
      int istartT = START_IND(ot, osizeT, isizeT);
      int iendT   = END_IND(ot, osizeT, isizeT);
      int kT = iendT - istartT;

      for(oh = 0; oh < osizeH; oh++)
      {
        int istartH = START_IND(oh, osizeH, isizeH);
        int iendH   = END_IND(oh, osizeH, isizeH);
        int kH = iendH - istartH;

        for(ow = 0; ow < osizeW; ow++)
        {
          int istartW = START_IND(ow, osizeW, isizeW);
          int iendW   = END_IND(ow, osizeW, isizeW);
          int kW = iendW - istartW;

          /* local pointers */
          scalar_t *ip = input_p + d*istrideD + istartT*istrideT + istartH*istrideH + istartW*istrideW;
          scalar_t *op = output_p + d*osizeT*osizeH*osizeW + ot*osizeH*osizeW + oh*osizeW + ow;

          /* compute local average: */
          scalar_t sum = 0;
          int it, ih, iw;
          for(it = 0; it < kT; it++)
          {
            for(ih = 0; ih < kH; ih++)
            {
              for(iw = 0; iw < kW; iw++)
              {
                scalar_t val = *(ip + it*istrideT + ih*istrideH + iw*istrideW);
                sum += val;
              }
            }
          }

          /* set output to local average */
          *op = sum / kT / kH / kW;
        }
      }
    }
  }
}

// Forward entry point: validates input (4D DxTxHxW or 5D batched),
// resizes output, and dispatches per-batch frames (batch loop is
// OpenMP-parallel in the 5D case).
// NOTE(review): the declared parameter order is (osizeT, osizeW, osizeH)
// while all internal uses treat the 2nd/3rd as H/W respectively — this
// mirrors how callers pass them; confirm against THNN.h before changing.
void THNN_(VolumetricAdaptiveAveragePooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          int osizeT,
          int osizeW,
          int osizeH)
{
  int dimD = 0;
  int dimT = 1;
  int dimH = 2;
  int dimW = 3;
  int64_t sizeB = 1;
  int64_t sizeD = 0;
  int64_t isizeT = 0;
  int64_t isizeH = 0;
  int64_t isizeW = 0;

  int64_t istrideB = 0;
  int64_t istrideD = 0;
  int64_t istrideT = 0;
  int64_t istrideH = 0;
  int64_t istrideW = 0;

  scalar_t *input_data = nullptr;
  scalar_t *output_data = nullptr;

  THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
                "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");

  if (input->dim() == 5)
  {
    // batched input: shift all dimension indices by one
    istrideB = input->stride(0);
    sizeB = input->size(0);
    dimD++;
    dimT++;
    dimH++;
    dimW++;
  }

  /* sizes */
  sizeD  = input->size(dimD);
  isizeT = input->size(dimT);
  isizeH = input->size(dimH);
  isizeW = input->size(dimW);
  /* strides */
  istrideD = input->stride(dimD);
  istrideT = input->stride(dimT);
  istrideH = input->stride(dimH);
  istrideW = input->stride(dimW);

  /* resize output */
  if (input->dim() == 4)
  {
    THTensor_(resize4d)(output, sizeD, osizeT, osizeH, osizeW);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();

    THNN_(VolumetricAdaptiveAveragePooling_updateOutput_frame)(input_data, output_data,
                                                               sizeD,
                                                               isizeT, isizeH, isizeW,
                                                               osizeT, osizeH, osizeW,
                                                               istrideD, istrideT,
                                                               istrideH, istrideW);
  }
  else
  {
    int64_t b;

    THTensor_(resize5d)(output, sizeB, sizeD, osizeT, osizeH, osizeW);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();

    // independent batch elements: parallelise over b
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(VolumetricAdaptiveAveragePooling_updateOutput_frame)(input_data+b*istrideB, output_data+b*sizeD*osizeT*osizeH*osizeW,
                                                                 sizeD,
                                                                 isizeT, isizeH, isizeW,
                                                                 osizeT, osizeH, osizeW,
                                                                 istrideD, istrideT,
                                                                 istrideH, istrideW);
    }
  }
}

// Backward pass over one frame: distribute each output gradient evenly
// (divided by the window volume kT*kH*kW) over its adaptive input
// window. Both tensors are accessed contiguously here; gradInput must
// be pre-zeroed by the caller since windows can overlap (+=).
static void THNN_(VolumetricAdaptiveAveragePooling_updateGradInput_frame)(
          scalar_t *gradInput_p,
          scalar_t *gradOutput_p,
          int64_t sizeD,
          int64_t isizeT,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeT,
          int64_t osizeH,
          int64_t osizeW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    scalar_t *gradInput_p_d = gradInput_p + d*isizeT*isizeW*isizeH;
    scalar_t *gradOutput_p_d = gradOutput_p + d*osizeT*osizeW*osizeH;

    /* calculate average */
    int64_t ot, oh, ow;
    for(ot = 0; ot < osizeT; ot++)
    {
      int istartT = START_IND(ot, osizeT, isizeT);
      int iendT   = END_IND(ot, osizeT, isizeT);
      int kT = iendT - istartT;

      for(oh = 0; oh < osizeH; oh++)
      {
        int istartH = START_IND(oh, osizeH, isizeH);
        int iendH   = END_IND(oh, osizeH, isizeH);
        int kH = iendH - istartH;

        for(ow = 0; ow < osizeW; ow++)
        {
          int istartW = START_IND(ow, osizeW, isizeW);
          int iendW   = END_IND(ow, osizeW, isizeW);
          int kW = iendW - istartW;

          // each input element in the window receives an equal share
          scalar_t grad_delta = gradOutput_p_d[ot*osizeH*osizeW + oh*osizeW + ow] / kT / kH / kW;

          int it, ih, iw;
          for(it = istartT; it < iendT; it++)
          {
            for(ih = istartH; ih < iendH; ih++)
            {
              for(iw = istartW; iw < iendW; iw++)
              {
                /* update gradient */
                gradInput_p_d[it*isizeH*isizeW + ih*isizeW + iw] += grad_delta;
              }
            }
          }
        }
      }
    }
  }
}

// Backward entry point: makes gradOutput contiguous, zero-initialises
// gradInput to input's shape, reads sizes from input/gradOutput, and
// dispatches per-batch frames.
void THNN_(VolumetricAdaptiveAveragePooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput)
{
  int dimD = 0;
  int dimT = 1;
  int dimH = 2;
  int dimW = 3;
  int64_t sizeB = 1;
  int64_t sizeD;
  int64_t isizeT;
  int64_t isizeH;
  int64_t isizeW;
  int64_t osizeT;
  int64_t osizeH;
  int64_t osizeW;
  scalar_t *gradInput_data;
  scalar_t *gradOutput_data;

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);   // required: frame kernel accumulates with +=

  if (input->dim() == 5)
  {
    sizeB = input->size(0);
    dimD++;
    dimT++;
    dimH++;
    dimW++;
  }

  /* sizes */
  sizeD  = input->size(dimD);
  isizeT = input->size(dimT);
  isizeH = input->size(dimH);
  isizeW = input->size(dimW);
  osizeT = gradOutput->size(dimT);
  osizeH = gradOutput->size(dimH);
  osizeW = gradOutput->size(dimW);

  /* get raw pointers */
  gradInput_data = gradInput->data<scalar_t>();
  gradOutput_data = gradOutput->data<scalar_t>();

  /* backprop */
  if (input->dim() == 4)
  {
    THNN_(VolumetricAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                                  sizeD,
                                                                  isizeT, isizeH, isizeW,
                                                                  osizeT, osizeH, osizeW);
  }
  else
  {
    int64_t b;
    // independent batch elements: parallelise over b
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(VolumetricAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data+b*sizeD*isizeT*isizeH*isizeW, gradOutput_data+b*sizeD*osizeT*osizeH*osizeW,
                                                                    sizeD,
                                                                    isizeT, isizeH, isizeW,
                                                                    osizeT, osizeH, osizeW);
    }
  }

  /* cleanup */
  c10::raw::intrusive_ptr::decref(gradOutput);   // balances newContiguous
}

#endif

#undef START_IND
#undef END_IND
q_rhashmap_mk_loc.c
#include "q_rhashmap_common.h" #include "q_rhashmap_mk_loc.h" int q_rhashmap_mk_loc( uint32_t *hashes, // input [nkeys] uint32_t nkeys, // input uint32_t hmap_size, // input uint32_t *locs // output [nkeys] ) { int status = 0; int chunk_size = 1024; uint64_t divinfo = fast_div32_init(hmap_size); #pragma omp parallel for schedule(static, chunk_size) for ( uint32_t i = 0; i < nkeys; i++ ) { locs[i] = fast_rem32(hashes[i], hmap_size, divinfo); } return status; }