source
stringlengths
3
92
c
stringlengths
26
2.25M
DRB057-jacobiinitialize-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Use of private() clause */ #include <stdio.h> #include <math.h> #define MSIZE 200 #include <omp.h> int n = 200; int m = 200; double alpha = 0.0543; double u[200][200]; double f[200][200]; double uold[200][200]; double dx; double dy; void initialize() { int i; int j; int xx; int yy; dx = 2.0 / (n - 1); dy = 2.0 / (m - 1); /* Initialize initial condition and RHS */ #pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m) for (i = 0; i <= n - 1; i += 1) { #pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy) for (j = 0; j <= m - 1; j += 1) { /* -1 < x < 1 */ xx = ((int )(- 1.0 + dx * (i - 1))); /* -1 < y < 1 */ yy = ((int )(- 1.0 + dy * (j - 1))); u[i][j] = 0.0; f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)); } } } int main() { initialize(); int i; int j; for (i = 0; i <= n - 1; i += 1) { for (j = 0; j <= m - 1; j += 1) { printf("%lf %lf\n",u[i][j],f[i][j]); } } return 0; }
GB_unaryop__identity_uint16_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_uint32 // op(A') function: GB_tran__identity_uint16_uint32 // C type: uint16_t // A type: uint32_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_uint32 ( uint16_t *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; 
p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
parallel-simple.c
/* * parallel-simple.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0; #pragma omp parallel num_threads(2) shared(var) { if (omp_get_thread_num() == 1) { var++; } } // implicit barrier var++; fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
ejercicio2-1.c
#include <stdio.h> #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif main(){ int i, n = 7; int a[n], suma; for (i=0; i<n; i++) a[i] = i; suma=5; #pragma omp parallel private(suma) { #pragma omp for for(i=0; i<n; i++){ suma = suma + a[i]; printf("thread %d suma a[%d]/", omp_get_thread_num(), i); } printf("\n* thread %d suma= %d", omp_get_thread_num(), suma); } printf("\n"); }
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-126,128)),ceild(3*t1-254,256)),ceild(24*t2-Nz-1011,1024)),ceild(8*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(12*t1+Nx+15,1024)),floord(24*t2+Nx+11,1024)),floord(8*t3+Nx-5,1024)),floord(24*t1-24*t2+Nz+Nx+13,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),256*t4+254);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
vlad.c
/** @file vlad.c ** @brief VLAD - Declaration ** @author David Novotny ** @author Andrea Vedaldi **/ /* Copyright (C) 2013 David Novotny and Andera Vedaldi. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ /** <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @page vlad Vector of Locally Aggregated Descriptors (VLAD) encoding @author David Novotny @author Andrea Vedaldi <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @ref vlad.h implements the *Vector of Linearly Aggregated Descriptors* (VLAD) image representation @cite{jegou10aggregating} @cite{arandjelovic13all-about}. @ref vlad-starting demonstreates how to use the C API to compute the VLAD representation of an image. For further details on the VLAD image representation refer to: - @subpage vlad-fundamentals - VLAD definition and computation. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section vlad-starting Getting started with VLAD <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> The VLAD encoding of a set of features is obtained by using the function ::vl_vlad_encode. The function can be applied to both @c float or @c double data types. ::vl_vlad_encode requires a visual dictionary, for example obtained by using @ref kmeans. Furthermore, the assignments of features to dictionary elements must be pre-computed, for example by using @ref kdtree. In the following example code, the vocabulary is first created using the KMeans clustering, then the points, that are to be encoded are assigned to its corresponding nearest vocabulary words, after that the original vlad encoding routine without any normalization option takes place. At the end of the process the encoding is stored in the @c enc variable. 
@code vl_uint32 * indexes; float * assignments; float * enc int i; // create a KMeans object and run clustering to get vocabulary words (centers) kmeans = vl_kmeans_new (VLDistanceL2, VL_TYPE_FLOAT) ; vl_kmeans_cluster (kmeans, data, dimension, numData, numCenters) ; // find nearest cliuster centers for the data that should be encoded indexes = vl_malloc(sizeof(vl_uint32) * numDataToEncode); vl_kmeans_quantize(kmeans,indexes,dataToEncode,numDataToEncode); // convert indexes array to assignments array, // which can be processed by vl_vlad_encode assignments = vl_malloc(sizeof(float) * numDataToEncode * numCenters); memset(assignments, 0, sizeof(float) * numDataToEncode * numCenters); for(i = 0; i < numDataToEncode; i++) { assignments[i + numCenters * indexes[i]] = 1.; } // allocate space for vlad encoding enc = vl_malloc(sizeof(TYPE) * dimension * numCenters); // do the encoding job vl_vlad_encode (enc, VL_F_TYPE, vl_kmeans_get_centers(kmeans), dimension, numCenters, data, numData, assignments, 0) ; @endcode Various @ref vlad-normalization normalizations can be applied to the VLAD vectors. These are controlled by the parameter @a flag of ::vl_vlad_encode. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @page vlad-fundamentals VLAD fundamentals @tableofcontents <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> This page describes the *Vector of Locally Aggregated Descriptors* (VLAD) image encoding of @cite{jegou10aggregating}. See @ref vlad for an overview of the C API. VLAD is a *feature encoding and pooling* method, similar to @ref fisher "Fisher vectors". VLAD encodes a set of local feature descriptors $I=(\bx_1,\dots,\bx_n)$ extracted from an image using a dictionary built using a clustering method such as @ref gmm or @ref kmeans. Let $q_{ik}$ be the strength of the association of data vector $\bx_i$ to cluster $\mu_k$, such that $q_{ik} \geq 0$ and $\sum_{k=1}^K q_{ik} = 1$. The association may be either soft (e.g. 
obtained as the posterior probabilities of the GMM clusters) or hard (e.g. obtained by vector quantization with K-means). $\mu_k$ are the cluster *means*, vectors of the same dimension as the data $\bx_i$. VLAD encodes feature $\bx$ by considering the *residuals* \[ \bv_k = \sum_{i=1}^{N} q_{ik} (\bx_{i} - \mu_k). \] The residulas are stacked together to obtain the vector \[ \hat\Phi(I) = \begin{bmatrix} \vdots \\ \bv_k \\ \vdots \end{bmatrix} \] Before the VLAD encoding is used it is usually normalized, as explained @ref vlad-normalization next. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section vlad-normalization VLAD normalization <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> VLFeat VLAD implementation supports a number of different normalization strategies. These are optionally applied in this order: - **Component-wise mass normalization.** Each vector $\bv_k$ is divided by the total mass of features associated to it $\sum_{i=1}^N q_{ik}$. - **Square-rooting.** The function $\sign(z)\sqrt{|z|}$ is applied to all scalar components of the VLAD descriptor. - **Component-wise $l^2$ normalization.** The vectors $\bv_k$ are divided by their norm $\|\bv_k\|_2$. - **Global $l^2$ normalization.** The VLAD descriptor $\hat\Phi(I)$ is divided by its norm $\|\hat\Phi(I)\|_2$. 
*/ #include "vlad.h" #include "mathop.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #if defined(_OPENMP) #include <omp.h> #endif /* ================================================================ */ #ifdef VL_VLAD_INSTANTIATING static void VL_XCAT(_vl_vlad_encode_, SFX) (TYPE * enc, TYPE const * means, vl_size dimension, vl_size numClusters, TYPE const * data, vl_size numData, TYPE const * assignments, int flags) { vl_uindex dim ; vl_index i_cl, i_d ; memset(enc, 0, sizeof(TYPE) * dimension * numClusters) ; #if defined(_OPENMP) #pragma omp parallel for default(shared) private(i_cl,i_d,dim) num_threads(vl_get_max_threads()) #endif for (i_cl = 0; i_cl < (signed)numClusters; i_cl++) { double clusterMass = 0 ; for (i_d = 0; i_d < (signed)numData; i_d++) { if (assignments[i_d*numClusters + i_cl] > 0) { double q = assignments[i_d*numClusters+i_cl] ; clusterMass += q ; for(dim = 0; dim < dimension; dim++) { enc [i_cl * dimension + dim] += q * data [i_d * dimension + dim] ; } } } if (clusterMass > 0) { if (flags & VL_VLAD_FLAG_NORMALIZE_MASS) { for(dim = 0; dim < dimension; dim++) { enc[i_cl*dimension + dim] /= clusterMass ; enc[i_cl*dimension + dim] -= means[i_cl*dimension+dim]; } } else { for(dim = 0; dim < dimension; dim++) { enc[i_cl*dimension + dim] -= clusterMass * means[i_cl*dimension+dim]; } } } if (flags & VL_VLAD_FLAG_SQUARE_ROOT) { for(dim = 0; dim < dimension; dim++) { TYPE z = enc[i_cl*dimension + dim] ; if (z >= 0) { enc[i_cl*dimension + dim] = VL_XCAT(vl_sqrt_, SFX)(z) ; } else { enc[i_cl*dimension + dim] = - VL_XCAT(vl_sqrt_, SFX)(- z) ; } } } if (flags & VL_VLAD_FLAG_NORMALIZE_COMPONENTS) { TYPE n = 0 ; dim = 0 ; for(dim = 0; dim < dimension; dim++) { TYPE z = enc[i_cl*dimension + dim] ; n += z * z ; } n = VL_XCAT(vl_sqrt_, SFX)(n) ; n = VL_MAX(n, 1e-12) ; for(dim = 0; dim < dimension; dim++) { enc[i_cl*dimension + dim] /= n ; } } } if (! 
(flags & VL_VLAD_FLAG_UNNORMALIZED)) { TYPE n = 0 ; for(dim = 0 ; dim < dimension * numClusters ; dim++) { TYPE z = enc [dim] ; n += z * z ; } n = VL_XCAT(vl_sqrt_, SFX)(n) ; n = VL_MAX(n, 1e-12) ; for(dim = 0 ; dim < dimension * numClusters ; dim++) { enc[dim] /= n ; } } } /* VL_FISHER_INSTANTIATING */ #else #ifndef __DOXYGEN__ #define FLT VL_TYPE_FLOAT #define TYPE float #define SFX f #define VL_VLAD_INSTANTIATING #include "vlad.c" #define FLT VL_TYPE_DOUBLE #define TYPE double #define SFX d #define VL_VLAD_INSTANTIATING #include "vlad.c" #endif /* VL_VLAD_INSTANTIATING */ #endif /* ================================================================ */ #ifndef VL_VLAD_INSTANTIATING /** @brief VLAD encoding of a set of vectors. ** @param enc output VLAD encoding (out). ** @param dataType the type of the input data (::VL_TYPE_DOUBLE or ::VL_TYPE_FLOAT). ** @param numData number of data vectors to encode. ** @param means cluster means. ** @param numClusters number of clusters. ** @param data the data vectors to encode. ** @param dimension dimensionality of the data. ** @param assignments data to cluster soft assignments. ** @param flags options. ** ** @a enc is the VLAD vector of size @a numClusters by ** @a dimension. @a means is a matrix with @a numClusters columns and ** @a dimension rows. @a data is the matrix of vectors to be encoded, ** with @a dimension rows and @a numData columns. @a assignments is a ** matrix with @a numClusters rows and @a numData columns. ** All the matrices should be stored in a row major order. ** ** @a flag allows controlling further options: ** ::VL_VLAD_FLAG_NORMALIZE_COMPONENTS, ::VL_VLAD_FLAG_SQUARE_ROOT, ** ::VL_VLAD_FLAG_UNNORMALIZED, and ::VL_VLAD_FLAG_NORMALIZE_MASS. 
** ** @sa @ref vlad **/ void vl_vlad_encode (void * enc, vl_type dataType, void const * means, vl_size dimension, vl_size numClusters, void const * data, vl_size numData, void const * assignments, int flags) { switch(dataType) { case VL_TYPE_FLOAT: _vl_vlad_encode_f ((float *) enc, (float const *) means, dimension, numClusters, (float const *) data, numData, (float const *) assignments, flags) ; break; case VL_TYPE_DOUBLE: _vl_vlad_encode_d ((double *) enc, (double const *) means, dimension, numClusters, (double const *) data, numData, (double const *) assignments, flags) ; break; default: abort(); } } /* ! VL_VLAD_INSTANTIATING */ #endif #undef SFX #undef TYPE #undef FLT #undef VL_VLAD_INSTANTIATING
omp_loop2.c
/* vim: set ts=4 sw=4: */ /* Filename : omp_loop2.c * Description : simple OpenMP model * Author : SunYoung Kim <sunyzero@gmail.com> * Notes : */ #include <stdio.h> #include <omp.h> int main() { int i; /* combine two clauses */ #pragma omp parallel for for (i=0; i<8; i++) { printf("[%d] Hello OpenMP\n", i); } /* implicit barrier */ return 0; }
core_ztrssq.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c d s * **/ #include "core_blas.h" #include "plasma_types.h" #include "plasma_internal.h" #include "core_lapack.h" #include <math.h> /******************************************************************************/ // This computation also shows up in core_zsyssq() and can be factored out. // LAPACK does real and imag components separately in zlassq. static inline void ssq(plasma_complex64_t value, double *scale, double *sumsq) { double absa = cabs(value); if (absa != 0.0) { // != propagates nan if (*scale < absa) { *sumsq = 1.0 + *sumsq*((*scale/absa)*(*scale/absa)); *scale = absa; } else { *sumsq = *sumsq + ((absa/(*scale))*(absa/(*scale))); } } } /******************************************************************************/ void core_ztrssq(plasma_enum_t uplo, plasma_enum_t diag, int m, int n, const plasma_complex64_t *A, int lda, double *scale, double *sumsq) { if (uplo == PlasmaUpper) { if (diag == PlasmaNonUnit) { for (int j = 0; j < n; j++) { ssq(A[lda*j], scale, sumsq); for (int i = 1; i < imin(j+1, m); i++) { ssq(A[lda*j+i], scale, sumsq); } } } else { // PlasmaUnit int j; for (j = 0; j < imin(n, m); j++) { ssq(1.0, scale, sumsq); for (int i = 0; i < j; i++) { ssq(A[lda*j+i], scale, sumsq); } } for (; j < n; j++) { ssq(A[lda*j], scale, sumsq); for (int i = 1; i < m; i++) { ssq(A[lda*j+i], scale, sumsq); } } } } else { // PlasmaLower if (diag == PlasmaNonUnit) { for (int j = 0; j < imin(n, m); j++) { ssq(A[lda*j+j], scale, sumsq); for (int i = j+1; i < m; i++) { ssq(A[lda*j+i], scale, sumsq); } } } else { // PlasmaUnit for (int j = 0; j < imin(n, m); j++) { ssq(1.0, scale, sumsq); for (int i = j+1; i < m; i++) { ssq(A[lda*j+i], scale, sumsq); } } } } } /******************************************************************************/ void core_omp_ztrssq(plasma_enum_t uplo, plasma_enum_t diag, int m, int n, 
const plasma_complex64_t *A, int lda, double *scale, double *sumsq, plasma_sequence_t *sequence, plasma_request_t *request) { #pragma omp task depend(in:A[0:lda*n]) \ depend(out:scale[0:n]) \ depend(out:sumsq[0:n]) { if (sequence->status == PlasmaSuccess) { *scale = 0.0; *sumsq = 1.0; core_ztrssq(uplo, diag, m, n, A, lda, scale, sumsq); } } }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
haval_fmt_plug.c
/* HAVAL cracker patch for JtR. Hacked together during April of 2013 by Dhiru
 * Kholia <dhiru at openwall.com>.
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted. */

/* Standard JtR plugin "stanza": the same file is included three times by the
 * core, once to emit externs, once to register the formats, and once (the
 * #else branch) to provide the actual implementation. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_haval_256_3;
extern struct fmt_main fmt_haval_128_4;
#elif FMT_REGISTERS_H
john_register_one(&fmt_haval_256_3);
john_register_one(&fmt_haval_128_4);
#else

#include <string.h>
#include "arch.h"
#include "sph_haval.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"

/* This format is only OpenMP-parallel when "fast formats OMP" is enabled. */
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif

#ifdef _OPENMP
/* Keys-per-crypt multiplier; scaled by OMP_SCALE in init(). */
static int omp_t = 1;
#include <omp.h>
// Tuned on core i7 quad HT
//       256-3  128-4
// 1      227k   228k
// 64    6359k  5489k
// 128   7953k  6654k
// 256   8923k  7618k
// 512   9804k  8223k
// 1k   10307k  8569k  ** set to this value
// 2k   10081k  8427k
// 4k   10551k  8893k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 64
#else
#define OMP_SCALE 1024
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP

#include "memdbg.h"

#define FORMAT_TAG "$haval$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
/* Digest sizes in bytes for the two supported HAVAL variants. */
#define BINARY_SIZE256 32
#define BINARY_SIZE128 16
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

/* Self-test vectors: each hash appears bare and with the $haval$ tag. */
static struct fmt_tests haval_256_3_tests[] = {
	{"91850C6487C9829E791FC5B58E98E372F3063256BB7D313A93F1F83B426AEDCC", "HAVAL"},
	{"$haval$91850C6487C9829E791FC5B58E98E372F3063256BB7D313A93F1F83B426AEDCC", "HAVAL"},
	// john.pot uses lower case hex, so repeat that hash with lower case hex
	{"$haval$91850c6487c9829e791fc5b58e98e372f3063256bb7d313a93f1f83b426aedcc", "HAVAL"},
	{"8699f1e3384d05b2a84b032693e2b6f46df85a13a50d93808d6874bb8fb9e86c", "abc"},
	{"$haval$8699f1e3384d05b2a84b032693e2b6f46df85a13a50d93808d6874bb8fb9e86c", "abc"},
	{"cd43bec91c50e5f781fc50a78a3e9c8c48b407fa35a20c972178d63867dbe158", "john"},
	{"$haval$cd43bec91c50e5f781fc50a78a3e9c8c48b407fa35a20c972178d63867dbe158", "john"},
	{"5aa9c913463f82260071629c8ac2c54d73b3af016ffd8e8ce128558d909fab06", "passweird"},
	{"$haval$5aa9c913463f82260071629c8ac2c54d73b3af016ffd8e8ce128558d909fab06", "passweird"},
	{NULL}
};

static struct fmt_tests haval_128_4_tests[] = {
	{"EE6BBF4D6A46A679B3A856C88538BB98", ""},
	{"$haval$ee6bbf4d6a46a679b3a856c88538bb98", ""},
	{"6f2132867c9648419adcd5013e532fa2", "abc"},
	{"$haval$6f2132867c9648419adcd5013e532fa2", "abc"},
	{"c98232b4ae6e7ef3235e838387111f23", "john"},
	{"$haval$c98232b4ae6e7ef3235e838387111f23", "john"},
	{"50683b38df349781b2ef29e7720eb730", "passweird"},
	{"$haval$50683b38df349781b2ef29e7720eb730", "passweird"},
	{NULL}
};

/* Per-candidate key buffers and output digests, allocated in init().
 * crypt_out is sized for the larger (256-bit) digest and shared by both
 * formats; the 128-bit format simply uses the first 16 bytes. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE256 / sizeof(uint32_t)];

/* Allocate the key/digest arrays, scaling keys-per-crypt by the thread
 * count and OMP_SCALE when built with OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	if (!saved_key) {
		saved_key = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*saved_key));
		crypt_out = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*crypt_out));
	}
}

/* Release the buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Common validator: optional "$haval$" tag followed by exactly `len`
 * hex digits. Returns 1 when the ciphertext is well-formed, else 0. */
static int valid(char *ciphertext, struct fmt_main *self, int len)
{
	char *p;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	/* strnlen with len+1 distinguishes "too long" from "exact length". */
	if (strnlen(p, len + 1) != len)
		return 0;
	/* atoi16[] maps non-hex characters to 0x7f. */
	while(*p)
		if (atoi16[ARCH_INDEX(*p++)] == 0x7f)
			return 0;
	return 1;
}

/* we need independent valids, since the $haval$ signature is the same */
/* otherwise, if we have input with a mix of both types, then ALL of them */
/* will validate, even though only the ones of the proper type
will actually */
/* be tested. If we had a singleton crypt function (which both 128-4 and */
/* 256-3 used, then a single valid would also work. But since each have */
/* their own crypt, and they are NOT compatible, then we need separate valids */

/* Per-variant validators: 64 hex digits (256-bit), 32 hex digits (128-bit). */
static int valid3(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, 64);
}

static int valid4(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, 32);
}

/* Decode the hex ciphertext (with or without tag) into a static 32-byte
 * binary buffer. The union forces ARCH_WORD alignment of the buffer. */
static void *get_binary_256(char *ciphertext)
{
	static union {
		unsigned char c[32];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;
	for (i = 0; i < 32; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Same as get_binary_256() but for the 16-byte (128-bit) digest. */
static void *get_binary_128(char *ciphertext)
{
	static union {
		unsigned char c[16];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;
	for (i = 0; i < 16; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Hash-table lookups over the first computed word, at increasing widths. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Hash all queued keys with HAVAL-256/3. Standard JtR idiom: without
 * OpenMP the #ifdef strips the for-loop header and the braced body runs
 * once for index 0 (MAX_KEYS_PER_CRYPT is 1 in that build). */
static int crypt_256_3(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_haval256_3_context ctx;

		sph_haval256_3_init(&ctx);
		sph_haval256_3(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_haval256_3_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Hash all queued keys with HAVAL-128/4; same structure as crypt_256_3(). */
static int crypt_128_4(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_haval128_4_context ctx;

		sph_haval128_4_init(&ctx);
		sph_haval128_4(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_haval128_4_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Quick scan: compare only the first ARCH_SIZE bytes of each digest;
 * a full-length compare is done later by cmp_one256/cmp_one128. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full-digest comparisons for one candidate. */
static int cmp_one256(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE256);
}

static int cmp_one128(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE128);
}

/* Nothing further to verify once the binary matches. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate key, truncated to PLAINTEXT_LENGTH, NUL-terminated. */
static void haval_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Canonicalize a ciphertext: force the $haval$ tag and lower-case hex,
 * so pot-file entries compare consistently. Buffer is sized for the
 * larger 256-bit digest and therefore fits both variants. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + 2 * BINARY_SIZE256 + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	strcpy(out, FORMAT_TAG);
	strcpy(&out[TAG_LENGTH], ciphertext);
	strlwr(&out[TAG_LENGTH]);
	return out;
}

/* Format descriptor for HAVAL-256/3: fmt_params then fmt_methods,
 * in the member order required by formats.h. */
struct fmt_main fmt_haval_256_3 = {
	{
		"HAVAL-256-3",
		"",
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE256,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG },
		haval_256_3_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid3,
		split,
		get_binary_256,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		haval_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_256_3,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one256,
		cmp_exact
	}
};

/* Format descriptor for HAVAL-128/4.
 * NOTE(review): unlike 256-3 this registers no signature ({ NULL } instead
 * of { FORMAT_TAG }) — confirm that is intended and not an oversight. */
struct fmt_main fmt_haval_128_4 = {
	{
		"HAVAL-128-4",
		"",
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE128,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ NULL },
		haval_128_4_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid4,
		split,
		get_binary_128,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		haval_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_128_4,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one128,
		cmp_exact
	}
};

#endif /* plugin stanza */
main.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include <time.h> #include <math.h> double *matxvet(int m, int n, int *x, double **A); int main() { int n ,m, i, j, *x; double ** mat, *b; printf("Inserire il numero di righe della matrice: "); scanf("%d", &n); printf("Inserire il numero di colonne della matrice: "); scanf("%d", &m); //Allocazioni dinamiche. x = (int *) malloc(m*sizeof(int)); b = (double *) malloc(n*sizeof(double)); mat = (double **) malloc(n*sizeof(double *)); for (i = 0; i < n; i++) mat[i]=(double *) malloc(m*sizeof(double *)); printf("\n"); for (i = 0; i < m; i++){ printf("Inserire elemento nel vettore X: \n"); scanf("%d", &x[i]); } //Stampa del vettore. for (i = 0; i < m; i++){ printf("Vettore X: %d\n", x[i]); } srand(time(NULL)); //Inserimento valori nella matrice. for (i = 0; i < n; i++) for (j = 0; j < m; j++){ mat[i][j] = 1 + rand() %10; //Riempimento della matrice con numeri casuali da 1 a 50. } //Stampa della matrice. for (i = 0; i < n; i++){ for (j = 0; j < m; j++){ printf("%f\t", mat[i][j]); } printf("\n"); } printf("\n"); b = matxvet(m, n, x, mat); //Stampa del risultato. for (i = 0; i < n; i++){ printf("Vettore b: %f\n", b[i]); } free(x); free(b); free(mat); } double *matxvet(int m, int n, int *x, double **A) { int i, j; double *b; b = (double*)malloc(n*sizeof(double)); #pragma omp parallel for default(none) shared(m,n,A,x,b) private(i,j) for(i = 0; i < n; i++){ for(j = 0; j < m; j++) b[i] += A[i][j]*x[j]; } return b; }
genScalData.c
#include "defs.h"

/* Set this variable to zero to run the data generator on
   one thread (for debugging purposes) */
#define PARALLEL_SDG 0

/*
 * genScalData - generate a scale-free R-MAT graph (SSCA2-style) into SDGdata.
 *
 * Produces M edge tuples over N vertices using recursive R-MAT partitioning
 * with sprng random streams, permutes vertex IDs, assigns integer edge
 * weights, and stores the arrays (ownership transferred) in SDGdata.
 * Returns the elapsed wall-clock time in seconds.
 *
 * Correctness of the parallel version depends on the explicit
 * "#pragma omp barrier"s and the implicit barriers at the end of each
 * "#pragma omp for" below: thread 0 allocates the shared buffers and the
 * barriers publish them before other threads touch them.
 */
double genScalData(graphSDG* SDGdata)
{
	VERT_T *src, *dest;
	WEIGHT_T *wt;
	LONG_T n, m;
	VERT_T *permV;            /* vertex-ID permutation, allocated by tid 0 */
#ifdef _OPENMP
	omp_lock_t* vLock;        /* one lock per vertex for the parallel shuffle */
#endif
	double elapsed_time;
	int seed;

	n = N;
	m = M;

	/* allocate memory for edge tuples */
	src = (VERT_T *) malloc(M*sizeof(VERT_T));
	dest = (VERT_T *) malloc(M*sizeof(VERT_T));
	assert(src != NULL);
	assert(dest != NULL);

	/* sprng seed */
	seed = 2387;

	elapsed_time = get_seconds();

#ifdef _OPENMP
#if PARALLEL_SDG
	omp_set_num_threads(omp_get_max_threads());
	// omp_set_num_threads(16);
#else
	omp_set_num_threads(1);
#endif
#endif

#ifdef _OPENMP
#pragma omp parallel
#endif
{
	int tid, nthreads;
#ifdef DIAGNOSTIC
	double elapsed_time_part;
#endif
	int *stream;
	LONG_T i, j, u, v, step;
	DOUBLE_T av, bv, cv, dv, p, S, var;
	LONG_T tmpVal;

#ifdef _OPENMP
	nthreads = omp_get_num_threads();
	tid = omp_get_thread_num();
#else
	nthreads = 1;
	tid = 0;
#endif

	/* Initialize RNG stream (one independent sprng stream per thread) */
	stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT);

#ifdef DIAGNOSTIC
	if (tid == 0)
		elapsed_time_part = get_seconds();
#endif

	/* Start adding edges: each edge is placed by SCALE rounds of R-MAT
	 * quadrant selection driven by probabilities A/B/C/D. */
#ifdef _OPENMP
#pragma omp for
#endif
	for (i=0; i<m; i++) {
		u = 1;
		v = 1;
		step = n/2;

		av = A;
		bv = B;
		cv = C;
		dv = D;

		/* First quadrant choice over the full [1..n] x [1..n] grid. */
		p = sprng(stream);
		if (p < av) {
			/* Do nothing */
		} else if ((p >= av) && (p < av+bv)) {
			v += step;
		} else if ((p >= av+bv) && (p < av+bv+cv)) {
			u += step;
		} else {
			u += step;
			v += step;
		}

		for (j=1; j<SCALE; j++) {
			step = step/2;

			/* Vary a,b,c,d by up to 10% */
			var = 0.1;
			av *= 0.95 + var * sprng(stream);
			bv *= 0.95 + var * sprng(stream);
			cv *= 0.95 + var * sprng(stream);
			dv *= 0.95 + var * sprng(stream);

			/* Renormalize so the four probabilities sum to 1. */
			S = av + bv + cv + dv;
			av = av/S;
			bv = bv/S;
			cv = cv/S;
			dv = dv/S;

			/* Choose partition */
			p = sprng(stream);
			if (p < av) {
				/* Do nothing */
			} else if ((p >= av) && (p < av+bv)) {
				v += step;
			} else if ((p >= av+bv) && (p < av+bv+cv)) {
				u += step;
			} else {
				u += step;
				v += step;
			}
		}

		/* Convert from 1-based grid coordinates to 0-based vertex IDs. */
		src[i] = u-1;
		dest[i] = v-1;
	}

#ifdef DIAGNOSTIC
	if (tid == 0) {
		elapsed_time_part = get_seconds() - elapsed_time_part;
		fprintf(stderr, "Tuple generation time: %lf seconds\n", elapsed_time_part);
		elapsed_time_part = get_seconds();
	}
#endif

	/* Generate vertex ID permutations */
	if (tid == 0) {
		permV = (VERT_T *) malloc(N*sizeof(VERT_T));
		assert(permV != NULL);
	}

	/* Barrier publishes permV before all threads initialize it. */
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
	for (i=0; i<n; i++) {
		permV[i] = i;
	}

#ifdef _OPENMP
	if (tid == 0) {
		vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
		assert(vLock != NULL);
	}

#pragma omp barrier

#pragma omp for
	for (i=0; i<n; i++) {
		omp_init_lock(&vLock[i]);
	}
#endif

	/* Random shuffle of permV. In the parallel build each swap is
	 * attempted only if BOTH entry locks are acquired via non-blocking
	 * omp_test_lock (skipping a contended swap rather than deadlocking;
	 * this slightly biases the shuffle but is deliberate). */
#ifdef _OPENMP
#pragma omp for
#endif
	for (i=0; i<n; i++) {
		/* sprng() returns [0,1), so j is in [0, n-1]. */
		j = n*sprng(stream);
		if (i != j) {
#ifdef _OPENMP
			int l1 = omp_test_lock(&vLock[i]);
			if (l1) {
				int l2 = omp_test_lock(&vLock[j]);
				if (l2) {
#endif
					tmpVal = permV[i];
					permV[i] = permV[j];
					permV[j] = tmpVal;
#ifdef _OPENMP
					omp_unset_lock(&vLock[j]);
				}
				omp_unset_lock(&vLock[i]);
			}
#endif
		}
	}

#ifdef _OPENMP
#pragma omp for
	for (i=0; i<n; i++) {
		omp_destroy_lock(&vLock[i]);
	}

#pragma omp barrier

	if (tid == 0) {
		free(vLock);
	}
#endif

	/* Relabel both endpoints of every edge through the permutation.
	 * The implicit barrier at the end of this loop makes the later
	 * free(permV) by tid 0 safe. */
#ifdef _OPENMP
#pragma omp for
#endif
	for (i=0; i<m; i++) {
		src[i] = permV[src[i]];
		dest[i] = permV[dest[i]];
	}

#ifdef DIAGNOSTIC
	if (tid == 0) {
		elapsed_time_part = get_seconds() - elapsed_time_part;
		fprintf(stderr, "Permuting vertex IDs: %lf seconds\n", elapsed_time_part);
		elapsed_time_part = get_seconds();
	}
#endif

	if (tid == 0) {
		free(permV);
	}

	/* Generate edge weights */
	if (tid == 0) {
		wt = (WEIGHT_T *) malloc(M*sizeof(WEIGHT_T));
		assert(wt != NULL);
	}

	/* Barrier publishes wt before all threads write weights in [1, MaxIntWeight]. */
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
	for (i=0; i<m; i++) {
		wt[i] = 1 + MaxIntWeight * sprng(stream);
	}

#ifdef DIAGNOSTIC
	if (tid == 0) {
		elapsed_time_part = get_seconds() - elapsed_time_part;
		fprintf(stderr, "Generating edge weights: %lf seconds\n", elapsed_time_part);
		elapsed_time_part = get_seconds();
	}
#endif

	/* Hand the arrays off to the caller; SDGdata now owns src/dest/wt.
	 * NOTE(review): written by every thread in the parallel build (benign,
	 * all write identical values) — confirm intended. */
	SDGdata->n = n;
	SDGdata->m = m;
	SDGdata->startVertex = src;
	SDGdata->endVertex = dest;
	SDGdata->weight = wt;

	free_sprng(stream);
#ifdef _OPENMP
#endif
}

	elapsed_time = get_seconds() - elapsed_time;
	return elapsed_time;
}
residualbased_newton_raphson_strategy.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ \. // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY // System includes #include <iostream> // External includes // Project includes #include "includes/define.h" #include "solving_strategies/strategies/solving_strategy.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/builtin_timer.h" //default builder and solver #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonStrategy * @ingroup KratosCore * @brief This is the base Newton Raphson strategy * @details This strategy iterates until the convergence is achieved (or the maximum number of iterations is surpassed) using a Newton Raphson algorithm * @author Riccardo Rossi */ template <class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; // Counted pointer of ClassName KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedNewtonRaphsonStrategy); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef 
TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor */ explicit ResidualBasedNewtonRaphsonStrategy() : BaseType() { } /** * @brief Default constructor. (with parameters) * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ explicit ResidualBasedNewtonRaphsonStrategy(ModelPart& rModelPart) : ResidualBasedNewtonRaphsonStrategy(rModelPart, ResidualBasedNewtonRaphsonStrategy::GetDefaultParameters()) { } /** * @brief Default constructor. 
(with parameters) * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ explicit ResidualBasedNewtonRaphsonStrategy(ModelPart& rModelPart, Parameters ThisParameters) : BaseType(rModelPart), mSolutionStepIsInitialized(false), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); // Getting builder and solver auto p_builder_and_solver = GetBuilderAndSolver(); if (p_builder_and_solver != nullptr) { // Tells to the builder and solver if the reactions have to be Calculated or not p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to // be reshaped at each step or not p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep); } else { KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "BuilderAndSolver is not initialized. 
Please assign one before settings flags" << std::endl; } mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); } /** * Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, int MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false) : BaseType(rModelPart, MoveMeshFlag), mpScheme(pScheme), mpConvergenceCriteria(pNewConvergenceCriteria), mReformDofSetAtEachStep(ReformDofSetAtEachStep), mCalculateReactionsFlag(CalculateReactions), mSolutionStepIsInitialized(false), mMaxIterationNumber(MaxIterations), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY; // Setting up the default builder and solver mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer( new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSolver)); // Tells to the builder and solver if the reactions have to be Calculated or not mpBuilderAndSolver->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to // be reshaped at each step or not 
mpBuilderAndSolver->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); KRATOS_CATCH(""); } /** * @brief Constructor specifying the builder and solver * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewConvergenceCriteria The convergence criteria employed * @param pNewBuilderAndSolver The builder and solver employed * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, int MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false) : BaseType(rModelPart, MoveMeshFlag), mpScheme(pScheme), mpBuilderAndSolver(pNewBuilderAndSolver), mpConvergenceCriteria(pNewConvergenceCriteria), mReformDofSetAtEachStep(ReformDofSetAtEachStep), mCalculateReactionsFlag(CalculateReactions), mSolutionStepIsInitialized(false), mMaxIterationNumber(MaxIterations), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY // Getting builder and solver auto p_builder_and_solver = GetBuilderAndSolver(); // Tells to the builder and solver if the reactions have to be Calculated or not p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag); // 
Tells to the Builder And Solver if the system matrix and vectors need to //be reshaped at each step or not p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); KRATOS_CATCH("") } /** * @brief Constructor specifying the builder and solver * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param pNewBuilderAndSolver The builder and solver employed * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver") explicit ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, int MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag) { KRATOS_TRY KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "This constructor is deprecated, 
please use the constructor without linear solver" << std::endl; // Getting builder and solver auto p_builder_and_solver = GetBuilderAndSolver(); // We check if the linear solver considered for the builder and solver is consistent auto p_linear_solver = p_builder_and_solver->GetLinearSystemSolver(); KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl; KRATOS_CATCH("") } /** * Constructor with Parameters * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param Settings Settings used in the strategy */ ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, Parameters Settings) : BaseType(rModelPart), mpScheme(pScheme), mpConvergenceCriteria(pNewConvergenceCriteria), mSolutionStepIsInitialized(false), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY; // Setting up the default builder and solver mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer( new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSolver)); // Tells to the builder and solver if the reactions have to be Calculated or not mpBuilderAndSolver->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to // be reshaped at each step or not mpBuilderAndSolver->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are 
rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); KRATOS_CATCH(""); } /** * @brief Constructor specifying the builder and solver and using Parameters * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param pNewBuilderAndSolver The builder and solver employed * @param Settings Settings used in the strategy */ ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, Parameters Settings) : BaseType(rModelPart), mpScheme(pScheme), mpBuilderAndSolver(pNewBuilderAndSolver), mpConvergenceCriteria(pNewConvergenceCriteria), mSolutionStepIsInitialized(false), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY // Validate and assign defaults Settings = this->ValidateAndAssignParameters(Settings, this->GetDefaultParameters()); this->AssignSettings(Settings); // Getting builder and solver auto p_builder_and_solver = GetBuilderAndSolver(); // Tells to the builder and solver if the reactions have to be Calculated or not p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to //be reshaped at each step or not p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); 
KRATOS_CATCH("") } /** * @brief Constructor specifying the builder and solver and using Parameters * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param pNewBuilderAndSolver The builder and solver employed * @param Parameters Settings used in the strategy */ KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver") ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, Parameters Settings) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, Settings) { KRATOS_TRY KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "This constructor is deprecated, please use the constructor without linear solver" << std::endl; // Getting builder and solver auto p_builder_and_solver = GetBuilderAndSolver(); // We check if the linear solver considered for the builder and solver is consistent auto p_linear_solver = p_builder_and_solver->GetLinearSystemSolver(); KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl; KRATOS_CATCH("") } /** * @brief Destructor. * @details In trilinos third party library, the linear solver's preconditioner should be freed before the system matrix. We control the deallocation order with Clear(). 
*/ ~ResidualBasedNewtonRaphsonStrategy() override { // If the linear solver has not been deallocated, clean it before // deallocating mpA. This prevents a memory error with the the ML // solver (which holds a reference to it). // NOTE: The linear solver is hold by the B&S auto p_builder_and_solver = this->GetBuilderAndSolver(); if (p_builder_and_solver != nullptr) { p_builder_and_solver->Clear(); } // Deallocating system vectors to avoid errors in MPI. Clear calls // TrilinosSpace::Clear for the vectors, which preserves the Map of // current vectors, performing MPI calls in the process. Due to the // way Python garbage collection works, this may happen after // MPI_Finalize has already been called and is an error. Resetting // the pointers here prevents Clear from operating with the // (now deallocated) vectors. mpA.reset(); mpDx.reset(); mpb.reset(); Clear(); } /** * @brief Set method for the time scheme * @param pScheme The pointer to the time scheme considered */ void SetScheme(typename TSchemeType::Pointer pScheme) { mpScheme = pScheme; }; /** * @brief Get method for the time scheme * @return mpScheme: The pointer to the time scheme considered */ typename TSchemeType::Pointer GetScheme() { return mpScheme; }; /** * @brief Set method for the builder and solver * @param pNewBuilderAndSolver The pointer to the builder and solver considered */ void SetBuilderAndSolver(typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver) { mpBuilderAndSolver = pNewBuilderAndSolver; }; /** * @brief Get method for the builder and solver * @return mpBuilderAndSolver: The pointer to the builder and solver considered */ typename TBuilderAndSolverType::Pointer GetBuilderAndSolver() { return mpBuilderAndSolver; }; /** * @brief This method sets the flag mInitializeWasPerformed * @param InitializePerformedFlag The flag that tells if the initialize has been computed */ void SetInitializePerformedFlag(bool InitializePerformedFlag = true) { mInitializeWasPerformed = 
InitializePerformedFlag; } /** * @brief This method gets the flag mInitializeWasPerformed * @return mInitializeWasPerformed: The flag that tells if the initialize has been computed */ bool GetInitializePerformedFlag() { return mInitializeWasPerformed; } /** * @brief This method sets the flag mCalculateReactionsFlag * @param CalculateReactionsFlag The flag that tells if the reactions are computed */ void SetCalculateReactionsFlag(bool CalculateReactionsFlag) { mCalculateReactionsFlag = CalculateReactionsFlag; } /** * @brief This method returns the flag mCalculateReactionsFlag * @return The flag that tells if the reactions are computed */ bool GetCalculateReactionsFlag() { return mCalculateReactionsFlag; } /** * @brief This method sets the flag mFullUpdateFlag * @param UseOldStiffnessInFirstIterationFlag The flag that tells if */ void SetUseOldStiffnessInFirstIterationFlag(bool UseOldStiffnessInFirstIterationFlag) { mUseOldStiffnessInFirstIteration = UseOldStiffnessInFirstIterationFlag; } /** * @brief This method returns the flag mFullUpdateFlag * @return The flag that tells if */ bool GetUseOldStiffnessInFirstIterationFlag() { return mUseOldStiffnessInFirstIteration; } /** * @brief This method sets the flag mReformDofSetAtEachStep * @param Flag The flag that tells if each time step the system is rebuilt */ void SetReformDofSetAtEachStepFlag(bool Flag) { mReformDofSetAtEachStep = Flag; GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); } /** * @brief This method returns the flag mReformDofSetAtEachStep * @return The flag that tells if each time step the system is rebuilt */ bool GetReformDofSetAtEachStepFlag() { return mReformDofSetAtEachStep; } /** * @brief This method sets the flag mMaxIterationNumber * @param MaxIterationNumber This is the maximum number of on linear iterations */ void SetMaxIterationNumber(unsigned int MaxIterationNumber) { mMaxIterationNumber = MaxIterationNumber; } /** * @brief This method gets the flag mMaxIterationNumber * 
@return mMaxIterationNumber: This is the maximum number of on linear iterations */ unsigned int GetMaxIterationNumber() { return mMaxIterationNumber; } /** * @brief It sets the level of echo for the solving strategy * @param Level The level to set * @details The different levels of echo are: * - 0: Mute... no echo at all * - 1: Printing time and basic informations * - 2: Printing linear solver data * - 3: Print of debug informations: Echo of stiffness matrix, Dx, b... */ void SetEchoLevel(int Level) override { BaseType::mEchoLevel = Level; GetBuilderAndSolver()->SetEchoLevel(Level); } //********************************************************************************* /**OPERATIONS ACCESSIBLE FROM THE INPUT: **/ /** * @brief Create method * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ typename BaseType::Pointer Create( ModelPart& rModelPart, Parameters ThisParameters ) const override { return Kratos::make_shared<ClassType>(rModelPart, ThisParameters); } /** * @brief Operation to predict the solution ... 
if it is not called a trivial predictor is used in which the values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY const DataCommunicator &r_comm = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator(); //OPERATIONS THAT SHOULD BE DONE ONCE - internal check to avoid repetitions //if the operations needed were already performed this does nothing if (mInitializeWasPerformed == false) Initialize(); //initialize solution step if (mSolutionStepIsInitialized == false) InitializeSolutionStep(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; DofsArrayType& r_dof_set = GetBuilderAndSolver()->GetDofSet(); GetScheme()->Predict(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); // Applying constraints if needed auto& r_constraints_array = BaseType::GetModelPart().MasterSlaveConstraints(); const int local_number_of_constraints = r_constraints_array.size(); const int global_number_of_constraints = r_comm.SumAll(local_number_of_constraints); if(global_number_of_constraints != 0) { const auto& r_process_info = BaseType::GetModelPart().GetProcessInfo(); const auto it_const_begin = r_constraints_array.begin(); #pragma omp parallel for for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i) (it_const_begin + i)->ResetSlaveDofs(r_process_info); #pragma omp parallel for for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i) (it_const_begin + i)->Apply(r_process_info); // The following is needed since we need to eventually compute time derivatives after applying // Master slave relations TSparseSpace::SetToZero(rDx); this->GetScheme()->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); } // Move the mesh if needed if (this->MoveMeshFlag() == true) BaseType::MoveMesh(); KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY; if (mInitializeWasPerformed == false) { 
//pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); typename TConvergenceCriteriaType::Pointer p_convergence_criteria = mpConvergenceCriteria; //Initialize The Scheme - OPERATIONS TO BE DONE ONCE if (p_scheme->SchemeIsInitialized() == false) p_scheme->Initialize(BaseType::GetModelPart()); //Initialize The Elements - OPERATIONS TO BE DONE ONCE if (p_scheme->ElementsAreInitialized() == false) p_scheme->InitializeElements(BaseType::GetModelPart()); //Initialize The Conditions - OPERATIONS TO BE DONE ONCE if (p_scheme->ConditionsAreInitialized() == false) p_scheme->InitializeConditions(BaseType::GetModelPart()); //initialisation of the convergence criteria if (p_convergence_criteria->IsInitialized() == false) p_convergence_criteria->Initialize(BaseType::GetModelPart()); mInitializeWasPerformed = true; } KRATOS_CATCH(""); } /** * @brief Clears the internal storage */ void Clear() override { KRATOS_TRY; // Setting to zero the internal flag to ensure that the dof sets are recalculated. 
Also clear the linear solver stored in the B&S auto p_builder_and_solver = GetBuilderAndSolver(); if (p_builder_and_solver != nullptr) { p_builder_and_solver->SetDofSetIsInitializedFlag(false); p_builder_and_solver->Clear(); } // Clearing the system of equations if (mpA != nullptr) SparseSpaceType::Clear(mpA); if (mpDx != nullptr) SparseSpaceType::Clear(mpDx); if (mpb != nullptr) SparseSpaceType::Clear(mpb); // Clearing scheme auto p_scheme = GetScheme(); if (p_scheme != nullptr) { GetScheme()->Clear(); } mInitializeWasPerformed = false; mSolutionStepIsInitialized = false; KRATOS_CATCH(""); } /** * @brief This should be considered as a "post solution" convergence check which is useful for coupled analysis - the convergence criteria used is the one used inside the "solve" step */ bool IsConverged() override { KRATOS_TRY; TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; if (mpConvergenceCriteria->GetActualizeRHSflag() == true) { TSparseSpace::SetToZero(rb); GetBuilderAndSolver()->BuildRHS(GetScheme(), BaseType::GetModelPart(), rb); } return mpConvergenceCriteria->PostCriteria(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); KRATOS_CATCH(""); } /** * @brief This operations should be called before printing the results when non trivial results * (e.g. stresses) * Need to be calculated given the solution of the step * @details This operations should be called only when needed, before printing as it can involve a non * negligible cost */ void CalculateOutputData() override { TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; GetScheme()->CalculateOutputData(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); } /** * @brief Performs all the required operations that should be done (for each step) before solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. 
*/ void InitializeSolutionStep() override { KRATOS_TRY; if (!mSolutionStepIsInitialized) { // Pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); ModelPart& r_model_part = BaseType::GetModelPart(); //set up the system, operation performed just once unless it is required //to reform the dof set at each iteration BuiltinTimer system_construction_time; if (p_builder_and_solver->GetDofSetIsInitializedFlag() == false || mReformDofSetAtEachStep == true) { //setting up the list of the DOFs to be solved BuiltinTimer setup_dofs_time; p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part); KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "Setup Dofs Time: " << setup_dofs_time.ElapsedSeconds() << std::endl; //shaping correctly the system BuiltinTimer setup_system_time; p_builder_and_solver->SetUpSystem(r_model_part); KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "Setup System Time: " << setup_system_time.ElapsedSeconds() << std::endl; //setting up the Vectors involved to the correct size BuiltinTimer system_matrix_resize_time; p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, mpA, mpDx, mpb, r_model_part); KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "System Matrix Resize Time: " << system_matrix_resize_time.ElapsedSeconds() << std::endl; } KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "System Construction Time: " << system_construction_time.ElapsedSeconds() << std::endl; TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; // Initial operations ... things that are constant over the Solution Step p_builder_and_solver->InitializeSolutionStep(r_model_part, rA, rDx, rb); // Initial operations ... 
things that are constant over the Solution Step p_scheme->InitializeSolutionStep(r_model_part, rA, rDx, rb); // Initialisation of the convergence criteria if (mpConvergenceCriteria->GetActualizeRHSflag() == true) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } mpConvergenceCriteria->InitializeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); if (mpConvergenceCriteria->GetActualizeRHSflag() == true) TSparseSpace::SetToZero(rb); mSolutionStepIsInitialized = true; } KRATOS_CATCH(""); } /** * @brief Performs all the required operations that should be done (for each step) after solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. */ void FinalizeSolutionStep() override { KRATOS_TRY; ModelPart& r_model_part = BaseType::GetModelPart(); typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; //Finalisation of the solution step, //operations to be done after achieving convergence, for example the //Final Residual Vector (mb) has to be saved in there //to avoid error accumulation p_scheme->FinalizeSolutionStep(r_model_part, rA, rDx, rb); p_builder_and_solver->FinalizeSolutionStep(r_model_part, rA, rDx, rb); mpConvergenceCriteria->FinalizeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); //Cleaning memory after the solution p_scheme->Clean(); //reset flags for next step mSolutionStepIsInitialized = false; if (mReformDofSetAtEachStep == true) //deallocate the systemvectors { this->Clear(); } KRATOS_CATCH(""); } /** * @brief Solves the current step. This function returns true if a solution has been found, false otherwise. 
*/ bool SolveSolutionStep() override { // Pointers needed in the solution ModelPart& r_model_part = BaseType::GetModelPart(); typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); auto& r_dof_set = p_builder_and_solver->GetDofSet(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; //initializing the parameters of the Newton-Raphson cycle unsigned int iteration_number = 1; r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number; bool residual_is_updated = false; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); bool is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); // Function to perform the building and the solving phase. if (BaseType::mRebuildLevel > 0 || BaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); if (mUseOldStiffnessInFirstIteration){ p_builder_and_solver->BuildAndSolveLinearizedOnPreviousIteration(p_scheme, r_model_part, rA, rDx, rb,BaseType::MoveMeshFlag()); } else { p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); // Dx = 0.00; TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } // Debugging info EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag()); p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); if (is_converged) { if (mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } is_converged = 
mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } //Iteration Cycle... performed only for NonLinearProblems while (is_converged == false && iteration_number++ < mMaxIterationNumber) { //setting the number of iteration r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); //call the linear system solver to find the correction mDx for the //it is not called if there is no system to solve if (SparseSpaceType::Size(rDx) != 0) { if (BaseType::mRebuildLevel > 1 || BaseType::mStiffnessMatrixIsBuilt == false) { if (GetKeepSystemConstantDuringIterations() == false) { //A = 0.00; TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! 
" << std::endl; } // Debugging info EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag()); p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); residual_is_updated = false; if (is_converged == true) { if (mpConvergenceCriteria->GetActualizeRHSflag() == true) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); residual_is_updated = true; } is_converged = mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } } //plots a warning if the maximum number of iterations is exceeded if (iteration_number >= mMaxIterationNumber) { MaxIterationsExceeded(); } else { KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", this->GetEchoLevel() > 0) << "Convergence achieved after " << iteration_number << " / " << mMaxIterationNumber << " iterations" << std::endl; } //recalculate residual if needed //(note that some convergence criteria need it to be recalculated) if (residual_is_updated == false) { // NOTE: // The following part will be commented because it is time consuming // and there is no obvious reason to be here. If someone need this // part please notify the community via mailing list before uncommenting it. // Pooyan. // TSparseSpace::SetToZero(mb); // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb); } //calculate reactions if required if (mCalculateReactionsFlag == true) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); return is_converged; } /** * @brief Function to perform expensive checks. * @details It is designed to be called ONCE to verify that the input is correct. 
*/ int Check() override { KRATOS_TRY BaseType::Check(); GetBuilderAndSolver()->Check(BaseType::GetModelPart()); GetScheme()->Check(BaseType::GetModelPart()); mpConvergenceCriteria->Check(BaseType::GetModelPart()); return 0; KRATOS_CATCH("") } /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "newton_raphson_strategy", "use_old_stiffness_in_first_iteration": false, "max_iteration" : 10, "reform_dofs_at_each_step" : false, "compute_reactions" : false, "builder_and_solver_settings" : {}, "convergence_criteria_settings" : {}, "linear_solver_settings" : {}, "scheme_settings" : {} })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "newton_raphson_strategy"; } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ /** * @brief This method returns the LHS matrix * @return The LHS matrix */ TSystemMatrixType &GetSystemMatrix() override { TSystemMatrixType &mA = *mpA; return mA; } /** * @brief This method returns the RHS vector * @return The RHS vector */ TSystemVectorType& GetSystemVector() override { TSystemVectorType& mb = *mpb; return mb; } /** * @brief This method returns the solution vector * @return The Dx vector */ TSystemVectorType& GetSolutionVector() override { TSystemVectorType& mDx = *mpDx; return mDx; } /** * @brief Set method for the flag mKeepSystemConstantDuringIterations * @param Value If we consider constant the system of equations during the iterations */ void 
SetKeepSystemConstantDuringIterations(bool Value) { mKeepSystemConstantDuringIterations = Value; } /** * @brief Get method for the flag mKeepSystemConstantDuringIterations * @return True if we consider constant the system of equations during the iterations, false otherwise */ bool GetKeepSystemConstantDuringIterations() { return mKeepSystemConstantDuringIterations; } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedNewtonRaphsonStrategy"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} private: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} protected: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ typename TSchemeType::Pointer mpScheme = nullptr; /// The pointer to the time scheme employed typename TBuilderAndSolverType::Pointer mpBuilderAndSolver = nullptr; /// The pointer to the builder and solver employed typename TConvergenceCriteriaType::Pointer mpConvergenceCriteria = nullptr; /// The pointer to the convergence criteria employed TSystemVectorPointerType mpDx; /// The increment in the solution TSystemVectorPointerType mpb; /// The RHS vector of the system of equations TSystemMatrixPointerType mpA; /// The LHS matrix of the system of equations /** * @brief Flag telling if it is needed to reform the DofSet at each solution step or if it is possible to form it just once * @details Default = false - true : Reforme at each time step - false : Form just one (more 
efficient) */ bool mReformDofSetAtEachStep; /** * @brief Flag telling if it is needed or not to compute the reactions * @details default = true */ bool mCalculateReactionsFlag; /** * @brief Flag telling if a full update of the database will be performed at the first iteration * @details default = false */ bool mUseOldStiffnessInFirstIteration = false; bool mSolutionStepIsInitialized; /// Flag to set as initialized the solution step unsigned int mMaxIterationNumber; /// The maximum number of iterations, 30 by default bool mInitializeWasPerformed; /// Flag to set as initialized the strategy bool mKeepSystemConstantDuringIterations; // Flag to allow keeping system matrix constant during iterations ///@} ///@name Private Operators ///@{ /** * @brief Here the database is updated * @param A The LHS matrix of the system of equations * @param Dx The incremement in the solution * @param b The RHS vector of the system of equations * @param MoveMesh The flag that allows to move the mesh */ virtual void UpdateDatabase( TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb, const bool MoveMesh) { typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); p_scheme->Update(BaseType::GetModelPart(), p_builder_and_solver->GetDofSet(), rA, rDx, rb); // Move the mesh if needed if (MoveMesh == true) BaseType::MoveMesh(); } /** * @brief This method returns the components of the system of equations depending of the echo level * @param IterationNumber The non linear iteration in the solution loop */ virtual void EchoInfo(const unsigned int IterationNumber) { TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; if (this->GetEchoLevel() == 2) //if it is needed to print the debug info { KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl; KRATOS_INFO("RHS") << "RHS = " << rb << std::endl; } else if (this->GetEchoLevel() == 3) //if it is needed to 
print the debug info { KRATOS_INFO("LHS") << "SystemMatrix = " << rA << std::endl; KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl; KRATOS_INFO("RHS") << "RHS = " << rb << std::endl; } else if (this->GetEchoLevel() == 4) //print to matrix market file { std::stringstream matrix_market_name; matrix_market_name << "A_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm"; TSparseSpace::WriteMatrixMarketMatrix((char *)(matrix_market_name.str()).c_str(), rA, false); std::stringstream matrix_market_vectname; matrix_market_vectname << "b_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm.rhs"; TSparseSpace::WriteMatrixMarketVector((char *)(matrix_market_vectname.str()).c_str(), rb); std::stringstream matrix_market_dxname; matrix_market_dxname << "dx_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm.rhs"; TSparseSpace::WriteMatrixMarketVector((char *)(matrix_market_dxname.str()).c_str(), rDx); std::stringstream dof_data_name; unsigned int rank=BaseType::GetModelPart().GetCommunicator().MyPID(); dof_data_name << "dofdata_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << "_rank_"<< rank << ".csv"; WriteDofInfo(dof_data_name.str(), rDx); } } /** * @brief This method prints information after reach the max number of iterations */ virtual void MaxIterationsExceeded() { KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", this->GetEchoLevel() > 0) << "ATTENTION: max iterations ( " << mMaxIterationNumber << " ) exceeded!" 
<< std::endl; } /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); mMaxIterationNumber = ThisParameters["max_iteration"].GetInt(); mReformDofSetAtEachStep = ThisParameters["reform_dofs_at_each_step"].GetBool(); mCalculateReactionsFlag = ThisParameters["compute_reactions"].GetBool(); mUseOldStiffnessInFirstIteration = ThisParameters["use_old_stiffness_in_first_iteration"].GetBool(); // Saving the convergence criteria to be used if (ThisParameters["convergence_criteria_settings"].Has("name")) { KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl; } // Saving the scheme if (ThisParameters["scheme_settings"].Has("name")) { KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl; } // Setting up the default builder and solver if (ThisParameters["builder_and_solver_settings"].Has("name")) { KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl; } } void WriteDofInfo(std::string FileName, const TSystemVectorType& rDX) { std::ofstream out(FileName); out.precision(15); out << "EquationId,NodeId,VariableName,IsFixed,Value,coordx,coordy,coordz" << std::endl; for(const auto& rdof : GetBuilderAndSolver()->GetDofSet()) { const auto& coords = BaseType::GetModelPart().Nodes()[rdof.Id()].Coordinates(); out << rdof.EquationId() << "," << rdof.Id() << "," << rdof.GetVariable().Name() << "," << rdof.IsFixed() << "," << rdof.GetSolutionStepValue() << "," << "," << coords[0] << "," << coords[1] << "," << coords[2]<< "\n"; } out.close(); } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /** * Copy constructor. 
*/ ResidualBasedNewtonRaphsonStrategy(const ResidualBasedNewtonRaphsonStrategy &Other){}; ///@} }; /* Class ResidualBasedNewtonRaphsonStrategy */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos. */ #endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY defined */
// ==== special_ops.h ====
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #pragma once #include <ops/ops.h> #include <loops/reduce_float.h> #include <loops/reduce_same.h> #include <loops/scalar.h> #include <loops/indexreduce.h> #include <loops/broadcasting.h> #include <loops/transform_float.h> #include <op_enums.h> #include <loops/transform_strict.h> #ifdef __CUDACC__ #include <loops/cuda/inplace_loops/reduce_same_inplace.h> #include <loops/cuda/inplace_loops/transform_strict_inplace.h> #include <loops/cuda/inplace_loops/scalar_inplace.h> #endif namespace functions { namespace broadcast { template <typename X, typename Y, typename Z> class Broadcast; } namespace transform { template <typename X> class TransformStrict; } namespace scalar { } namespace reduce { template <typename X, typename Z> class ReduceFloatFunction; template <typename X> class ReduceSameFunction; } } namespace simdOps { template<typename T, typename Z> class Pooling2D { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif static int outSize(int size, int k, int s, int p, bool coverAll) { if (coverAll) return (size + p * 2 - k + s - 1) / s + 1; else return (size + p * 2 - k) / s + 1; } #ifdef __CUDACC__ /** * Based on: 
https://github.com/pjreddie/darknet/blob/master/src/im2col_kernels.cu */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *zShapeBuffer, Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { __shared__ int kH; __shared__ int kW; __shared__ int sH; __shared__ int sW; __shared__ int pH; __shared__ int pW; __shared__ int dH; __shared__ int dW; __shared__ int poolingMode; __shared__ Z extraParam0; __shared__ int batchSize; __shared__ int inChannels; __shared__ int outH; __shared__ int outW; __shared__ int inH; __shared__ int inW; //__shared__ int *strideIn; //__shared__ int *strideOut; __shared__ int strideB; __shared__ int strideC; __shared__ int strideY; __shared__ int strideX; __shared__ int strideOB; __shared__ int strideOC; __shared__ int strideOY; __shared__ int strideOX; __shared__ int length; __shared__ int kHEff; __shared__ int kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { kH = (int)extraParams[0]; kW = (int)extraParams[1]; sH = (int)extraParams[2]; sW = (int)extraParams[3]; pH = (int)extraParams[4]; pW = (int)extraParams[5]; dH = (int)extraParams[6]; //Dilation, height dimension dW = (int)extraParams[7]; //Dilation, width dimension poolingMode = (int)extraParams[9]; extraParam0 = extraParams[10]; batchSize = shape::sizeAt(xShapeBuffer, 0); inChannels = shape::sizeAt(xShapeBuffer, 1); outH = shape::sizeAt(zShapeBuffer, 2); outW = shape::sizeAt(zShapeBuffer, 3); inH = shape::sizeAt(xShapeBuffer, 2); inW = shape::sizeAt(xShapeBuffer, 3); strideB = shape::stride(xShapeBuffer)[0]; strideC = shape::stride(xShapeBuffer)[1]; strideY = shape::stride(xShapeBuffer)[2]; strideX = shape::stride(xShapeBuffer)[3]; strideOB = shape::stride(zShapeBuffer)[0]; strideOC = shape::stride(zShapeBuffer)[1]; strideOY = shape::stride(zShapeBuffer)[2]; strideOX = shape::stride(zShapeBuffer)[3]; length = shape::length(zShapeBuffer); //Replace kernel H/W with *effective* 
kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); fOrder = shape::order(zShapeBuffer) == 'f'; /* if (blockIdx.x == 0) { printf("kH: %i; kW: %i; sH: %i; sW: %i; pH: %i; pW: %i; dH: %i; dW: %i; poolingMode: %i; extraParam0: %f;\n", kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, (float) extraParam0); printf("batchSize: %i; inChannels: %i; outH: %i; outW: %i; inH: %i; inW: %i; strideB: %i; strideC: %i; strideY: %i; strideX: %i;\n", batchSize, inChannels, outH, outW, inH, inW, strideB, strideC, strideY, strideX); } */ } __syncthreads(); int tid = blockIdx.x * gridDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % outW; const int ph = (index / outW) % outH; const int c = (index / outW / outH) % inChannels; const int n = index / outW / outH / inChannels; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; // const int hSO = hstart; // const int hEO = hend; if(hstart < 0){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH); hstart += f * dH; } if(wstart < 0){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW); wstart += f * dW; } if(hend > inH){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) (hend-inH) / (Z) dH); hend -= f * dH; } if(wend > inW){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) (wend-inW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = nd4j::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * nd4j::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW); Z sum = poolingMode == 0 ? 
-nd4j::DataTypeUtils::max<Z>() : static_cast<Z>(0.f); T *input_slice = dx + (n * strideB + c * strideC); if (poolingMode == 0) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { Z v = static_cast<Z>(input_slice[h * strideY + w * strideX]); if (v > sum) sum = v; } } } else if (poolingMode == 1) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { sum += static_cast<Z>(input_slice[h * strideY + w * strideX]); } } } else if (poolingMode == 2) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { sum += nd4j::math::nd4j_pow<Z,Z,Z>(static_cast<Z>(nd4j::math::nd4j_abs<T>(input_slice[h * strideY + w * strideX])), extraParam0); } } } Z res; if (poolingMode == 0) { res = sum; } else if (poolingMode == 1) { int divide_factor = pool_size; //Case 0: exclude padding if ((int) extraParam0 == 1) //Case 1: include padding divide_factor = kH * kW; res = sum / static_cast<Z>(divide_factor); } else if (poolingMode == 2) { res = nd4j::math::nd4j_pow<Z,Z,Z>(sum, (Z) 1.0f / extraParam0); } if (!fOrder) { result[index] = res; } else { result[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = res; } /* if (index >= 0 && index < 400000) { printf("index: %i; hstart: %i; hend: %i; wstart: %i; wend: %i; ph: %i; pw: %i; hstart_orig: %i; hend_orig: %i;\n", index, hstart, hend, wstart, wend, ph, pw, hSO, hEO); } */ } __syncthreads(); } #endif static void execSpecial(T *in, Nd4jLong *inShapeBuffer, Z *out, Nd4jLong *outShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const Nd4jLong kH = (int)extraParams[0]; const Nd4jLong kW = (int)extraParams[1]; const Nd4jLong sH = (int)extraParams[2]; const Nd4jLong sW = (int)extraParams[3]; const Nd4jLong pH = (int)extraParams[4]; const Nd4jLong pW = (int)extraParams[5]; const Nd4jLong dH = (int)extraParams[6]; const Nd4jLong dW = (int)extraParams[7]; Nd4jLong 
poolingMode = (int)extraParams[9];
T extraParam0 = extraParams[10];

// zero dilation would make the effective-kernel / window-clipping math divide by zero
if(dH == 0 || dW == 0) {
    printf("Special_ops pooling2d:: dilation must not be zero, but got instead {%lld, %lld} \n", dH, dW);
    throw "";
}

// effective kernel extent once the dilation gaps are included
const Nd4jLong kHEff = kH + (kH-1)*(dH-1);
const Nd4jLong kWEff = kW + (kW-1)*(dW-1);

// input is [bS, iC, iH, iW], output is [bS, iC, oH, oW] (see function header above)
const int bS = shape::sizeAt(inShapeBuffer, 0);
const int iC = shape::sizeAt(inShapeBuffer, 1);
const int iH = shape::sizeAt(inShapeBuffer, 2);
const int iW = shape::sizeAt(inShapeBuffer, 3);
const int oH = shape::sizeAt(outShapeBuffer, 2);
const int oW = shape::sizeAt(outShapeBuffer, 3);

// element strides of both buffers, cached once outside the hot loops
const Nd4jLong iStride0 = shape::stride(inShapeBuffer)[0];
const Nd4jLong iStride1 = shape::stride(inShapeBuffer)[1];
const Nd4jLong iStride2 = shape::stride(inShapeBuffer)[2];
const Nd4jLong iStride3 = shape::stride(inShapeBuffer)[3];
const Nd4jLong oStride0 = shape::stride(outShapeBuffer)[0];
const Nd4jLong oStride1 = shape::stride(outShapeBuffer)[1];
const Nd4jLong oStride2 = shape::stride(outShapeBuffer)[2];
const Nd4jLong oStride3 = shape::stride(outShapeBuffer)[3];

// step sizes (in buffer elements) for walking the dilated window inside the input
const Nd4jLong iStep2 = dH*iStride2;
const Nd4jLong iStep3 = dW*iStride3;
const int kProd = kH*kW;        // full, padding-inclusive window size (avg "include padding" divisor)
const T iStep2Inv = 1./iStep2;  // NOTE(review): appears unused in the loops below — confirm before removing
const T iStep3Inv = 1./iStep3;  // NOTE(review): appears unused in the loops below — confirm before removing

// per-output-element scratch; declared at function scope so the OpenMP
// private() clauses below can list them
Nd4jLong hstart, wstart, hend, wend;
T sum, *pIn;

if(poolingMode == 0) {  // max
    #pragma omp parallel for schedule(guided) private(pIn, sum, hstart, wstart, hend, wend)
    for(int b = 0; b < bS; ++b) {
        for(int c = 0; c < iC; ++c) {
            for(int oh = 0; oh < oH; ++oh) {
                for(int ow = 0; ow < oW; ++ow) {
                    // base of the current [iH, iW] input slice
                    pIn = in + b * iStride0 + c * iStride1;

                    // window corners, possibly inside the padding...
                    hstart = oh * sH - pH;
                    wstart = ow * sW - pW;
                    hend = hstart + kHEff;
                    wend = wstart + kWEff;

                    // ...then clipped to the input, keeping the bounds on the dilation grid
                    if(hstart < 0)
                        hstart += dH * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(-hstart) / static_cast<T>(dH));
                    if(wstart < 0)
                        wstart += dW * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(-wstart) / static_cast<T>(dW));
                    if(hend > iH)
                        hend -= dH * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(hend-iH) /
static_cast<T>(dH)); if(wend > iW) wend -= dW * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(wend-iW) / static_cast<T>(dW)); hstart *= iStride2; hend *= iStride2; wstart *= iStride3; wend *= iStride3; sum = -nd4j::DataTypeUtils::max<Z>(); for (Nd4jLong kh = hstart; kh < hend; kh += iStep2) for (Nd4jLong kw = wstart; kw < wend; kw += iStep3) { T val = pIn[kh + kw]; if (val > sum) sum = val; } out[b * oStride0 + c * oStride1 + oh * oStride2 + ow * oStride3] = sum; } } } } } /*************************************************************************/ else if(poolingMode == 1) { // avg #pragma omp parallel for schedule(guided) private(pIn, sum, hstart, wstart, hend, wend) for(int b = 0; b < bS; ++b) { for(int c = 0; c < iC; ++c) { for(int oh = 0; oh < oH; ++oh) { for(int ow = 0; ow < oW; ++ow) { pIn = in + b * iStride0 + c * iStride1; hstart = oh * sH - pH; wstart = ow * sW - pW; hend = hstart + kHEff; wend = wstart + kWEff; if(hstart < 0) hstart += dH * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(-hstart) / static_cast<T>(dH)); if(wstart < 0) wstart += dW * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(-wstart) / static_cast<T>(dW)); if(hend > iH) hend -= dH * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(hend-iH) / static_cast<T>(dH)); if(wend > iW) wend -= dW * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(wend-iW) / static_cast<T>(dW)); hstart *= iStride2; hend *= iStride2; wstart *= iStride3; wend *= iStride3; sum = static_cast<Z>(0.); for (Nd4jLong kh = hstart; kh < hend; kh += iStep2) for (Nd4jLong kw = wstart; kw < wend; kw += iStep3) sum += pIn[kh + kw]; if ((int) extraParam0 == 0) //Exclude padding sum /= static_cast<T>(nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend-hstart) / static_cast<double>(iStep2))) * static_cast<T>(nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend-wstart) / static_cast<double>(iStep3))); //Accounts for dilation else if ((int) extraParam0 == 1) //Include 
padding sum /= kProd; out[b * oStride0 + c * oStride1 + oh * oStride2 + ow * oStride3] = sum; } } } } } /*************************************************************************/ else if(poolingMode == 2) { // pnorm #pragma omp parallel for schedule(guided) private(pIn, sum, hstart, wstart, hend, wend) for(int b = 0; b < bS; ++b) { for(int c = 0; c < iC; ++c) { for(int oh = 0; oh < oH; ++oh) { for(int ow = 0; ow < oW; ++ow) { pIn = in + b * iStride0 + c * iStride1; hstart = oh * sH - pH; wstart = ow * sW - pW; hend = hstart + kHEff; wend = wstart + kWEff; if(hstart < 0) hstart += dH * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(-hstart) / static_cast<T>(dH)); if(wstart < 0) wstart += dW * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(-wstart) / static_cast<T>(dW)); if(hend > iH) hend -= dH * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(hend-iH) / static_cast<T>(dH)); if(wend > iW) wend -= dW * (Nd4jLong)nd4j::math::nd4j_ceil<T,Nd4jLong>(static_cast<T>(wend-iW) / static_cast<T>(dW)); hstart *= iStride2; hend *= iStride2; wstart *= iStride3; wend *= iStride3; sum = static_cast<T>(0.); for (Nd4jLong kh = hstart; kh < hend; kh += iStep2) for (Nd4jLong kw = wstart; kw < wend; kw += iStep3) sum += nd4j::math::nd4j_pow<T, T, T>(nd4j::math::nd4j_abs<T>(pIn[kh + kw]), extraParam0); sum = nd4j::math::nd4j_pow<T,T,T>(sum, (T) 1. 
/ extraParam0); out[b * oStride0 + c * oStride1 + oh * oStride2 + ow * oStride3] = sum; } } } } } else { nd4j_printf("Special_ops::pooling2d: pooling mode argument can take three values only: 0, 1, 2, but got %i instead !\n", poolingMode); throw ""; } } op_def static T op(T d1, Z *params) { return d1; } /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 4 */ static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[2] != 1) offset += indices[2] * stride[2]; if (shape[3] != 1) offset += indices[3] * stride[3]; return offset; } /** * A version of Shape.getOffset without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here) */ static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[4] != 1) offset += indices[4] * stride[4]; if (shape[5] != 1) offset += indices[5] * stride[5]; return offset; } }; FORCEINLINE bool is_a_ge_zero_and_a_lt_b(int a, int b) { return static_cast<unsigned>(a) < static_cast<unsigned>(b); } template<typename T> class Im2col { public: static const bool requiresSpecial = true; static _CUDA_HD int outSize(int size, int k, int s, int p, bool coverAll) { if (coverAll) return (size + p * 2 - k + s - 1) / s + 1; else return (size + p * 2 - k) / s + 1; } #ifdef __CUDACC__ /** * Based on: 
https://github.com/pjreddie/darknet/blob/master/src/im2col_kernels.cu */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *zShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { /*kernel[0], kernel[1], stride[0], stride[1], padding[0], padding[1], 0, false*/ int kernelHeight = (int)extraParams[0]; int kernelWidth = (int)extraParams[1]; int strideY = (int)extraParams[2]; int strideX = (int)extraParams[3]; int padHeight = (int)extraParams[4]; int padWidth = (int)extraParams[5]; int dY = (int)extraParams[6]; //Dilation, height/y dimension int dX = (int)extraParams[7]; //Dilation, width/x dimension int kSize = kernelWidth * kernelHeight; T zeroPadVal = (T)extraParams[9]; //Value to use when value is padding. Usually 0 but not always auto outShape = shape::shapeOf(zShapeBuffer); auto resultOrder = shape::order(zShapeBuffer); auto outStride = shape::stride(zShapeBuffer); auto inShape = shape::shapeOf(xShapeBuffer); auto inStride = shape::stride(xShapeBuffer); int samples = inShape[0]; int depth = inShape[1]; int height = inShape[2]; int width = inShape[3]; int strideex = inStride[0]; int stridech = inStride[1]; int strideh = inStride[2]; int stridew = inStride[3]; // (height + 2 * padHeight - kernelHeight) / strideX + 1; // // (width + 2 * padWidth - kernelWidth) / strideY + 1; // int height_col = outShape[4]; int width_col = outShape[5]; int n = samples * depth * height_col * width_col; /* if (threadIdx.x == 0) printf("Kernel h: [%i], w: [%i]; Col h: [%i], w: [%i]; Stride x: [%i], y: [%i]; Height: [%i], Width: [%i], Depth: [%i], N: [%i], Samples: [%i]\n", kernelHeight, kernelWidth, height_col, width_col, strideX, strideY, height, width, depth, n, samples); */ int index = blockIdx.x * blockDim.x + threadIdx.x; for (; index < n; index += blockDim.x*gridDim.x) { int h_index = index / width_col; int h_col = h_index % height_col; int w_col = index % width_col; 
int c_im = h_index / height_col; int c_col = c_im * kSize; int depth_im = c_im % depth; int num_im = c_im / depth; int h_offset = h_col * strideY - padHeight; int w_offset = w_col * strideX - padWidth; T* data_col_ptr = result; int i_c = (c_col * height_col + h_col) * width_col + w_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; T* data_im_ptr = dx; data_im_ptr += num_im * strideex + depth_im * stridech + h_offset * strideh + w_offset*stridew; for (int i = 0; i < kernelHeight; ++i) { for (int j = 0; j < kernelWidth; ++j) { int h_im = h_offset + i * dY; int w_im = w_offset + j * dX; int i_f = 0; int i_c_temp = i_c; for (int dim = 5; dim >= 0; dim--) { i_f += (i_c_temp % outShape[dim]) * outStride[dim]; i_c_temp = i_c_temp / outShape[dim]; } if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width){ result[i_f] = data_im_ptr[i * dY * strideh + j * dX * stridew]; } else result[i_f] = zeroPadVal; //result[i_f] = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? 
data_im_ptr[i * strideh + j*stridew] : 0; data_col_ptr += height_col * width_col; i_c += height_col * width_col; } } } } #endif static void execSpecial( T *imBuff, Nd4jLong *imShapeBuffer, T *colBuff, Nd4jLong *colShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { /*kernel[0], kernel[1], stride[0], stride[1], padding[0], padding[1], 0, false*/ // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW] int kH = (int)extraParams[0]; int kW = (int)extraParams[1]; int sH = (int)extraParams[2]; int sW = (int)extraParams[3]; int pH = (int)extraParams[4]; int pW = (int)extraParams[5]; int dH = (int)extraParams[6]; //Dilation, height/y dimension int dW = (int)extraParams[7]; //Dilation, width/x dimension T zeroPadVal = extraParams[9]; auto colShape = shape::shapeOf(colShapeBuffer); auto colStride = shape::stride(colShapeBuffer); auto imShape = shape::shapeOf(imShapeBuffer); auto imStride = shape::stride(imShapeBuffer); const int bS = imShape[0]; const int iC = imShape[1]; const int iH = imShape[2]; const int iW = imShape[3]; const int oH = colShape[4]; const int oW = colShape[5]; const Nd4jLong colStride0 = colStride[0]; const Nd4jLong colStride1 = colStride[1]; const Nd4jLong colStride2 = colStride[2]; const Nd4jLong colStride3 = colStride[3]; const Nd4jLong colStride4 = colStride[4]; const Nd4jLong colStride5 = colStride[5]; const Nd4jLong imStride0 = imStride[0]; const Nd4jLong imStride1 = imStride[1]; const Nd4jLong imStride2 = imStride[2]; const Nd4jLong imStride3 = imStride[3]; T *col, *im; int imRow, imCol; if (shape::order(imShapeBuffer) == 'c' && shape::order(colShapeBuffer) == 'c' && shape::strideDescendingCAscendingF(imShapeBuffer) && shape::strideDescendingCAscendingF(colShapeBuffer)) { #pragma omp parallel for schedule(static) proc_bind(close) private(col, im, imRow, imCol) for (int b = 0; b < bS; b++) { for (int c = 0; c < iC; ++c) { for (int kRow = 0; kRow < kH; ++kRow) { for (int kCol = 0; kCol < kW; ++kCol) { for (int colH = 0; 
colH < oH; ++colH) { for (int colW = 0; colW < oW; ++colW) { imRow = (-pH + kRow * dH) + colH*sH; imCol = (-pW + kCol * dW) + colW*sW; col = colBuff + b*colStride0 + c*colStride1 + kRow*colStride2 + kCol*colStride3 + colH*colStride4 + colW*colStride5; im = imBuff + b*imStride0 + c*imStride1 + imRow*imStride2 + imCol*imStride3; if (static_cast<unsigned>(imRow) >= static_cast<unsigned>(iH) || static_cast<unsigned>(imCol) >= static_cast<unsigned>(iW)) *col = zeroPadVal; else *col = *im; } } } } } } } else { #pragma omp parallel for schedule(static) proc_bind(close) private(im, col, imRow, imCol) for (int b = 0; b < bS; b++) { for (int colH = 0; colH < oH; ++colH) { for (int colW = 0; colW < oW; ++colW) { for (int c = 0; c < iC; ++c) { for (int kRow = 0; kRow < kH; ++kRow) { for (int kCol = 0; kCol < kW; ++kCol) { imRow = (-pH + kRow * dH) + colH*sH; imCol = (-pW + kCol * dW) + colW*sW; col = colBuff + b*colStride0 + c*colStride1 + kRow*colStride2 + kCol*colStride3 + colH*colStride4 + colW*colStride5; im = imBuff + b*imStride0 + c*imStride1 + imRow*imStride2 + imCol*imStride3; if (static_cast<unsigned>(imRow) >= static_cast<unsigned>(iH) || static_cast<unsigned>(imCol) >= static_cast<unsigned>(iW)) *col = zeroPadVal; else *col = *im; } } } } } } } } op_def static T op(T d1, T *params) { return d1; } /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 4 */ static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[2] != 1) offset += indices[2] * stride[2]; if (shape[3] != 1) offset += indices[3] * stride[3]; return offset; } /** * A version of Shape.getOffset without checking on input for negative indices etc * normally 
negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here) */ static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[4] != 1) offset += indices[4] * stride[4]; if (shape[5] != 1) offset += indices[5] * stride[5]; return offset; } }; template<typename T, typename Z> class Histogram { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *zShapeBuffer, Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { int numBins = (int) extraParams[0]; Z min_val = extraParams[1]; Z max_val = extraParams[2]; int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ Z *bins; __shared__ int length; __shared__ Z *reductor; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; bins = (Z *) shmem; reductor = ((Z *) allocationPointer) + (numBins * blockIdx.x); length = shape::length(xShapeBuffer); } __syncthreads(); Z binSize = (max_val - min_val) / (numBins); for (int e = threadIdx.x; e < numBins; e += blockDim.x) { bins[e] = (Z) 0.0f; } __syncthreads(); for (int e = tid; e < length; e+= blockDim.x * gridDim.x) { int idx = (int) ((dx[e] - min_val) / binSize); if (idx < 0) idx = 0; else if (idx >= numBins) idx = numBins - 1; nd4j::math::atomics::nd4j_atomicAdd(&bins[idx], (Z) 1.0f); } __syncthreads(); // transfer shared memory to reduction memory if (gridDim.x > 1) { unsigned int *tc = (unsigned int *)reductionPointer; __shared__ bool amLast; for (int e = threadIdx.x; e < numBins; e += blockDim.x) { reductor[e] = bins[e]; } __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = 
atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; // nullify shared memory for future accumulation for (int e = threadIdx.x; e < numBins; e += blockDim.x) { bins[e] = (Z) 0.0f; } // accumulate reduced bins for (int r = 0; r < gridDim.x; r++) { Z *ptrBuf = ((Z *)allocationPointer) + (r * numBins); for (int e = threadIdx.x; e < numBins; e += blockDim.x) { bins[e] += ptrBuf[e]; } } __syncthreads(); // write them out to Z for (int e = threadIdx.x; e < numBins; e += blockDim.x) { result[e] = bins[e]; } } } else { // if there's only 1 block - just write away data for (int e = threadIdx.x; e < numBins; e += blockDim.x) { result[e] = bins[e]; } } }; #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *zShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { int length = shape::length(xShapeBuffer); int _threads = 2; int numBins = (int) extraParams[0]; int span = (length / _threads) + 8; // get min over input T min_val = extraParams[1]; T max_val = extraParams[2]; /* #pragma omp parallel for simd num_threads(_threads) if (_threads > 1) reduction(min:min_val) proc_bind(close) for (int x = 0; x < length; x++) { if (min_val > dx[x]) min_val = dx[x]; } // get max over input T max_val = (T) MIN_FLOAT; #pragma omp parallel for simd num_threads(_threads) if (_threads > 1) reduction(max:max_val) proc_bind(close) for (int x = 0; x < length; x++) { if (max_val < dx[x]) max_val = dx[x]; } */ T binSize = (max_val - min_val) / (numBins); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(close) default(shared) { int tid, start, end; int *bins = new int[numBins]; std::memset(bins, 0, sizeof(int) * numBins); tid = omp_get_thread_num(); start = span * tid; end = span * (tid + 1); if (end > length) end = length; #pragma omp simd for (int x = start; x < end; x++) { int idx = (int) ((dx[x] - min_val) / binSize); if (idx < 0) idx = 0; else if (idx >= 
numBins) idx = numBins - 1; bins[idx]++; } #pragma omp critical { #pragma omp simd for (int x = 0; x < numBins; x++) { result[x] += bins[x]; } } delete[] bins; } } op_def static T op(T d1, Z *params) { return d1; } }; template<typename X> class Col2Im { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * https://github.com/pjreddie/darknet/blob/master/src/col2im_kernels.cu */ static inline __device__ void execSpecialCuda( X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *zShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto inShape = shape::shapeOf(xShapeBuffer); auto inStride = shape::stride(xShapeBuffer); int strideex = inStride[0]; int stridech = inStride[1]; int stridekrow = inStride[2]; int stridekcol = inStride[3]; int striderow = inStride[4]; int stridecol = inStride[5]; int kernelHeight = inShape[2]; int kernelWidth = inShape[3]; // C int strideY = (int)extraParams[0]; int strideX = (int)extraParams[1]; int padHeight = (int)extraParams[2]; int padWidth = (int)extraParams[3]; int imgHeight = (int)extraParams[4]; int imgWidth = (int)extraParams[5]; int dY = (int)extraParams[6]; //Dilation in height/y dimension int dX = (int)extraParams[7]; //Dilation in width/x dimension auto outShape = shape::shapeOf(zShapeBuffer); auto resultOrder = shape::order(zShapeBuffer); auto outStride = shape::stride(zShapeBuffer); int samples = outShape[0]; int depth = outShape[1]; int imgH = outShape[2]; int imgW = outShape[3]; int height_col = inShape[4];//(imgHeight + 2 * padHeight - kernelHeight) / strideX + 1; int width_col = inShape[5];//(imgWidth + 2 * padWidth - kernelWidth) / strideY + 1; int n = samples * depth * imgHeight * imgWidth; /*if (threadIdx.x == 0) printf("Kernel h: [%i], w: [%i]; Col h: [%i], w: [%i]; Stride x: [%i], y: [%i]; Height: [%i], Width: [%i], Depth: [%i], N: [%i], Samples: [%i]\n", kernelHeight, kernelWidth, height_col, width_col, strideX, strideY, 
imgHeight, imgWidth, depth, n, samples);*/ //Effective kernel size, accounting for dilation int kEffectiveW = kernelWidth + (kernelWidth - 1) * (dX - 1); int kEffectiveH = kernelHeight + (kernelHeight - 1) * (dY - 1); for (int i = (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { X val = 0; int w_im = i % imgWidth + padWidth; int h_im = (i / imgWidth) % imgHeight + padHeight; int c_im = i / (imgWidth * imgHeight); int num_im = c_im / depth; int depth_im = c_im % depth; // compute the start and end of the output // These are the indexes for dimensions ??? in the 6d col matrix int w_col_start = (w_im < kEffectiveW) ? 0 : (w_im - kEffectiveW) / strideX + 1; int w_col_end = nd4j::math::nd4j_min<int>(w_im / strideX + 1, width_col); int h_col_start = (h_im < kEffectiveH) ? 0 : (h_im - kEffectiveH) / strideY + 1; int h_col_end = nd4j::math::nd4j_min<int>(h_im / strideY + 1, height_col); //Iterate over col entries in the 6d array... these are added up for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) { for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) { int h_k = (h_im - h_col * strideY); int w_k = (w_im - w_col * strideX); if(h_k % dY == 0 && w_k % dX == 0){ h_k /= dY; w_k /= dX; int data_col_index = num_im * strideex + depth_im * stridech + h_k * stridekrow + w_k * stridekcol + h_col * striderow + w_col * stridecol; val += dx[data_col_index]; } } } int i_f = 0; int i_c = i; for (int dim = 3; dim >= 0; dim--) { i_f += (i_c % outShape[dim]) * outStride[dim]; i_c = i_c / outShape[dim]; } result[i_f] = val; } } #endif static void execSpecial( X *colBuff, Nd4jLong *colShapeBuffer, X *imBuff, Nd4jLong *imShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { // [bS, iC, kH, kW, oH, oW] is de-convoluted to [bS, iC, iH, iW] auto colShape = shape::shapeOf(colShapeBuffer); auto colStride = shape::stride(colShapeBuffer); auto imShape = shape::shapeOf(imShapeBuffer); auto imStride = 
shape::stride(imShapeBuffer);

// hyper-parameters: stride, padding, target image size, dilation
const int sH = (int)extraParams[0];
const int sW = (int)extraParams[1];
const int pH = (int)extraParams[2];
const int pW = (int)extraParams[3];
const int iH = (int)extraParams[4];
const int iW = (int)extraParams[5];
const int dH = (int)extraParams[6];
const int dW = (int)extraParams[7];

// col buffer is [bS, iC, kH, kW, oH, oW]; image buffer is [bS, iC, iH, iW]
const int bS = imShape[0];
const int iC = imShape[1];
const int kH = colShape[2];
const int kW = colShape[3];
const int oH = colShape[4];
const int oW = colShape[5];

// element strides of both buffers, cached once outside the hot loops
const Nd4jLong colStride0 = colStride[0];
const Nd4jLong colStride1 = colStride[1];
const Nd4jLong colStride2 = colStride[2];
const Nd4jLong colStride3 = colStride[3];
const Nd4jLong colStride4 = colStride[4];
const Nd4jLong colStride5 = colStride[5];
const Nd4jLong imStride0 = imStride[0];
const Nd4jLong imStride1 = imStride[1];
const Nd4jLong imStride2 = imStride[2];
const Nd4jLong imStride3 = imStride[3];

// initial zeroing of image content
// (col2im accumulates overlapping patches with +=, so the destination must start at zero)
const Nd4jLong imEWS = nd4j::math::nd4j_abs<Nd4jLong>(shape::elementWiseStride(imShapeBuffer));
if(imEWS == 1)
    memset(imBuff, 0, shape::length(imShapeBuffer) * sizeof(X));
else
#pragma omp parallel for schedule(static) proc_bind(close)
    for (int i = 0; i < shape::length(imShapeBuffer) * imEWS; i += imEWS)
        imBuff[i] = static_cast<X>(0.f);

// per-iteration scratch, listed in the OpenMP private() clauses below
X *col, *im;
int imRow, imCol;

// fast path: both buffers C-ordered with monotonically descending strides,
// so the kernel-major loop nest walks the col buffer sequentially
if (shape::order(colShapeBuffer) == 'c' && shape::order(imShapeBuffer) == 'c' && shape::strideDescendingCAscendingF(colShapeBuffer) && shape::strideDescendingCAscendingF(imShapeBuffer)) {

    #pragma omp parallel for schedule(static) proc_bind(close) private(col, im, imRow, imCol)
    for (int b = 0; b < bS; b++) {
        for (int c = 0; c < iC; ++c) {
            for (int kRow = 0; kRow < kH; ++kRow) {
                for (int kCol = 0; kCol < kW; ++kCol) {
                    for (int colH = 0; colH < oH; ++colH) {
                        for (int colW = 0; colW < oW; ++colW) {
                            // image coordinates this col entry maps to (may land in the padding)
                            imRow = (-pH + kRow * dH) + colH*sH;
                            imCol = (-pW + kCol * dW) + colW*sW;
                            col = colBuff + b*colStride0 + c*colStride1 + kRow*colStride2 + kCol*colStride3 + colH*colStride4 + colW*colStride5;
                            im =
imBuff + b*imStride0 + c*imStride1 + imRow*imStride2 + imCol*imStride3; if (static_cast<unsigned>(imRow) < static_cast<unsigned>(iH) && static_cast<unsigned>(imCol) < static_cast<unsigned>(iW)) *im += *col; } } } } } } } else { #pragma omp parallel for schedule(static) proc_bind(close) private(im, col, imRow, imCol) for (int b = 0; b < bS; b++) { for (int colH = 0; colH < oH; ++colH) { for (int colW = 0; colW < oW; ++colW) { for (int c = 0; c < iC; ++c) { for (int kRow = 0; kRow < kH; ++kRow) { for (int kCol = 0; kCol < kW; ++kCol) { imRow = (-pH + kRow * dH) + colH*sH; imCol = (-pW + kCol * dW) + colW*sW; col = colBuff + b*colStride0 + c*colStride1 + kRow*colStride2 + kCol*colStride3 + colH*colStride4 + colW*colStride5; im = imBuff + b*imStride0 + c*imStride1 + imRow*imStride2 + imCol*imStride3; if (static_cast<unsigned>(imRow) < static_cast<unsigned>(iH) && static_cast<unsigned>(imCol) < static_cast<unsigned>(iW)) *im += *col; } } } } } } } } op_def static X op(X d1, X *params) { return d1; } /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 4 */ static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[2] != 1) offset += indices[2] * stride[2]; if (shape[3] != 1) offset += indices[3] * stride[3]; return offset; } /** A version of Shape.getOffset without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here) */ static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if 
(shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[4] != 1) offset += indices[4] * stride[4]; if (shape[5] != 1) offset += indices[5] * stride[5]; return offset; } }; template<typename X> class Reverse { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *zShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { __shared__ Nd4jLong xLength; __shared__ int xEWS; __shared__ char xOrder; __shared__ Nd4jLong sLength; __shared__ X *shmem; int tid = threadIdx.x + blockIdx.x * blockDim.x; if (threadIdx.x == 0) { xLength = shape::length(xShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); xOrder = shape::order(xShapeBuffer); sLength = xLength - 1; extern __shared__ unsigned char shrd[]; shmem = (X *) shrd; } __syncthreads(); if (dx == result) { if (xEWS == 1) { for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) { Nd4jLong idx = sLength - e; X tmp = dx[e]; dx[e] = dx[idx]; dx[idx] = tmp; } } else if (xEWS >= 1) { for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) { Nd4jLong idx1 = (sLength - e) * xEWS; Nd4jLong idx2 = e * xEWS; X tmp = dx[idx2]; dx[idx2] = dx[idx1]; dx[idx1] = tmp; } } else { for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) { auto xOffset = shape::getIndexOffset(e, xShapeBuffer, xLength); auto zOffset = shape::getIndexOffset(sLength - e, xShapeBuffer, xLength); result[zOffset] = dx[xOffset]; } } } else { __shared__ int zEWS; __shared__ char zOrder; if (threadIdx.x == 0) { zEWS = shape::elementWiseStride(zShapeBuffer); zOrder = shape::order(zShapeBuffer); } __syncthreads(); if (xEWS == 1 && zEWS == 1 && xOrder == zOrder) { // loop for whole array for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) { result[sLength - e] = dx[e]; } } else if (xEWS >= 1 && zEWS >= 1 
&& xOrder == zOrder) { for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) { result[(sLength - e) * zEWS] = dx[e * xEWS]; } } else { for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) { auto xOffset = shape::getIndexOffset(e, xShapeBuffer, xLength); auto zOffset = shape::getIndexOffset(sLength - e, xShapeBuffer, xLength); result[zOffset] = dx[xOffset]; } } } } #endif static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *zShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { Nd4jLong xLength = shape::length(xShapeBuffer); int xEWS = shape::elementWiseStride(xShapeBuffer); char xOrder = shape::order(xShapeBuffer); Nd4jLong sLength = xLength - 1; // two step phase here if (dx == result) { if (xEWS == 1) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength / 2; e++) { Nd4jLong idx = sLength - e; auto tmp = dx[e]; dx[e] = dx[idx]; dx[idx] = tmp; } } else if (xEWS > 1) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength / 2; e++) { Nd4jLong idx1 = (sLength - e) * xEWS; Nd4jLong idx2 = e * xEWS; auto tmp = dx[idx2]; dx[idx2] = dx[idx1]; dx[idx1] = tmp; } } else { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength / 2; e++) { auto xOffset = shape::getIndexOffset(e, xShapeBuffer, xLength); auto zOffset = shape::getIndexOffset(sLength - e, xShapeBuffer, xLength); result[zOffset] = dx[xOffset]; } } } else { // single step phase here auto zEWS = shape::elementWiseStride(zShapeBuffer); auto zOrder = shape::order(zShapeBuffer); if (xEWS == 1 && zEWS == 1 && xOrder == zOrder) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength; e++) { result[sLength - e] = dx[e]; } } else if (xEWS >= 1 && zEWS >= 1 && xOrder == zOrder) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength; e++) { result[(sLength - e) * zEWS] = dx[e * xEWS]; } } else { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < 
xLength; e++) { auto xOffset = shape::getIndexOffset(e, xShapeBuffer, xLength); auto zOffset = shape::getIndexOffset(sLength - e, zShapeBuffer, xLength); result[zOffset] = dx[xOffset]; } } } } op_def static X op(X d1, X *params) { return d1; } }; template<typename X> class SoftMax { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( void *vx, Nd4jLong *xShapeBuffer, void *vresult, Nd4jLong *zShapeBuffer, void *vextraParams, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto dx = reinterpret_cast<X *>(vx); auto result = reinterpret_cast<X *>(vresult); auto extraParams = reinterpret_cast<X *>(vextraParams); auto shape = shape::shapeOf(xShapeBuffer); __shared__ X maxResult; __shared__ Nd4jLong *maxResultShapeBuffer; auto length = shape::length(xShapeBuffer); auto stride = shape::stride(xShapeBuffer); //compute the row wise maxes __shared__ Nd4jLong maxShape[2]; // it's always 2d here __shared__ Nd4jLong tempBuffer[8]; if (threadIdx.x == 0) { maxResult = (X) 0.0; maxShape[0] = shape[0]; maxShape[1] = 1; maxResultShapeBuffer = shape::shapeBuffer(2, nd4j::DataTypeUtils::fromT<X>(), maxShape, tempBuffer); } __syncthreads(); functions::reduce::ReduceSameInplace<X>::execScalarCudaLegacy(nd4j::reduce::Max, dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, nullptr); __syncthreads(); //subtract max of each row functions::scalar::ScalarInplace<X,X,X>::transformCudaLegacy(nd4j::scalar::Subtract, &maxResult, dx, xShapeBuffer, extraParams, result, zShapeBuffer, allocationPointer); __syncthreads(); //after subtracting the row wise maxes take the exp functions::transform::TransformStrictInplace<X>::transformCudaLegacy(nd4j::transform::Exp, result, zShapeBuffer, extraParams, result, zShapeBuffer, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); __syncthreads(); //take the sum for the exponential 
functions::reduce::ReduceSameInplace<X>::execScalarCudaLegacy(nd4j::reduce::Sum, result, zShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, nullptr); __syncthreads(); //divide by the sum functions::scalar::ScalarInplace<X,X,X>::transformCudaLegacy(nd4j::scalar::Divide, &maxResult, result, zShapeBuffer, extraParams, result, zShapeBuffer, allocationPointer); } #endif static void execSpecial( void *vx, Nd4jLong *xShapeBuffer, void *vresult, Nd4jLong *zShapeBuffer, void *vextraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto dx = reinterpret_cast<X *>(vx); auto result = reinterpret_cast<X *>(vresult); auto extraParams = reinterpret_cast<X *>(vextraParams); if (shape::isMatrix(xShapeBuffer)) { auto shape = shape::shapeOf(xShapeBuffer); //iterate along rows int dimension[1] = { 0 }; int maxDimension[1] = { 1 }; //compute the row wise maxes auto maxResult = new X[shape[0]]; for (int i = 0; i < shape[0]; i++) maxResult[i] = 0.0; Nd4jLong maxShape[2] = { shape[0], 1 }; auto maxResultShapeBuffer = shape::shapeBuffer(2, nd4j::DataTypeUtils::fromT<X>(), maxShape); functions::reduce::ReduceSameFunction<X>::exec(nd4j::reduce::Max, dx, xShapeBuffer, extraParams, maxResult, maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //subtract max of each row functions::broadcast::Broadcast<X, X, X>::exec(nd4j::broadcast::Subtract, dx, xShapeBuffer, maxResult, maxResultShapeBuffer, result, zShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); //after subtracting the row wise maxes take the exp functions::transform::TransformStrict<X>::exec(nd4j::transform::Exp, result, zShapeBuffer, result, zShapeBuffer, extraParams, tadShapeInfo, tadOffsets); //take the sum for the exponential functions::reduce::ReduceSameFunction<X>::exec(nd4j::reduce::Sum, result, zShapeBuffer, extraParams, maxResult, maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //divide by the sum functions::broadcast::Broadcast<X,X,X>::exec(nd4j::broadcast::Divide, 
result, zShapeBuffer, maxResult, maxResultShapeBuffer, result, zShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); delete[] maxResultShapeBuffer; delete[] maxResult; } else if (shape::isVector(xShapeBuffer)) { auto max = -nd4j::DataTypeUtils::max<X>(); X sum = 0; int elementWiseStride = shape::elementWiseStride(xShapeBuffer); int resultElementWiseStride = shape::elementWiseStride(zShapeBuffer); int length = shape::length(xShapeBuffer); if (elementWiseStride >= 1 && resultElementWiseStride >= 1) { if (elementWiseStride == 1 && resultElementWiseStride == 1) { //#pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<X>(max, dx[i]); } //#pragma omp parallel for simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i] = nd4j::math::nd4j_exp<X,X>(dx[i] - max); sum += result[i]; } #pragma omp simd for (int i = 0; i < length; i++) { result[i] /= sum; } } else { //#pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<X>(max, dx[i * elementWiseStride]); } //#pragma omp parallel for simd reduction(sumT:sum) for (int i = 0; i < length; i++) { auto r = nd4j::math::nd4j_exp<X, X>(dx[i * elementWiseStride] - max); result[i * resultElementWiseStride] = r; sum += r; } //#pragma omp simd for (int i = 0; i < length; i++) { result[i * resultElementWiseStride] /= sum; } } } } } op_def static X op(X d1, X *params) { return d1; } }; template<typename X> class LogSoftMax { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( void *vx, Nd4jLong *xShapeBuffer, void *vresult, Nd4jLong *zShapeBuffer, void *vextraParams, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto shape = shape::shapeOf(xShapeBuffer); auto stride = shape::stride(xShapeBuffer); //iterate along rows auto dx = reinterpret_cast<X *>(vx); auto result = reinterpret_cast<X *>(vresult); auto 
extraParams = reinterpret_cast<X *>(vextraParams); __shared__ X maxResult; __shared__ Nd4jLong *maxResultShapeBuffer; if (threadIdx.x == 0) { maxResult = (X) 0.0; } __syncthreads(); //compute the row wise maxes Nd4jLong maxShape[2] = { shape[0], 1 }; __shared__ Nd4jLong tempBuffer[8]; if (threadIdx.x == 0) maxResultShapeBuffer = shape::shapeBuffer(2, nd4j::DataTypeUtils::fromT<X>(), maxShape, tempBuffer); __syncthreads(); functions::reduce::ReduceSameInplace<X>::execScalarCudaLegacy(nd4j::reduce::Max, dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, nullptr); __syncthreads(); //subtract max of each row functions::scalar::ScalarInplace<X,X,X>::transformCudaLegacy(nd4j::scalar::Subtract, &maxResult, dx, xShapeBuffer, extraParams, result, zShapeBuffer, allocationPointer); __syncthreads(); //after subtracting the row wise maxes take the exp functions::transform::TransformStrictInplace<X>::transformCudaLegacy(nd4j::transform::Exp, result, zShapeBuffer, extraParams, result, zShapeBuffer, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); __syncthreads(); //take the sum for the exponential functions::reduce::ReduceSameInplace<X>::execScalarCudaLegacy(nd4j::reduce::Sum, result, zShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, nullptr); __syncthreads(); //divide by the sum functions::scalar::ScalarInplace<X,X,X>::transformCudaLegacy(nd4j::scalar::Divide, &maxResult, result, zShapeBuffer, extraParams, result, zShapeBuffer, allocationPointer); __syncthreads(); functions::transform::TransformStrictInplace<X>::transformCudaLegacy(nd4j::transform::Log, result, zShapeBuffer, extraParams, result, zShapeBuffer, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); } #endif static void execSpecial( void *vx, Nd4jLong *xShapeBuffer, void *vresult, Nd4jLong *zShapeBuffer, void *vextraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto dx = reinterpret_cast<X *>(vx); auto result = 
reinterpret_cast<X *>(vresult); auto extraParams = reinterpret_cast<X *>(vextraParams); if (shape::isMatrix(xShapeBuffer, 2)) { auto shape = shape::shapeOf(xShapeBuffer); //iterate along rows int dimension[1] = { 0 }; int maxDimension[1] = { 1 }; //compute the row wise maxes auto maxResult = new X[shape[0]]; #pragma omp simd for (int i = 0; i < shape[0]; i++) maxResult[i] = 0.0; Nd4jLong maxShape[2] = { shape[0], 1 }; auto maxResultShapeBuffer = shape::shapeBuffer(2, nd4j::DataTypeUtils::fromT<X>(), maxShape); functions::reduce::ReduceSameFunction<X>::exec(nd4j::reduce::Max, dx, xShapeBuffer, extraParams, maxResult, maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //subtract max of each row functions::broadcast::Broadcast<X,X,X>::exec(nd4j::broadcast::Subtract, dx, xShapeBuffer, maxResult, maxResultShapeBuffer, result, zShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); //after subtracting the row wise maxes take the exp functions::transform::TransformStrict<X>::exec(nd4j::transform::Exp, result, zShapeBuffer, result, zShapeBuffer, extraParams, tadShapeInfo, tadOffsets); //take the sum for the exponential functions::reduce::ReduceSameFunction<X>::exec(nd4j::reduce::Sum, result, zShapeBuffer, extraParams, maxResult, maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //divide by the sum functions::broadcast::Broadcast<X,X,X>::exec(nd4j::broadcast::Divide, result, zShapeBuffer, maxResult, maxResultShapeBuffer, result, zShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); functions::transform::TransformStrict<X>::exec(nd4j::transform::Log, result, zShapeBuffer, result, zShapeBuffer, extraParams, tadShapeInfo, tadOffsets); delete[] maxResultShapeBuffer; } else if (shape::isVector(xShapeBuffer, 2)) { auto max = -FLOAT_MAX_VALUE; X sum = 0; auto elementWiseStride = shape::elementWiseStride(xShapeBuffer); auto length = shape::length(xShapeBuffer); if (elementWiseStride == 1) { //#pragma omp simd reduction(maxT:max) for (int i = 0; i 
< length; i++) { max = nd4j::math::nd4j_max<X>(max, result[i]); } //#pragma omp simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i] = nd4j::math::nd4j_exp<X, X>(dx[i] - max); sum += result[i]; } #pragma omp simd for (int i = 0; i < length; i++) { result[i] /= sum; result[i] = nd4j::math::nd4j_log<X, X>(result[i]); } } else if (elementWiseStride > 1) { //#pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<X>(max, result[i * elementWiseStride]); } //#pragma omp simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i * elementWiseStride] = nd4j::math::nd4j_exp<X, X>(dx[i * elementWiseStride] - max); sum += result[i * elementWiseStride]; } //#pragma omp simd for (int i = 0; i < length; i++) { result[i * elementWiseStride] /= sum; result[i * elementWiseStride] = nd4j::math::nd4j_log<X, X>(result[i * elementWiseStride]); } } } } op_def static X op(X d1, X *params) { return d1; } }; /** * softmax(x) */ template<typename X> class SoftMaxDerivative { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( void *vx, Nd4jLong *xShapeBuffer, void *vresult, Nd4jLong *zShapeBuffer, void *vextraParams, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto dx = reinterpret_cast<X *>(vx); auto result = reinterpret_cast<X *>(vresult); auto extraParams = reinterpret_cast<X *>(vextraParams); auto shape = shape::shapeOf(xShapeBuffer); __shared__ X maxResult; __shared__ Nd4jLong *maxResultShapeBuffer; __shared__ Nd4jLong resultEWS; auto length = shape::length(xShapeBuffer); if (threadIdx.x == 0) { resultEWS = shape::elementWiseStride(zShapeBuffer); maxResult = (X) 0.0; } __syncthreads(); auto tride = shape::stride(xShapeBuffer); Nd4jLong maxShape[2] = { shape[0], 1 }; __shared__ Nd4jLong tempBuffer[8]; if (threadIdx.x == 0) maxResultShapeBuffer = shape::shapeBuffer(2, 
nd4j::DataTypeUtils::fromT<X>(), maxShape, tempBuffer); __syncthreads(); functions::reduce::ReduceSameInplace<X>::execScalarCudaLegacy(nd4j::reduce::Max, dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, nullptr); __syncthreads(); //subtract max of each row functions::scalar::ScalarInplace<X,X,X>::transformCudaLegacy(nd4j::scalar::Subtract, &maxResult, dx, xShapeBuffer, extraParams, result, zShapeBuffer, allocationPointer); __syncthreads(); //after subtracting the row wise maxes take the exp functions::transform::TransformStrictInplace<X>::transformCudaLegacy(nd4j::transform::Exp, result, zShapeBuffer, extraParams, result, zShapeBuffer, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); __syncthreads(); //take the sum for the exponential functions::reduce::ReduceSameInplace<X>::execScalarCudaLegacy(nd4j::reduce::Sum, result, zShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, nullptr); __syncthreads(); //divide by the sum functions::scalar::ScalarInplace<X,X,X>::transformCudaLegacy(nd4j::scalar::Divide, &maxResult, result, zShapeBuffer, extraParams, result, zShapeBuffer, allocationPointer); __syncthreads(); if (resultEWS >= 1) { for (int i = threadIdx.x; i < length; i += blockDim.x) { result[i * resultEWS] = result[i * resultEWS] * ((X) 1.0 - result[i * resultEWS]); } } else { printf("Non element wise stride not supported right now\n"); } } #endif static void execSpecial( void *vx, Nd4jLong *xShapeBuffer, void *vresult, Nd4jLong *zShapeBuffer, void *vextraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto dx = reinterpret_cast<X *>(vx); auto result = reinterpret_cast<X *>(vresult); auto extraParams = reinterpret_cast<X *>(vextraParams); if (shape::isMatrix(xShapeBuffer, 2)) { auto shape = shape::shapeOf(xShapeBuffer); auto resultEleStide = shape::elementWiseStride(zShapeBuffer); //iterate along rows int dimension[1] = { 0 }; int maxDimension[1] = { 1 }; auto len = 
shape::length(xShapeBuffer); //compute the row wise maxes auto maxResult = new X[shape[0]]; #pragma omp simd for (int i = 0; i < shape[0]; i++) maxResult[i] = 0.0; Nd4jLong maxShape[2] = { shape[0], 1 }; auto maxResultShapeBuffer = shape::shapeBuffer(2, nd4j::DataTypeUtils::fromT<X>(), maxShape); functions::reduce::ReduceSameFunction<X>::exec(nd4j::reduce::Max, dx, xShapeBuffer, extraParams, maxResult, maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //subtract max of each row functions::broadcast::Broadcast<X,X,X>::exec(nd4j::broadcast::Subtract, result, zShapeBuffer, maxResult, maxResultShapeBuffer, result, zShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); //after subtracting the row wise maxes take the exp functions::transform::TransformStrict<X>::exec(nd4j::transform::Exp, result, zShapeBuffer, result, zShapeBuffer, extraParams, tadShapeInfo, tadOffsets); //take the sum for the exponential functions::reduce::ReduceSameFunction<X>::exec(nd4j::reduce::Sum, result, zShapeBuffer, extraParams, maxResult, maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //divide by the sum functions::broadcast::Broadcast<X,X,X>::exec(nd4j::broadcast::Divide, result, zShapeBuffer, maxResult, maxResultShapeBuffer, result, zShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); if (resultEleStide >= 1) { if (resultEleStide == 1) { #pragma omp simd for (int i = 0; i < len; i++) { result[i] = result[i] * (static_cast<X>(1.0f) - result[i]); } } else { #pragma omp simd for (int i = 0; i < len; i++) { result[i * resultEleStide] = result[i * resultEleStide] * (static_cast<X>(1.0f) - result[i * resultEleStide]); } } } else { for (int i = 0; i < len; i++) { Nd4jLong zOffset = shape::getIndexOffset(i, zShapeBuffer, len); result[zOffset] = result[zOffset] * ((X) 1.0f - result[zOffset]); } } delete[] maxResultShapeBuffer; delete[] maxResult; } else if (shape::isVector(xShapeBuffer, 2)) { auto max = -nd4j::DataTypeUtils::max<X>(); X sum = 0; auto 
elementWiseStride = shape::elementWiseStride(xShapeBuffer); auto length = shape::length(xShapeBuffer); if (elementWiseStride == 1) { //#pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<X>(max, result[i]); } //#pragma omp simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i] -= max; result[i] = nd4j::math::nd4j_exp<X, X>(result[i]); sum += result[i]; } //#pragma omp simd for (int i = 0; i < length; i++) { result[i] /= sum; } //#pragma omp simd for (int i = 0; i < length; i++) { result[i] = result[i] * ((X) 1.0f - result[i]); } } else if (elementWiseStride >= 1) { //#pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<X>(max, result[i * elementWiseStride]); } //#pragma omp simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i * elementWiseStride] -= max; result[i * elementWiseStride] = nd4j::math::nd4j_exp<X, X>(result[i * elementWiseStride]); sum += result[i * elementWiseStride]; } #pragma omp simd for (int i = 0; i < length; i++) { result[i * elementWiseStride] /= sum; } #pragma omp simd for (int i = 0; i < length; i++) { result[i * elementWiseStride] = result[i * elementWiseStride] * ((X) 1.0f - result[i * elementWiseStride]); } } else { printf("non-ews access on row not implemented yet"); } } } op_def static X op(X d1, X *params) { return d1; } }; template<typename X, typename Z> class IsMax { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ static inline __device__ void doAllCuda( void *vx, Nd4jLong *xShapeBuffer, void *vresult, Nd4jLong *zShapeBuffer, void *vextraParams, int *allocationPointer, void *reductionPointer) { auto dx = reinterpret_cast<X *>(vx); auto result = reinterpret_cast<Z *>(vresult); auto extraParams = reinterpret_cast<X *>(vextraParams); // this code is safe to delete, it's never used /* __shared__ int maxIdx; __shared__ int length; if (threadIdx.x == 0) { length = shape::length(zShapeBuffer); } 
__syncthreads(); functions::indexreduce::IndexReduce<T>::template transform<simdOps::IndexMax<T>>( dx, xShapeBuffer, extraParams, result, zShapeBuffer, nullptr, 1, 1, allocationPointer, reductionPointer, nullptr, nullptr); __syncthreads(); if (threadIdx.x == 0) maxIdx = (int)result[0]; __syncthreads(); for (int i = threadIdx.x; i < length; i += blockDim.x) result[i] = 0; __syncthreads(); if (threadIdx.x == 0) { result[maxIdx] = 1.0; } */ } #endif #ifdef __CUDACC__ inline __host__ #elif defined(__GNUC__) #endif static void doAll( void *vx, Nd4jLong *xShapeBuffer, void *vresult, Nd4jLong *zShapeBuffer, void *vextraParams) { auto dx = reinterpret_cast<X *>(vx); auto result = reinterpret_cast<Z *>(vresult); auto extraParams = reinterpret_cast<X *>(vextraParams); auto length = shape::length(xShapeBuffer); auto eleStride = shape::elementWiseStride(xShapeBuffer); auto resultEleStride = shape::elementWiseStride(zShapeBuffer); auto xOrder = shape::order(xShapeBuffer); auto resultOrder = shape::order(zShapeBuffer); /* int tadsPerThread = tads / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); */ if (xOrder == resultOrder && xOrder == 'c') { if (eleStride == 1 && resultEleStride == 1) { if (length < ELEMENT_THRESHOLD) { int maxIdx = 0; auto currMax = dx[0]; //#pragma omp simd reduction (max:maxIdx,currMax) for (int i = 0; i < length; i++) { if (currMax < dx[i]) { currMax = dx[i]; maxIdx = i; } result[i] = static_cast<Z>(0); } result[maxIdx] = static_cast<Z>(1); } else { int maxIdx = 0; auto currMax = dx[0]; #pragma omp parallel proc_bind(AFFINITY) { int maxIdxLocal = maxIdx; auto currMaxLocal = currMax; //#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal) for (int i = 0; i < length; i++) { if (currMaxLocal < dx[i]) { currMaxLocal = dx[i]; maxIdxLocal = i; } result[i] = static_cast<Z>(0); } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; 
maxIdx = maxIdxLocal; } } } result[maxIdx] = static_cast<Z>(1); } } else { if (length < ELEMENT_THRESHOLD) { int maxIdx = 0; auto currMax = dx[0]; //#pragma omp simd reduction(max:maxIdx,currMax) for (int i = 0; i < length; i++) { result[i * resultEleStride] = static_cast<Z>(0); if (currMax < dx[i * eleStride]) { currMax = dx[i * eleStride]; maxIdx = i; } } result[maxIdx * resultEleStride] = static_cast<Z>(1); } else { int maxIdx = 0; auto currMax = dx[0]; #pragma omp parallel proc_bind(AFFINITY) default(shared) { int maxIdxLocal = maxIdx; auto currMaxLocal = currMax; //#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal) for (int i = 0; i < length; i++) { result[i * resultEleStride] = static_cast<Z>(0); if (currMaxLocal < dx[i * eleStride]) { currMaxLocal = dx[i * eleStride]; maxIdxLocal = i; } } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } result[maxIdx * resultEleStride] = static_cast<Z>(1); } } } else { Nd4jLong shapeIter[MAX_RANK]; Nd4jLong coord[MAX_RANK]; int dim; Nd4jLong xStridesIter[MAX_RANK]; Nd4jLong resultStridesIter[MAX_RANK]; auto xShape = shape::shapeOf(xShapeBuffer); auto xStride = shape::stride(xShapeBuffer); auto resultStride = shape::stride(zShapeBuffer); auto rank = shape::rank(xShapeBuffer); auto originalResult = result; if (PrepareTwoRawArrayIter<X, Z>(rank, xShape, dx, xStride, result, resultStride, &rank, shapeIter, &dx, xStridesIter, &result, resultStridesIter) >= 0) { auto value = dx[0]; int idx = 0; int maxIdx = 0; ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { if (dx[0] > value) { value = dx[0]; maxIdx = idx; } idx++; result[0] = static_cast<Z>(0); } ND4J_RAW_ITER_TWO_NEXT( dim, rank, coord, shapeIter, dx, xStridesIter, result, resultStridesIter); //pointer to where max value would be if (shape::order(zShapeBuffer) == 'c' || (shape::order(zShapeBuffer) == 'f' && maxIdx * shape::stride(zShapeBuffer)[shape::rank(zShapeBuffer) - 1] >= shape::length(zShapeBuffer))) 
originalResult[maxIdx] = static_cast<Z>(1); else originalResult[maxIdx * shape::stride(zShapeBuffer)[shape::rank(zShapeBuffer) - 1]] = static_cast<Z>(1); } } } public: #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( void *vx, Nd4jLong *xShapeBuffer, void *vresult, Nd4jLong *zShapeBuffer, void *vextraParams, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto dx = reinterpret_cast<X *>(vx); auto result = reinterpret_cast<Z *>(vresult); auto extraParams = reinterpret_cast<X *>(vextraParams); // FIXME: MAX_DIMENSION is lower then FP16 frame if (extraParams == nullptr || (int) extraParams[0] == MAX_DIMENSION) { doAllCuda(dx, xShapeBuffer, result, zShapeBuffer, extraParams, allocationPointer, reductionPointer); } } #endif static void execSpecial( void *vx, Nd4jLong *xShapeBuffer, void *vresult, Nd4jLong *zShapeBuffer, void *vextraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto dx = reinterpret_cast<X *>(vx); auto result = reinterpret_cast<Z *>(vresult); auto extraParams = reinterpret_cast<X *>(vextraParams); //FIXME: this op should be moved to CustomOps if (extraParams == nullptr || (int)extraParams[0] == 0 || ((int)extraParams[0] == 1 && (int)extraParams[1] == MAX_DIMENSION)) { doAll(dx, xShapeBuffer, result, zShapeBuffer, extraParams); } else if (shape::isVector(xShapeBuffer)) { auto dimensionLength = (int)extraParams[0]; auto dimension = new int[dimensionLength]; auto length = shape::length(xShapeBuffer); for (int i = 0; i < dimensionLength; i++) { dimension[i] = (int)extraParams[i + 1]; } if (shape::shapeOf(xShapeBuffer)[dimension[0]] == 1) { for (int i = 0; i < length; i++) { result[i] = static_cast<Z>(1); } } else { auto eleStride = shape::elementWiseStride(xShapeBuffer); if (eleStride == 1) { int maxIdx = 0; auto currMax = dx[0]; if (length < ELEMENT_THRESHOLD) { //#pragma omp simd reduction(max:maxIdx,currMax) for (int i = 0; i < length; i++) { if (currMax < dx[i]) { currMax 
= dx[i]; maxIdx = i; } result[i] = static_cast<Z>(0); } } else { #pragma omp parallel proc_bind(AFFINITY) default(shared) { int maxIdxLocal = maxIdx; auto currMaxLocal = currMax; //#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal) for (int i = 0; i < length; i++) { if (currMaxLocal < dx[i]) { currMaxLocal = dx[i]; maxIdxLocal = i; } result[i] = static_cast<Z>(0); } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } } result[maxIdx] = static_cast<Z>(1); } else { int maxIdx = 0; auto currMax = dx[0]; if (length < ELEMENT_THRESHOLD) { //#pragma omp parallel for reduction(max:maxIdx,currMax) proc_bind(AFFINITY) for (int i = 0; i < length; i++) { if (currMax < dx[i * eleStride]) { currMax = dx[i * eleStride]; maxIdx = i; } result[i] = static_cast<Z>(0); } } else { #pragma omp parallel proc_bind(AFFINITY) default(shared) { int maxIdxLocal = maxIdx; auto currMaxLocal = currMax; //#pragma omp parallel for reduction(max:maxIdx,currMax) proc_bind(AFFINITY) for (int i = 0; i < length; i++) { if (currMaxLocal < dx[i * eleStride]) { currMaxLocal = dx[i * eleStride]; maxIdxLocal = i; } result[i] = static_cast<Z>(0); } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } } result[maxIdx] = static_cast<Z>(1); } } } else { auto dimensionLength = (int) extraParams[0]; auto dimension = new int[dimensionLength]; #pragma omp simd for (int i = 0; i < dimensionLength; i++) { dimension[i] = (int) extraParams[i + 1]; } //decompose in to several sub tads after //moving all dimensions (in sorted order) //to the back. 
//permuted version of the x shape info for setting up the tad problem auto tadShapeShapeInfo = tadShapeInfo; shape::TAD tad (xShapeBuffer, dimension, dimensionLength); if(tadShapeInfo==nullptr) { tad.createTadOnlyShapeInfo(); tad.createOffsets(); tadShapeShapeInfo = tad.tadOnlyShapeInfo; tadOffsets = tad.tadOffsets; } auto tadLength = shape::tadLength(xShapeBuffer, dimension, dimensionLength); auto tads = shape::length(xShapeBuffer) / tadLength; int tadsPerThread = tads / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); auto tadEWS = shape::elementWiseStride(tadShapeShapeInfo); auto zEWS = tadEWS; int span = (tads / num_threads) + 8; #pragma omp parallel num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY) { int tid = omp_get_thread_num(); int start = span * tid; int end = span * (tid + 1); if (end > tads) end = tads; for (int r = start; r < end; r++) { if (tadEWS > 0 && zEWS > 0 && dimensionLength == 1) { auto rX = dx + tadOffsets[r]; auto rZ = result + tadOffsets[r]; auto maxValue = rX[0]; int maxIdx = 0; if (tadEWS == 1 && zEWS == 1) { //#pragma omp simd reduction(max:maxValue,maxIdx) for (int i = 0; i < tadLength; i++) { if (rX[i] > maxValue) { maxIdx = i; maxValue = rX[i]; } } //#pragma omp simd for (int i = 0; i < tadLength; i++) { rZ[i] = static_cast<Z>(maxIdx == i); } } else { //#pragma omp parallel for reduction(max:maxValue,maxIdx) default(shared) for (int i = 0; i < tadLength; i++) { if (rX[i * tadEWS] > maxValue) { maxIdx = i; maxValue = rX[i * tadEWS]; } } //#pragma omp simd for (int i = 0; i < tadLength; i++) { rZ[i * zEWS] = static_cast<Z>(maxIdx == i); } } } else { int tadsPerThread = tads / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); auto offset = tadOffsets[r]; Nd4jLong shapeIter[MAX_RANK]; Nd4jLong coord[MAX_RANK]; int 
dim; Nd4jLong xStridesIter[MAX_RANK]; Nd4jLong resultStridesIter[MAX_RANK]; auto xShape = shape::shapeOf(tadShapeShapeInfo); auto xStride = shape::stride(tadShapeShapeInfo); auto resultStride = shape::stride(tadShapeShapeInfo); int rank = shape::rank(tadShapeShapeInfo); auto xPointer = dx + offset; auto resultPointer = result + offset; auto maxValue = xPointer[0]; auto maxCursor = resultPointer; Nd4jPointer maxCursorLong = reinterpret_cast<Nd4jPointer>(maxCursor); if (PrepareTwoRawArrayIter<X,Z>(rank, xShape, xPointer, xStride, resultPointer, resultStride, &rank, shapeIter, &xPointer, xStridesIter, &resultPointer, resultStridesIter) >= 0) { ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { if (maxValue < xPointer[0]) { maxCursor = resultPointer; maxCursorLong = reinterpret_cast<Nd4jPointer>(resultPointer); maxValue = xPointer[0]; } resultPointer[0] = static_cast<Z>(0); } ND4J_RAW_ITER_TWO_NEXT(dim, rank, coord, shapeIter, xPointer, xStridesIter, resultPointer, resultStridesIter); maxCursor = reinterpret_cast<Z *>(maxCursorLong); maxCursor[0] = static_cast<Z>(1);; } } } } delete[] dimension; } } op_def static Z op(X d1, X *params) { return nd4j::math::softplus<X,Z>(d1); } }; }
GB_unaryop__minv_int16_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): change the code generator instead; hand edits here will be
// overwritten the next time the Generated/ sources are rebuilt.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int16_fp32
// op(A') function:  GB_tran__minv_int16_fp32

// C type:   int16_t
// A type:   float
// cast:     int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop:  cij = GB_IMINV_SIGNED (aij, 16)

// type of the A matrix entries
#define GB_ATYPE \
    float

// type of the C matrix entries
#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: presumably the 16-bit signed integer "multiplicative
// inverse" (GB_IMINV_SIGNED is defined in GB.h) -- TODO confirm semantics
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 16) ;

// casting: float -> int16_t; range handling is done inside GB_CAST_SIGNED
#define GB_CASTING(z, x) \
    int16_t z ; GB_CAST_SIGNED(z,x,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = GB_IMINV_SIGNED ((int16_t) Ax [p], 16) for p in [0, anz), with the
// loop statically scheduled across nthreads OpenMP threads.  Returns
// GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB_unop__minv_int16_fp32
(
    int16_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unaryop_transpose.c, instantiated here through the GB_* macros above.
GrB_Info GB_tran__minv_int16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sectionstest.c
#include<stdio.h>
#include<unistd.h>
#include<limits.h>
#include<omp.h>

/* Run two 10-second sleeps in parallel OpenMP sections; with two or more
 * threads the whole call takes ~10 s instead of the serial 20 s. */
void sectionstest(void);

/* Time sectionstest() with omp_get_wtime() and print the elapsed wall time.
 * Fix: the original signature `int main(int argc, char* *argv[])` declared
 * argv as char***, which is not a standard-permitted form of main
 * (C11 5.1.2.2.1); corrected to char *argv[]. */
int main(int argc, char *argv[])
{
    (void)argc;   /* command-line arguments are unused */
    (void)argv;
    double start = omp_get_wtime();   /* wall-clock start time */
    sectionstest();
    printf("finish 1 elapsed time=%lf\n", omp_get_wtime() - start);
    return 0;
}

void sectionstest(void)
{
#pragma omp parallel sections
    {
        /* Each section is executed by (at most) one thread of the team. */
#pragma omp section
        sleep(10);
#pragma omp section
        sleep(10);
    }
}
GB_unaryop__identity_fp64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): change the code generator instead; hand edits here will be
// overwritten the next time the Generated/ sources are rebuilt.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_fp64_bool
// op(A') function:  GB_tran__identity_fp64_bool

// C type:   double
// A type:   bool
// cast:     double cij = (double) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    bool

// type of the C matrix entries
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: identity (copy the casted value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting: bool -> double (false -> 0.0, true -> 1.0)
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = (double) Ax [p] for p in [0, anz), with the loop statically
// scheduled across nthreads OpenMP threads.  Returns GrB_NO_VALUE when this
// operator/type combination is compiled out.
GrB_Info GB_unop__identity_fp64_bool
(
    double *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unaryop_transpose.c, instantiated here through the GB_* macros above.
GrB_Info GB_tran__identity_fp64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
renderer.h
#pragma once #include "frame_buffer.h" #include "scene.h" #include "camera.h" #include "utils.h" color shade(const Ray& ray, Scene& scene, size_t depth) { if (depth <= 0 ){ return color(0.0); } Hit hit; if (scene.get_hit(ray, 0.001, Inf, hit)) { color attenuation; Ray scattered; if (hit.material->scatter(ray, hit, attenuation, scattered)) return attenuation * shade(scattered, scene, depth-1); return color(0.0); } // world/sky color auto t = 0.5 * (ray.direction().y + 1.0); return (1.0 - t) * color(1.0) + t * color(0.5, 0.7, 1.0, 1.0); } void render(Scene& scene, Camera& camera, FrameBuffer& fb) { const size_t num_samples = 100; const size_t max_light_bounces = 50; auto scale = (1.0 / num_samples); #pragma omp parallel for for (int row = 0; row < fb.height(); row++) { for (int col = 0; col < fb.width(); col++) { color color_val(0.0); for (int s = 0; s < num_samples;s++) { auto u = double(col + random_double()) / (fb.width() - 1); auto v = double(row + random_double()) / (fb.height() - 1); auto ray = camera.get_ray(u, v); color_val += shade(ray, scene, max_light_bounces); } color_val *= scale; fb.set_pixel(fb.height() - row - 1, col, color_val); } } }
binary_class_evaluation.h
#pragma once #include <algorithm> #include <dmlc/logging.h> #include <dmlc/omp.h> namespace dmlc { template <typename V> class BinClassEval { public: BinClassEval(const V* const label, const V* const predict, size_t n, int num_threads = 2) : label_(label), predict_(predict), size_(n), nt_(num_threads) { } ~BinClassEval() { } V AUC() { size_t n = size_; struct Entry { V label; V predict; }; std::vector<Entry> buff(n); for (size_t i = 0; i < n; ++i) { buff[i].label = label_[i]; buff[i].predict = predict_[i]; } std::sort(buff.data(), buff.data()+n, [](const Entry& a, const Entry&b) { return a.predict < b.predict; }); V area = 0, cum_tp = 0; for (size_t i = 0; i < n; ++i) { if (buff[i].label > 0) { cum_tp += 1; } else { area += cum_tp; } } if (cum_tp == 0 || cum_tp == n) return 1; area /= cum_tp * (n - cum_tp); return area < 0.5 ? 1 - area : area; } V Accuracy(V threshold) { V correct = 0; size_t n = size_; #pragma omp parallel for reduction(+:correct) num_threads(nt_) for (size_t i = 0; i < n; ++i) { if ((label_[i] > 0 && predict_[i] > threshold) || (label_[i] <= 0 && predict_[i] <= threshold)) correct += 1; } V acc = correct / (V) n; return acc > 0.5 ? acc : 1 - acc; } V LogLoss() { V loss = 0; size_t n = size_; #pragma omp parallel for reduction(+:loss) num_threads(nt_) for (size_t i = 0; i < n; ++i) { V y = label_[i] > 0; V p = 1 / (1 + exp(- predict_[i])); p = p < 1e-10 ? 1e-10 : p; loss += y * log(p) + (1 - y) * log(1 - p); } return - loss; } V LogitObjv() { V objv = 0; #pragma omp parallel for reduction(+:objv) num_threads(nt_) for (size_t i = 0; i < size_; ++i) { V y = label_[i] > 0 ? 
1 : -1; objv += log( 1 + exp( - y * predict_[i] )); } return objv; } V Copc(){ V clk = 0; V clk_exp = 0.0; #pragma omp parallel for reduction(+:clk,clk_exp) num_threads(nt_) for (size_t i = 0; i < size_; ++i) { if (label_[i] > 0) clk += 1; clk_exp += 1.0 / ( 1.0 + exp( - predict_[i] )); } return clk / clk_exp; } private: V const* label_; V const* predict_; size_t size_; int nt_; }; } // namespace dmlc
depend-iterator-1.c
/* { dg-additional-options "-Wno-volatile" { target c++ } } */

/* GCC testsuite: OpenMP 5.0 depend(iterator(...)) clauses.  TEST_EQ is a
   compile-time check — a size/signedness mismatch yields a negative array
   length inside a GNU statement expression and fails the build.  The tests
   verify that iterator variables keep their declared types (defaulting to
   int when no type is given) inside the depend list expressions.  */

int arr[64], arr2[64];
struct S { int a[4]; } k;
short arr4[4];
volatile int v;
#define TEST_EQ(x,y) ({ int o[x == y ? 1 : -1]; 0; })

/* Iterators with constant begin:end:step; the untyped iterator j shadows the
   signed char parameter and must have type int within the clause.  */
void
foo (unsigned char i, signed char j)
{
  #pragma omp task depend (iterator (j=6:2:-2) , out : \
		arr[TEST_EQ (sizeof (j), sizeof (int)), \
		    TEST_EQ (sizeof (i), sizeof (unsigned char)), \
		    TEST_EQ (sizeof (k), sizeof (struct S)), j], \
		arr2[TEST_EQ (((__typeof (j)) -1) < 0, 1), \
		     TEST_EQ (((__typeof (i)) -1) < 0, 0), \
		     TEST_EQ (((__typeof (k.a[0])) -1) < 0, 1), j]) \
	depend(out: arr[0]) \
	depend (iterator (long long i=__LONG_LONG_MAX__ - 4:__LONG_LONG_MAX__ - 2:2, \
			  unsigned short j=~0U-16:~0U-8:3, \
			  short *k=&arr4[1]:&arr4[2]:1) , in : \
		arr[TEST_EQ (sizeof (i), sizeof (long long)), \
		    TEST_EQ (sizeof (j), sizeof (unsigned short)), \
		    TEST_EQ (sizeof (k), sizeof (short *)), \
		    TEST_EQ (sizeof (*k), sizeof (short)), i - __LONG_LONG_MAX__ + 4], \
		arr2[TEST_EQ (((__typeof (i)) -1) < 0, 1), \
		     TEST_EQ (((__typeof (j)) -1) < 0, 0), \
		     TEST_EQ (((__typeof (*k)) -1) < 0, 1), j - (~0U-16)], \
		arr2[k - &arr4[0]]) \
	depend(in : k)
  v++;
}

/* Same checks but with runtime (non-constant) step and bounds, plus an array
   section (arr2[k - &arr4[0]:10]) in the depend list.  */
void
bar (unsigned char i, signed char j)
{
  int m = j;
  int n = j + 2;
  #pragma omp task depend (iterator (j=6:2:m) , out : \
		arr[TEST_EQ (sizeof (j), sizeof (int)), \
		    TEST_EQ (sizeof (i), sizeof (unsigned char)), \
		    TEST_EQ (sizeof (k), sizeof (struct S)), j], \
		arr2[TEST_EQ (((__typeof (j)) -1) < 0, 1), \
		     TEST_EQ (((__typeof (i)) -1) < 0, 0), \
		     TEST_EQ (((__typeof (k.a[0])) -1) < 0, 1), j]) \
	depend(out: arr[0]) \
	depend (iterator (long long i=__LONG_LONG_MAX__ - 4 - n:__LONG_LONG_MAX__ - 2:2, \
			  unsigned short j=~0U-16:~0U-8-n:3, \
			  short *k=&arr4[1]:&arr4[n + 2]:1) , in : \
		arr[TEST_EQ (sizeof (i), sizeof (long long)), \
		    TEST_EQ (sizeof (j), sizeof (unsigned short)), \
		    TEST_EQ (sizeof (k), sizeof (short *)), \
		    TEST_EQ (sizeof (*k), sizeof (short)), i - __LONG_LONG_MAX__ + 4], \
		arr2[TEST_EQ (((__typeof (i)) -1) < 0, 1), \
		     TEST_EQ (((__typeof (j)) -1) < 0, 0), \
		     TEST_EQ (((__typeof (*k)) -1) < 0, 1), j - (~0U-16)], \
		arr2[k - &arr4[0]:10]) \
	depend(in : k)
  v++;
}

/* Iterator ranges that are empty or descend (s = -3 : -12 : -1), exercised
   inside a parallel/master region.  */
void
baz (void)
{
  #pragma omp parallel
  #pragma omp master
  {
    #pragma omp task depend(iterator(unsigned long int k = 0 : 2) , inout : \
		arr[TEST_EQ (sizeof (k), sizeof (unsigned long)), \
		    TEST_EQ (((__typeof (k)) -1) < 0, 0), k]) \
	depend(iterator(signed char s = -3 : -12 : -1) , out : \
		arr[TEST_EQ (sizeof (s), sizeof (signed char)), \
		    TEST_EQ (((__typeof (s)) -1) < 0, 1), s + 12])
    v++;
  }
}
NonlocalMarching_Inpaint_core.c
/*
 * This work is part of the Core Imaging Library developed by
 * Visual Analytics and Imaging System Group of the Science Technology
 * Facilities Council, STFC
 *
 * Copyright 2017 Daniil Kazantsev
 * Copyright 2017 Srikanth Nagella, Edoardo Pasca
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "NonlocalMarching_Inpaint_core.h"
#include "utils.h"

/* C-OMP implementation of Nonlocal Vertical Marching inpainting method (2D case)
 * The method is heuristic but computationally efficent (especially for larger images).
 * It developed specifically to smoothly inpaint horizontal or inclined missing data regions in sinograms
 * The method WILL not work satisfactory if you have lengthy vertical stripes of missing data
 *
 * Input:
 * 1. 2D image or sinogram with horizontal or inclined regions of missing data
 * 2. Mask of the same size as A in 'unsigned char' format (ones mark the region to inpaint, zeros belong to the data)
 * 3. Linear increment to increase searching window size in iterations, values from 1-3 is a good choice
 *
 * Output:
 * 1. Inpainted image or a sinogram
 * 2. updated mask
 *
 * Reference: D. Kazantsev (paper in preparation)
 */

/* Entry point.  Copies Input -> Output and M -> M_upd, then repeatedly sweeps
   the image, filling masked pixels with Gaussian-weighted averages of their
   unmasked neighbours while growing the search window by SW_increment per
   iteration.  iterationsNumb == 0 derives the iteration budget from the mask.
   trigger selects the sweep direction (0: row-major "Matlab", else column-major
   "Python").  dimZ is accepted but unused — 2D only.  Returns Output[0]. */
float NonlocalMarching_Inpaint_main(float *Input, unsigned char *M, float *Output, unsigned char *M_upd, int SW_increment, int iterationsNumb, int trigger, int dimX, int dimY, int dimZ)
{
    int i, j, i_m, j_m, counter, iter, iterations_number, W_fullsize, switchmask, switchcurr, counterElements;
    float *Gauss_weights;

    /* copying M to M_upd */
    copyIm_unchar(M, M_upd, dimX, dimY, 1);
    /* Copying the image */
    copyIm(Input, Output, dimX, dimY, 1);

    /* Find how many inpainting iterations (equal to the number of ones) required based on a mask */
    if (iterationsNumb == 0) {
        iterations_number = 0;
        for (i=0; i<dimY*dimX; i++) {
            if (M[i] == 1) iterations_number++;
        }
        /* cap the budget so it never exceeds the image width */
        if ((int)(iterations_number/dimY) > dimX) iterations_number = dimX;
    }
    else iterations_number = iterationsNumb;

    if (iterations_number == 0) printf("%s \n", "Nothing to inpaint, zero mask!");
    else {
        printf("%s %i \n", "Max iteration number equals to:", iterations_number);
        /* Inpainting iterations run here*/
        int W_halfsize = 1;
        for(iter=0; iter < iterations_number; iter++) {
            //if (mod (iter, 2) == 0) {W_halfsize += 1;}
            // printf("%i \n", W_halfsize);

            /* pre-calculation of Gaussian distance weights */
            W_fullsize = (int)(2*W_halfsize + 1); /*full size of similarity window */
            Gauss_weights = (float*)calloc(W_fullsize*W_fullsize,sizeof(float ));
            counter = 0;
            for(i_m=-W_halfsize; i_m<=W_halfsize; i_m++) {
                for(j_m=-W_halfsize; j_m<=W_halfsize; j_m++) {
                    Gauss_weights[counter] = exp(-(pow((i_m), 2) + pow((j_m), 2))/(2*W_halfsize*W_halfsize));
                    counter++;
                }
            }

            if (trigger == 0) {
                /*Matlab*/
                /* NOTE(review): each thread owns one row j, but inpaint_func reads and
                   writes a (2*W_halfsize+1)-wide window of Output/M_upd that spans
                   neighbouring rows — concurrent windows can overlap, a potential
                   data race; confirm this is an accepted heuristic approximation. */
                #pragma omp parallel for shared(Output, M_upd, Gauss_weights) private(i, j, switchmask, switchcurr)
                for(j=0; j<dimY; j++) {
                    switchmask = 0;
                    for(i=0; i<dimX; i++) {
                        switchcurr = 0;
                        if ((M_upd[j*dimX + i] == 1) && (switchmask == 0)) {
                            /* perform inpainting of the current pixel */
                            inpaint_func(Output, M_upd, Gauss_weights, i, j, dimX, dimY, W_halfsize, W_fullsize);
                            /* add value to the mask*/
                            M_upd[j*dimX + i] = 0;
                            switchmask = 1; switchcurr = 1;
                        }
                        if ((M_upd[j*dimX + i] == 0) && (switchmask == 1) && (switchcurr == 0)) {
                            /* perform inpainting of the previous (i-1) pixel */
                            inpaint_func(Output, M_upd, Gauss_weights, i-1, j, dimX, dimY, W_halfsize, W_fullsize);
                            /* add value to the mask*/
                            M_upd[(j)*dimX + i-1] = 0;
                            switchmask = 0;
                        }
                    }
                }
            }
            else {
                /*Python*/
                /* find a point in the mask to inpaint */
                /* NOTE(review): same potential cross-thread window overlap as above,
                   here along columns — confirm. */
                #pragma omp parallel for shared(Output, M_upd, Gauss_weights) private(i, j, switchmask, switchcurr)
                for(i=0; i<dimX; i++) {
                    switchmask = 0;
                    for(j=0; j<dimY; j++) {
                        switchcurr = 0;
                        if ((M_upd[j*dimX + i] == 1) && (switchmask == 0)) {
                            /* perform inpainting of the current pixel */
                            inpaint_func(Output, M_upd, Gauss_weights, i, j, dimX, dimY, W_halfsize, W_fullsize);
                            /* add value to the mask*/
                            M_upd[j*dimX + i] = 0;
                            switchmask = 1; switchcurr = 1;
                        }
                        if ((M_upd[j*dimX + i] == 0) && (switchmask == 1) && (switchcurr == 0)) {
                            /* perform inpainting of the previous (j-1) pixel */
                            inpaint_func(Output, M_upd, Gauss_weights, i, j-1, dimX, dimY, W_halfsize, W_fullsize);
                            /* add value to the mask*/
                            M_upd[(j-1)*dimX + i] = 0;
                            switchmask = 0;
                        }
                    }
                }
            }
            free(Gauss_weights);

            /* check if possible to terminate iterations earlier */
            counterElements = 0;
            for(i=0; i<dimX*dimY; i++) if (M_upd[i] == 0) counterElements++;

            if (counterElements == dimX*dimY) {
                printf("%s \n", "Padding completed!");
                break;
            }
            W_halfsize += SW_increment;
        }
        printf("%s %i \n", "Iterations stopped at:", iter);
    }
    /* returns the value of the first pixel (the arrays are the real output) */
    return *Output;
}

/* Fills pixel (i,j) of U with the Gaussian-weighted average of the unmasked
   (M_upd == 0) pixels inside the (2*W_halfsize+1)^2 window, then returns U[0].
   First pass accumulates the normalising weight sum, second pass the weighted
   average.  W_fullsize is accepted but unused here (weights are indexed by a
   running counter).  If no unmasked neighbour exists, the pixel is set to 0. */
float inpaint_func(float *U, unsigned char *M_upd, float *Gauss_weights, int i, int j, int dimX, int dimY, int W_halfsize, int W_fullsize)
{
    int i1, j1, i_m, j_m, counter;
    float sum_val, sumweight;

    /*method 1: inpainting based on Euclidian weights */
    sumweight = 0.0f; counter = 0; sum_val = 0.0f;
    for(i_m=-W_halfsize; i_m<=W_halfsize; i_m++) {
        i1 = i+i_m;
        for(j_m=-W_halfsize; j_m<=W_halfsize; j_m++) {
            j1 = j+j_m;
            if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
                if (M_upd[j1*dimX + i1] == 0) {
                    sumweight += Gauss_weights[counter];
                }
            }
            counter++;
        }
    }
    counter = 0; sum_val = 0.0f;
    for(i_m=-W_halfsize; i_m<=W_halfsize; i_m++) {
        i1 = i+i_m;
        for(j_m=-W_halfsize; j_m<=W_halfsize; j_m++) {
            j1 = j+j_m;
            if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
                if ((M_upd[j1*dimX + i1] == 0) && (sumweight != 0.0f)) {
                    /* we have data so add it with Euc weight */
                    sum_val += (Gauss_weights[counter]/sumweight)*U[j1*dimX + i1];
                }
            }
            counter++;
        }
    }
    U[j*dimX + i] = sum_val;
    return *U;
}
ExampleClusterer_Shared.h
/**
 * grove: ExampleClusterer_Shared.h
 * Copyright (c) Torr Vision Group, University of Oxford, 2017. All rights reserved.
 */

#ifndef H_GROVE_EXAMPLECLUSTERER_SHARED
#define H_GROVE_EXAMPLECLUSTERER_SHARED

#include <ORUtils/MathUtils.h>
#include <ORUtils/PlatformIndependence.h>

#include "../../util/Array.h"

namespace grove {

/**
 * \brief Computes the final cluster index for the specified example by following the parent links computed in compute_parent.
 *
 * \note  The compute_parent function split all the examples into subtrees and allocated a cluster index to the root
 *        of each subtree. With this function, we navigate each example's subtree until we find the root and then copy
 *        the cluster index across to the example. We also compute the size of each cluster.
 *
 * \param exampleSetIdx      The index of the example set containing the example.
 * \param exampleIdx         The index of the example within its example set.
 * \param exampleSetCapacity The maximum size of each example set.
 * \param parents            An image containing the parent indices for the examples.
 * \param clusterIndices     An image containing the cluster indices for the examples.
 * \param clusterSizes       An array in which to keep track of the size of each cluster. Must contain zeros
 *                           at the point at which the function is called.
 */
_CPU_AND_GPU_CODE_
inline void compute_cluster_index(int exampleSetIdx, int exampleIdx, int exampleSetCapacity, const int *parents, int *clusterIndices, int *clusterSizes)
{
  // Compute the linear offset to the beginning of the data associated with the specified example set.
  const int exampleSetOffset = exampleSetIdx * exampleSetCapacity;

  // Compute the raster offset of the specified example in the example sets image.
  const int exampleOffset = exampleSetOffset + exampleIdx;

  // Follow the parent links from the specified example up to the root of its subtree.
  // Note that there is no need to check if the example is valid, since compute_parent
  // set the parents of invalid examples to themselves.
  int currentIdx = exampleIdx;
  int parentIdx = parents[exampleOffset];
  while(parentIdx != currentIdx)
  {
    currentIdx = parentIdx;
    parentIdx = parents[exampleSetOffset + parentIdx];
  }

  // Get the cluster index of the subtree root and assign it to this example.
  const int clusterIdx = clusterIndices[exampleSetOffset + parentIdx];
  clusterIndices[exampleOffset] = clusterIdx;

  // If the cluster is valid then atomically increase its size (it might be invalid if we started from an invalid example).
  if(clusterIdx >= 0)
  {
    // Note: The __CUDA_ARCH__ check is needed because this function is not a template.
#if defined(__CUDACC__) && defined(__CUDA_ARCH__)
    atomicAdd(&clusterSizes[exampleSetOffset + clusterIdx], 1);
#else
#ifdef WITH_OPENMP
    #pragma omp atomic
#endif
    clusterSizes[exampleSetOffset + clusterIdx]++;
#endif
  }
}

/**
 * \brief Compute the density of examples around an individual example in one of the example sets.
 *
 * \param exampleSetIdx      The index of the example set containing the example.
 * \param exampleIdx         The index of the example within its example set.
 * \param exampleSets        An image containing the sets of examples to be clustered (one set per row). The width of
 *                           the image specifies the maximum number of examples that can be contained in each set.
 * \param exampleSetSizes    The number of valid examples in each example set.
 * \param exampleSetCapacity The maximum size of each example set.
 * \param sigma              The sigma of the Gaussian used when computing the example density.
 * \param densities          The memory in which to store the density of each example (one example set per row,
 *                           one density value per column).
 */
template <typename ExampleType>
_CPU_AND_GPU_CODE_TEMPLATE_
inline void compute_density(int exampleSetIdx, int exampleIdx, const ExampleType *exampleSets, const int *exampleSetSizes, int exampleSetCapacity, float sigma, float *densities)
{
  // Compute the linear offset to the beginning of the data associated with the specified example set.
  const int exampleSetOffset = exampleSetIdx * exampleSetCapacity;

  // Compute the raster offset of the specified example in the example sets image.
  const int exampleOffset = exampleSetOffset + exampleIdx;

  // Look up the size of the specified example set.
  const int exampleSetSize = exampleSetSizes[exampleSetIdx];

  float density = 0.0f;

  // If the example is valid, loop over all of the examples in its set and
  // compute the density based on the examples that are within 3 * sigma of it
  // (points further away would only make a small contribution to the density).
  if(exampleIdx < exampleSetSize)
  {
    const float threeSigmaSq = (3.0f * sigma) * (3.0f * sigma);
    const float minusOneOverTwoSigmaSq = -1.0f / (2.0f * sigma * sigma);

    const ExampleType centreExample = exampleSets[exampleOffset];
    for(int i = 0; i < exampleSetSize; ++i)
    {
      const ExampleType otherExample = exampleSets[exampleSetOffset + i];

      // Note: ExampleType must have a distance_squared function defined for it.
      const float normSq = distance_squared(centreExample, otherExample);
      if(normSq < threeSigmaSq)
      {
        density += expf(normSq * minusOneOverTwoSigmaSq);
      }
    }
  }

  densities[exampleOffset] = density;
}

/**
 * \brief Computes the parent and initial cluster indices to assign to the specified example as part of the neighbour-linking step
 *        of the really quick shift (RQS) algorithm.
 *
 * \note  Each example becomes part of a subtree in which each example has as its parent the closest example with higher density.
 *        For details about RQS, see the paper by Fulkerson and Soatto: http://vision.ucla.edu/~brian/papers/fulkerson10really.pdf
 *
 * \param exampleSetIdx           The index of the example set containing the example.
 * \param exampleIdx              The index of the example within its example set.
 * \param exampleSets             An image containing the sets of examples to be clustered (one set per row). The width of
 *                                the image specifies the maximum number of examples that can be contained in each set.
 * \param exampleSetCapacity      The maximum number of examples in an example set.
 * \param exampleSetSizes         The number of valid examples in each example set.
 * \param densities               An image containing the density of each example (one set per row, one density value per column).
 * \param tauSq                   The square of the maximum distance allowed between examples if they are to be linked.
 * \param parents                 An image in which to store a parent index for each example. This will generally be the index of
 *                                another example in the same set, but may be that of the example itself if it is a subtree root.
 * \param clusterIndices          An image in which to store an initial cluster index for each example. The initial cluster index
 *                                for an example will be set to -1 unless the example is a subtree root.
 * \param nbClustersPerExampleSet An array in which to keep track of the number of clusters in each example set. Must contain zeros
 *                                at the point at which the function is called.
 */
template <typename ExampleType>
_CPU_AND_GPU_CODE_TEMPLATE_
inline void compute_parent(int exampleSetIdx, int exampleIdx, const ExampleType *exampleSets, int exampleSetCapacity, const int *exampleSetSizes,
                           const float *densities, float tauSq, int *parents, int *clusterIndices, int *nbClustersPerExampleSet)
{
  // Compute the linear offset to the beginning of the data associated with the specified example set.
  const int exampleSetOffset = exampleSetIdx * exampleSetCapacity;

  // Compute the raster offset of the specified example in the example sets image.
  const int exampleOffset = exampleSetOffset + exampleIdx;

  // Look up the size of the specified example set.
  const int exampleSetSize = exampleSetSizes[exampleSetIdx];

  // Unless it becomes part of a subtree, each example starts as its own parent.
  int parentIdx = exampleIdx;

  // The index of the cluster associated with the specified example (-1 except for subtree roots).
  int clusterIdx = -1;

  // If the specified example is valid:
  if(exampleIdx < exampleSetSize)
  {
    // Read in the example and its density from global memory.
    const ExampleType centreExample = exampleSets[exampleOffset];
    const float centreDensity = densities[exampleOffset];

    // We are only interested in examples whose distance to the specified example is less than tau.
    float minDistanceSq = tauSq;

    // For each other example in the specified example's set:
    for(int i = 0; i < exampleSetSize; ++i)
    {
      if(i == exampleIdx) continue;

      // Read in the other example and its density from global memory.
      const ExampleType otherExample = exampleSets[exampleSetOffset + i];
      const float otherDensity = densities[exampleSetOffset + i];

      // Compute the squared distance between the specified example and the other example.
      // Note: ExampleType must have a distance_squared function defined for it.
      const float otherDistSq = distance_squared(centreExample, otherExample);

      // We are looking for the closest example with a higher density (doesn't matter by how much) than that of the specified example.
      if(otherDensity > centreDensity && otherDistSq < minDistanceSq)
      {
        minDistanceSq = otherDistSq;
        parentIdx = i;
      }
    }

    // If the specified example is still its own parent (i.e. it is a subtree root), we didn't find any close
    // example with a higher density, so grab a unique cluster index for the example.
    if(parentIdx == exampleIdx)
    {
#ifdef __CUDACC__
      clusterIdx = atomicAdd(&nbClustersPerExampleSet[exampleSetIdx], 1);
#else
#ifdef WITH_OPENMP
      #pragma omp atomic capture
#endif
      clusterIdx = nbClustersPerExampleSet[exampleSetIdx]++;
#endif
    }
  }

  // Write the parent of the specified example to global memory.
  parents[exampleOffset] = parentIdx;

  // Write the cluster index associated with the example to global memory. (This will be -1 unless the example is a subtree root.)
  clusterIndices[exampleOffset] = clusterIdx;
}

/**
 * \brief Computes the parameters for and stores the specified selected cluster for the specified example set.
 *
 * \note  The examples in the set must already have been grouped into clusters by the other functions in this file,
 *        and suitable clusters must have been selected for creation.
 * \note  The actual cluster parameters depend on the ClusterType; for this reason, cluster creation is delegated to a function
 *        called create_cluster_from_examples, which must be defined (elsewhere) for the relevant ExampleType and ClusterType pair.
 *
 * \param exampleSetIdx       The index of the example set for which the selected cluster is being created.
 * \param selectedClusterIdx  The index of the selected cluster to create (this is an index into the selected clusters array).
 * \param exampleSets         An image containing the sets of examples that have been clustered (one set per row). The width
 *                            of the image specifies the maximum number of examples that can be contained in each set.
 * \param exampleSetSizes     The number of valid examples in each example set.
 * \param exampleSetCapacity  The maximum size of each example set.
 * \param clusterIndices      An image containing the cluster indices for the examples.
 * \param selectedClusters    The indices of the clusters selected for each example set.
 * \param maxSelectedClusters The maximum number of clusters to extract from each example set.
 * \param clusterContainers   The output containers in which to store the clusters created for each example set.
 */
template <typename ExampleType, typename ClusterType, int MaxClusters>
_CPU_AND_GPU_CODE_TEMPLATE_
inline void create_selected_cluster(int exampleSetIdx, int selectedClusterIdx, const ExampleType *exampleSets, const int *exampleSetSizes, int exampleSetCapacity,
                                    const int *clusterIndices, const int *selectedClusters, int maxSelectedClusters, Array<ClusterType,MaxClusters> *clusterContainers)
{
  // Compute the linear offset to the beginning of the data associated with the selected clusters for the specified example set.
  const int selectedClustersOffset = exampleSetIdx * maxSelectedClusters;

  // Look up the real cluster index of the specified selected cluster (e.g. if this is the second selected
  // cluster for the example set, and selectedClusters[1] = 23, then the real cluster index is 23).
  const int clusterIdx = selectedClusters[selectedClustersOffset + selectedClusterIdx];

  // If the specified selected cluster is valid:
  if(clusterIdx >= 0)
  {
    // Compute the linear offset to the beginning of the data associated with the specified example set
    // in the example sets and cluster indices images.
    const int exampleSetOffset = exampleSetIdx * exampleSetCapacity;

    // Get a reference to the output clusters array for the specified example set.
    Array<ClusterType,MaxClusters>& outputClusters = clusterContainers[exampleSetIdx];

    // Compute the index in the output clusters array at which to store the selected cluster once it has been created.
    int outputClusterIdx = -1;

#ifdef __CUDACC__
    outputClusterIdx = atomicAdd(&outputClusters.size, 1);
#else
#ifdef WITH_OPENMP
    #pragma omp atomic capture
#endif
    outputClusterIdx = outputClusters.size++;
#endif

    // Create the cluster and write it into the output clusters array at the specified location.
    create_cluster_from_examples(
      clusterIdx,
      exampleSets + exampleSetOffset,
      clusterIndices + exampleSetOffset,
      exampleSetSizes[exampleSetIdx],
      outputClusters.elts[outputClusterIdx]
    );
  }
}

/**
 * \brief Resets a cluster container.
 *
 * \param exampleSetIdx     The index of the example set whose cluster container we want to reset.
 * \param clusterContainers A pointer to the cluster containers.
 */
template <typename ClusterType, int MaxClusters>
_CPU_AND_GPU_CODE_TEMPLATE_
inline void reset_cluster_container(int exampleSetIdx, Array<ClusterType,MaxClusters> *clusterContainers)
{
  // It is sufficient to just reset the size of the container (i.e. the number of clusters) to zero.
  // There is no need to modify the actual clusters, since they will be overwritten later.
  clusterContainers[exampleSetIdx].size = 0;
}

/**
 * \brief Resets the temporary variables associated with the specified example set.
 *
 * \param exampleSetIdx          The index of the example set for which we are resetting the temporary variables.
 * \param exampleSetCapacity     The maximum size of each example set.
 * \param nbClustersPerExampleSet The number of clusters extracted from each example set.
 * \param clusterSizes           An image containing the sizes of the extracted clusters (for all example sets).
 * \param clusterSizeHistograms  The histograms of cluster sizes for the different example sets.
 */
_CPU_AND_GPU_CODE_
inline void reset_temporaries_for_set(int exampleSetIdx, int exampleSetCapacity, int *nbClustersPerExampleSet, int *clusterSizes, int *clusterSizeHistograms)
{
  // Reset the number of clusters extracted from the specified example set to zero.
  nbClustersPerExampleSet[exampleSetIdx] = 0;

  // Compute the linear offset to the beginning of the data associated with the specified example set.
  const int exampleSetOffset = exampleSetIdx * exampleSetCapacity;

  // Reset the cluster sizes and histogram values associated with the specified example set.
  for(int i = 0; i < exampleSetCapacity; ++i)
  {
    clusterSizes[exampleSetOffset + i] = 0;
    clusterSizeHistograms[exampleSetOffset + i] = 0;
  }
}

/**
 * \brief Selects the largest clusters for the specified example set and writes their indices into the selected clusters image.
 *
 * \param exampleSetIdx          The index of the example set for which to select clusters.
 * \param clusterSizes           An image containing the sizes of the extracted clusters (for all example sets).
 * \param clusterSizeHistograms  The histograms of cluster sizes for the different example sets.
 * \param nbClustersPerExampleSet The number of clusters extracted from each example set.
 * \param exampleSetCapacity     The maximum size of each example set.
 * \param maxSelectedClusters    The maximum number of clusters to keep for each example set.
 * \param minClusterSize         The minimum size of cluster to keep.
 * \param selectedClusters       An image in which to store the indices of the clusters selected for each example set.
 */
_CPU_AND_GPU_CODE_
inline void select_clusters_for_set(int exampleSetIdx, const int *clusterSizes, const int *clusterSizeHistograms, const int *nbClustersPerExampleSet,
                                    int exampleSetCapacity, int maxSelectedClusters, int minClusterSize, int *selectedClusters)
{
  // Compute the linear offset to the beginning of the data associated with the specified example set.
  const int exampleSetOffset = exampleSetIdx * exampleSetCapacity;

  // Look up the number of valid clusters associated with the specified example set.
  const int nbValidClusters = nbClustersPerExampleSet[exampleSetIdx];

  // Compute the linear offset to the beginning of the data associated with the selected clusters for the specified example set.
  const int selectedClustersOffset = exampleSetIdx * maxSelectedClusters;

  // Reset the selected clusters for the specified example set (by setting all selected cluster indices to an invalid value).
  for(int i = 0; i < maxSelectedClusters; ++i)
  {
    selectedClusters[selectedClustersOffset + i] = -1;
  }

  // Starting from the largest clusters, scan downwards in the histogram to find the minimum size of cluster
  // we need to consider in order to try to select maxSelectedClusters clusters. Note that we will not be
  // able to select maxSelectedClusters if there are fewer suitable clusters than that to start with; if
  // that happens, we simply keep all of the suitable clusters we do have.
  int nbSelectedClusters = 0;
  int nbSmallestClustersToKeep = 0;
  int minSelectedClusterSize = exampleSetCapacity;
  while(minSelectedClusterSize > minClusterSize && nbSelectedClusters < maxSelectedClusters)
  {
    --minSelectedClusterSize;
    nbSmallestClustersToKeep = MIN(clusterSizeHistograms[exampleSetOffset + minSelectedClusterSize], maxSelectedClusters - nbSelectedClusters);
    nbSelectedClusters += nbSmallestClustersToKeep;
  }

  // If we couldn't find any suitable clusters at all, early out.
  if(nbSelectedClusters == 0) return;

  // Now walk through all of the clusters we do have, selecting (a) all of those whose size is strictly greater
  // than the minimum selected cluster size, and (b) as many as necessary of those whose size is exactly equal
  // to the minimum selected cluster size.
  nbSelectedClusters = 0;
  int nbSmallestClustersKept = 0;
  for(int clusterIdx = 0; clusterIdx < nbValidClusters; ++clusterIdx)
  {
    const int clusterSize = clusterSizes[exampleSetOffset + clusterIdx];
    if(clusterSize > minSelectedClusterSize || (clusterSize == minSelectedClusterSize && nbSmallestClustersKept++ < nbSmallestClustersToKeep))
    {
      selectedClusters[selectedClustersOffset + nbSelectedClusters++] = clusterIdx;
    }
  }

  // Sort the selected clusters in non-increasing order of size using a simple selection sort.
  // Note: Selection sort is quadratic, but the number of clusters is small enough for now that it doesn't matter.
  for(int i = 0; i < nbSelectedClusters; ++i)
  {
    // Find a cluster with maximum size in selectedClusters[i..nbSelectedClusters).
    int maxSize = clusterSizes[exampleSetOffset + selectedClusters[selectedClustersOffset + i]];
    int maxIdx = i;

    for(int j = i + 1; j < nbSelectedClusters; ++j)
    {
      int size = clusterSizes[exampleSetOffset + selectedClusters[selectedClustersOffset + j]];
      if(size > maxSize)
      {
        maxSize = size;
        maxIdx = j;
      }
    }

    // If selectedClusters[i] wasn't the maximal cluster, swap it with the cluster that was.
    if(maxIdx != i)
    {
      int temp = selectedClusters[selectedClustersOffset + i];
      selectedClusters[selectedClustersOffset + i] = selectedClusters[selectedClustersOffset + maxIdx];
      selectedClusters[selectedClustersOffset + maxIdx] = temp;
    }
  }
}

/**
 * \brief Updates the cluster size histogram for the specified example set based on the size of the specified cluster.
 *
 * \note  The cluster size histograms will be used later to select the largest clusters in each example set.
 *
 * \param exampleSetIdx         The index of the example set whose histogram should be updated.
 * \param clusterIdx            The index of the cluster whose size should be used to update the histogram.
 * \param clusterSizes          An image containing the sizes of the extracted clusters (for all example sets).
 * \param clusterSizeHistograms An image storing a cluster size histogram for each example set under consideration (one histogram per row).
 * \param exampleSetCapacity    The maximum number of elements in each example set.
 */
_CPU_AND_GPU_CODE_
inline void update_cluster_size_histogram(int exampleSetIdx, int clusterIdx, const int *clusterSizes, int *clusterSizeHistograms, int exampleSetCapacity)
{
  // Compute the linear offset to the beginning of the data associated with the specified example set.
  const int exampleSetOffset = exampleSetIdx * exampleSetCapacity;

  // Look up the size of the specified cluster.
  const int clusterSize = clusterSizes[exampleSetOffset + clusterIdx];

  // Atomically increment the corresponding bin in the histogram.
  // Note: The __CUDA_ARCH__ check is needed because this function is not a template.
#if defined(__CUDACC__) && defined(__CUDA_ARCH__)
  atomicAdd(&clusterSizeHistograms[exampleSetOffset + clusterSize], 1);
#else
#ifdef WITH_OPENMP
  #pragma omp atomic
#endif
  clusterSizeHistograms[exampleSetOffset + clusterSize]++;
#endif
}

}

#endif
7551.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization.
 * Fills A[i][j] with the deterministic value (i + j) / nj so that runs are
 * reproducible and the kernel has non-trivial input. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   NOTE(review): the kernel below only writes the interior of B; the border
   (row/column 0 and ni-1 / nj-1) is printed here but never initialized —
   confirm this is acceptable for output comparison. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        /* Break the dump into lines of 20 values to keep it readable. */
        fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
        if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}

/* Main computational kernel: 3x3 2-D convolution (stencil) of A into B.
   The whole function will be timed, including the call and return.
   Only interior points ([1, ni-2] x [1, nj-2]) are computed; each B[i][j]
   is a fixed weighted sum of the 3x3 neighborhood of A[i][j].  Every
   iteration writes a distinct B element and only reads A, so the loop nest
   parallelizes safely.  The `pragma scop`/`pragma endscop` markers delimit
   the region consumed by polyhedral source-to-source tools — keep them
   exactly around the loop nest. */
static
void kernel_conv2d(int ni, int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
  /* Offload to a target device if available; j must be private because the
     inner loop index is declared at function scope. */
  #pragma omp target teams distribute parallel for simd private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      for (j = 1; j < _PB_NJ - 1; ++j)
        {
          B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
            + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
            + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}

/* Driver: allocate and initialize the arrays, time the kernel, then print
   the live-out data (which also defeats dead-code elimination). */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
GB_binop__first_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__first_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__first_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__first_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_uint8) // A*D function (colscale): GB (_AxD__first_uint8) // D*A function (rowscale): GB (_DxB__first_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__first_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__first_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_uint8) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 
1 // BinaryOp: cij = aij #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_UINT8 || GxB_NO_FIRST_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__first_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__first_uint8) ( GrB_Matrix 
C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__first_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
flip_op.h
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <bitset>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// Maximum tensor rank supported by the flip kernel (bitset of flipped axes).
constexpr size_t dim_bitset_size = 64;

// Generic declaration of the flip operator kernel; device-specific
// implementations provide Compute (the CPU specialization is below).
template <typename DeviceContext, typename T>
class FlipKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override;
};

// CPU implementation: reverses the input tensor "X" along every axis listed
// in the "dims" attribute and writes the result to "Out".
template <typename T>
class FlipKernel<platform::CPUDeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const Tensor* x = ctx.Input<Tensor>("X");
    Tensor* out = ctx.Output<Tensor>("Out");
    auto flip_dims = ctx.template Attr<std::vector<int>>("dims");
    auto x_dims = x->dims();
    const int total_dims = x_dims.size();

    // Mark each axis to flip in a bitset; negative dims index from the end.
    // NOTE(review): assumes every entry of "dims" lies in
    // [-total_dims, total_dims) — an out-of-range value would index the
    // bitset out of bounds.  Presumably validated by the op's InferShape;
    // confirm.
    std::bitset<dim_bitset_size> dim_bitset;
    for (size_t i = 0; i < flip_dims.size(); ++i) {
      int dim = flip_dims[i];
      if (flip_dims[i] < 0) {
        dim += total_dims;
      }
      dim_bitset[dim] = true;
    }

    auto x_strides = framework::stride(x_dims);
    auto numel = x->numel();
    const T* x_data = x->data<T>();
    T* out_data = out->mutable_data<T>(ctx.GetPlace());

    // For each linear output index i, decompose i into per-axis coordinates
    // using the strides, mirror the coordinate on every flipped axis
    // (c -> size-1-c), and re-accumulate the source offset.  Iterations are
    // independent, so the loop parallelizes when built with MKLML/OpenMP.
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
    for (int64_t i = 0; i < numel; ++i) {
      int64_t cur_indices = i;
      int64_t rem = 0;
      int64_t dst_offset = 0;
      for (int d = 0; d < total_dims; ++d) {
        int64_t temp = cur_indices;
        // Coordinate along axis d, and the remainder for the next axis.
        cur_indices = cur_indices / x_strides[d];
        rem = temp - cur_indices * x_strides[d];
        dst_offset += dim_bitset[d]
                          ? (x_dims[d] - 1 - cur_indices) * x_strides[d]
                          : cur_indices * x_strides[d];
        cur_indices = rem;
      }
      out_data[i] = x_data[dst_offset];
    }
  }
};

}  // namespace operators
}  // namespace paddle
bml_norm_ellsort_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_norm.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_norm_ellsort.h"
#include "bml_types_ellsort.h"

#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Calculate the sum of squares of the elements of a matrix.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \return The sum of squares of A
 *
 * Only the rows owned by this rank (the half-open range
 * [localRowMin[myRank], localRowMax[myRank]) from A's domain) are summed;
 * the per-thread partial sums are combined with an OpenMP reduction.
 * REAL_T and TYPED_FUNC come from typed.h and select the element type.
 * NOTE(review): for complex REAL_T this accumulates xval*xval, not
 * xval*conj(xval) — confirm that is the intended semantics before relying
 * on this as a squared norm for complex matrices.
 */
double TYPED_FUNC(
    bml_sum_squares_ellsort) (
    bml_matrix_ellsort_t * A)
{
    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;

    REAL_T sum = 0.0;
    REAL_T *A_value = (REAL_T *) A->value;

    int myRank = bml_getMyRank();

#pragma omp parallel for \
    shared(N, M, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    reduction(+:sum)
    //for (int i = 0; i < N; i++)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        /* Row i stores A_nnz[i] packed entries; ROWMAJOR maps the
           (row, slot) pair into the flat ELLPACK value array. */
        for (int j = 0; j < A_nnz[i]; j++)
        {
            REAL_T xval = A_value[ROWMAJOR(i, j, N, M)];
            sum += xval * xval;
        }
    }

    return (double) REAL_PART(sum);
}

/** Calculate the sum of squares of all the core elements of a submatrix.
* * \ingroup norm_group * * \param A The matrix * \param core_pos Core rows of submatrix * \param core_size Number of core rows * \return The sum of squares of A */ double TYPED_FUNC( bml_sum_squares_submatrix_ellsort) ( bml_matrix_ellsort_t * A, int core_size) { int N = A->N; int M = A->M; int *A_index = (int *) A->index; int *A_nnz = (int *) A->nnz; REAL_T sum = 0.0; REAL_T *A_value = (REAL_T *) A->value; #pragma omp parallel for \ shared(N, M, A_index, A_nnz, A_value) \ reduction(+:sum) for (int i = 0; i < core_size; i++) { for (int j = 0; j < A_nnz[i]; j++) { if (A_index[ROWMAJOR(i, j, N, M)] < core_size) { REAL_T value = A_value[ROWMAJOR(i, j, N, M)]; sum += value * value; } } } return (double) REAL_PART(sum); } /** Calculate the sum of squares of the elements of \alpha A + \beta B. * * \ingroup norm_group * * \param A The matrix A * \param B The matrix B * \param alpha Multiplier for A * \param beta Multiplier for B * \pram threshold Threshold * \return The sum of squares of \alpha A + \beta B */ double TYPED_FUNC( bml_sum_squares2_ellsort) ( bml_matrix_ellsort_t * A, bml_matrix_ellsort_t * B, double alpha, double beta, double threshold) { int A_N = A->N; int A_M = A->M; int B_N = B->N; int B_M = B->M; int *A_index = (int *) A->index; int *A_nnz = (int *) A->nnz; int *B_index = (int *) B->index; int *B_nnz = (int *) B->nnz; int *A_localRowMin = A->domain->localRowMin; int *A_localRowMax = A->domain->localRowMax; REAL_T sum = 0.0; REAL_T *A_value = (REAL_T *) A->value; REAL_T *B_value = (REAL_T *) B->value; REAL_T alpha_ = (REAL_T) alpha; REAL_T beta_ = (REAL_T) beta; int myRank = bml_getMyRank(); #if !(defined(__IBMC__) || defined(__ibmxl__)) REAL_T y[A_N]; int ix[A_N], jjb[A_N]; memset(y, 0.0, A_N * sizeof(REAL_T)); memset(ix, 0, A_N * sizeof(int)); memset(jjb, 0, A_N * sizeof(int)); #endif #if defined(__IBMC__) || defined(__ibmxl__) #pragma omp parallel for \ shared(alpha_, beta_) \ shared(A_N, A_M, A_index, A_nnz, A_value) \ shared(A_localRowMin, 
A_localRowMax, myRank) \ shared(B_N, B_M, B_index, B_nnz, B_value) \ reduction(+:sum) #else #pragma omp parallel for \ shared(alpha_, beta_) \ shared(A_N, A_M, A_index, A_nnz, A_value) \ shared(A_localRowMin, A_localRowMax, myRank) \ shared(B_N, B_M, B_index, B_nnz, B_value) \ firstprivate(ix, jjb, y) \ reduction(+:sum) #endif //for (int i = 0; i < A_N; i++) for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++) { #if defined(__IBMC__) || defined(__ibmxl__) REAL_T y[A_N]; int ix[A_N], jjb[A_N]; memset(ix, 0, A_N * sizeof(int)); #endif int l = 0; for (int jp = 0; jp < A_nnz[i]; jp++) { int k = A_index[ROWMAJOR(i, jp, A_N, A_M)]; if (ix[k] == 0) { y[k] = 0.0; ix[k] = i + 1; jjb[l] = k; l++; } y[k] += alpha_ * A_value[ROWMAJOR(i, jp, A_N, A_M)]; } for (int jp = 0; jp < B_nnz[i]; jp++) { int k = B_index[ROWMAJOR(i, jp, B_N, B_M)]; if (ix[k] == 0) { y[k] = 0.0; ix[k] = i + 1; jjb[l] = k; l++; } y[k] += beta_ * B_value[ROWMAJOR(i, jp, B_N, B_M)]; } for (int jp = 0; jp < l; jp++) { if (ABS(y[jjb[jp]]) > threshold) sum += y[jjb[jp]] * y[jjb[jp]]; ix[jjb[jp]] = 0; y[jjb[jp]] = 0.0; jjb[jp] = 0; } } return (double) REAL_PART(sum); } /** Calculate the Frobenius norm of matrix A. * * \ingroup norm_group * * \param A The matrix A * \return The Frobenius norm of A */ double TYPED_FUNC( bml_fnorm_ellsort) ( bml_matrix_ellsort_t * A) { double fnorm = TYPED_FUNC(bml_sum_squares_ellsort) (A); #ifdef DO_MPI if (bml_getNRanks() > 1 && A->distribution_mode == distributed) { bml_sumRealReduce(&fnorm); } #endif fnorm = sqrt(fnorm); return (double) REAL_PART(fnorm); } /** Calculate the Frobenius norm of 2 matrices. 
* * \ingroup norm_group * * \param A The matrix A * \param B The matrix B * \return The Frobenius norm of A-B */ double TYPED_FUNC( bml_fnorm2_ellsort) ( bml_matrix_ellsort_t * A, bml_matrix_ellsort_t * B) { int N = A->N; int M = A->M; double fnorm = 0.0; REAL_T rvalue; int *A_nnz = (int *) A->nnz; int *A_index = (int *) A->index; int *A_localRowMin = A->domain->localRowMin; int *A_localRowMax = A->domain->localRowMax; REAL_T *A_value = (REAL_T *) A->value; int *B_nnz = (int *) B->nnz; int *B_index = (int *) B->index; REAL_T *B_value = (REAL_T *) B->value; REAL_T temp; int myRank = bml_getMyRank(); #pragma omp parallel for \ private(rvalue, temp) \ shared(N, M, A_nnz, A_index, A_value) \ shared(A_localRowMin, A_localRowMax, myRank) \ shared(B_nnz, B_index, B_value) \ reduction(+:fnorm) //for (int i = 0; i < N; i++) for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++) { for (int j = 0; j < A_nnz[i]; j++) { for (int k = 0; k < B_nnz[i]; k++) { if (A_index[ROWMAJOR(i, j, N, M)] == B_index[ROWMAJOR(i, k, N, M)]) { rvalue = B_value[ROWMAJOR(i, k, N, M)]; break; } rvalue = 0.0; } temp = A_value[ROWMAJOR(i, j, N, M)] - rvalue; fnorm += temp * temp; } for (int j = 0; j < B_nnz[i]; j++) { for (int k = 0; k < A_nnz[i]; k++) { if (A_index[ROWMAJOR(i, k, N, M)] == B_index[ROWMAJOR(i, j, N, M)]) { rvalue = A_value[ROWMAJOR(i, k, N, M)]; break; } rvalue = 0.0; } if (rvalue == 0.0) { temp = B_value[ROWMAJOR(i, j, N, M)]; fnorm += temp * temp; } } } #ifdef DO_MPI if (bml_getNRanks() > 1 && A->distribution_mode == distributed) { bml_sumRealReduce(&fnorm); } #endif fnorm = sqrt(fnorm); return (double) REAL_PART(fnorm); }
hello_openmp.c
/******************************************************************************
 * FILE: omp_hello.c
 * DESCRIPTION:
 *   OpenMP Example - Hello World - C/C++ Version
 *   In this simple example, the master thread forks a parallel region.
 *   All threads in the team obtain their unique thread number and print it.
 *   The master thread only prints the total number of threads. Two OpenMP
 *   library routines are used to obtain the number of threads and each
 *   thread's number.
 * AUTHOR: Blaise Barney  5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/* Entry point: spawn an OpenMP parallel region in which every thread prints
 * its thread id, and thread 0 additionally prints the team size.
 * Returns EXIT_SUCCESS. */
int main (int argc, char *argv[])
{
  int nthreads, tid;

  /* Fork a team of threads giving them their own copies of variables.
   * nthreads and tid are private so each thread has its own instance. */
#pragma omp parallel private(nthreads, tid)
  {
    /* Obtain thread number */
    tid = omp_get_thread_num();
    printf("Hello World from thread = %d\n", tid);

    /* Only master thread does this */
    if (tid == 0)
      {
        nthreads = omp_get_num_threads();
        printf("Number of threads = %d\n", nthreads);
      }
  }  /* All threads join master thread and disband */

  /* Fix: the original fell off the end of main with no return statement
   * (implicitly 0 only since C99; UB in C89 if the exit status is used).
   * Return an explicit success status. */
  return EXIT_SUCCESS;
}
bisectingKmeans.c
/* Kalign - a multiple sequence alignment program Copyright 2006, 2019 Timo Lassmann This file is part of kalign. Kalign is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. */ #ifdef HAVE_OPENMP #include <omp.h> #endif #include <xmmintrin.h> #include "tlrng.h" #include "msa.h" #include "bisectingKmeans.h" #include "global.h" #include "msa.h" #include "aln_task.h" #include "sequence_distance.h" #include "euclidean_dist.h" /* #include "alignment.h" */ #include "pick_anchor.h" #include "esl_stopwatch.h" struct node{ struct node* left; struct node* right; int id; int d; }; struct kmeans_result{ int* sl; int* sr; int nl; int nr; float score; }; static struct kmeans_result* alloc_kmeans_result(int num_samples); static void free_kmeans_results(struct kmeans_result* k); struct node* upgma(float **dm,int* samples, int numseq); struct node* alloc_node(void); int label_internal(struct node*n, int label); //int label_internal(struct node*n, int label); int* readbitree(struct node* p,int* tree); /* void print_tree(struct node*n, struct aln_tasks *t) ; */ static void create_tasks(struct node*n, struct aln_tasks* t); static int sort_tasks_by_priority(const void *a, const void *b); /* void printTree(struct node* curr,int depth); */ //struct node* bisecting_kmeans(struct msa* msa, struct node* n, float** dm,int* samples,int numseq, int num_anchors,int num_samples,struct rng_state* rng); static struct node* bisecting_kmeans(struct msa* msa, struct 
/* Tail of a forward declaration of bisecting_kmeans(); the opening part of
   this prototype lies before the start of this chunk. */
node* n, float** dm,int* samples,int numseq, int num_anchors,int num_samples,struct rng_state* rng, int d);

/* Build a guide tree for the sequences in `msa` by recursive bisecting
   k-means clustering in anchor-distance space, then flatten the tree into a
   priority-sorted list of alignment tasks in *tasks.
   Returns OK on success, FAIL on error (ASSERT/RUNP jump to ERROR). */
int build_tree_kmeans(struct msa* msa, struct aln_param* ap,struct aln_tasks** tasks)
{
    //struct drand48_data randBuffer;
    struct aln_tasks* t = NULL;
    struct node* root = NULL;
    float** dm = NULL;      /* per-sequence distances to the anchor sequences */
    int* samples = NULL;    /* sequence indices handed to the clustering */
    int* anchors = NULL;    /* indices of the chosen anchor sequences */
    int num_anchors;
    int numseq;
    int i;
    ASSERT(msa != NULL, "No alignment.");
    //ASSERT(param != NULL, "No input parameters.");
    ASSERT(ap != NULL, "No alignment parameters.");
    t = *tasks;
    numseq = msa->numseq;
    DECLARE_TIMER(timer);
    /* pick anchors . */
    // LOG_MSG("Calculating pairwise distances");
    START_TIMER(timer);
    // LOG_MSG("pick_anchor");
    RUNP(anchors = pick_anchor(msa, &num_anchors));
    // LOG_MSG("d_estimation");
    // fprintf(stdout,"anchors:%d,num_anchors:%d\n",*anchors,num_anchors);
    RUNP(dm = d_estimation(msa, anchors, num_anchors,0));//les,int pair)
    // LOG_MSG("done");
    STOP_TIMER(timer);
    GET_TIMING(timer);
    //LOG_MSG("Done in %f sec.", GET_TIMING(timer));
    MFREE(anchors);
    /* Start with every sequence in one cluster; bisecting_kmeans() takes
       ownership of `samples` and frees it. */
    MMALLOC(samples, sizeof(int)* numseq);
    for(i = 0; i < numseq;i++){
        samples[i] = i;
    }
    //RUNP(root = alloc_node());
    START_TIMER(timer);
    // LOG_MSG("Building guide tree.");
#ifdef HAVE_OPENMP
    /* omp_set_num_threads(4); */
#pragma omp parallel
    // Only the first thread will spawn other threads
#pragma omp single nowait
    {
#endif
        root = bisecting_kmeans(msa,root, dm, samples, numseq, num_anchors, numseq, ap->rng,0);
#ifdef HAVE_OPENMP
    }
#endif
    STOP_TIMER(timer);
    GET_TIMING(timer);
    //LOG_MSG("Done in %f sec.", GET_TIMING(timer));
    /* Number internal nodes and record per-node depth, then emit one task
       per internal node.  NOTE: create_tasks() also frees the tree's child
       nodes as it walks, so only `root` itself is freed below. */
    label_internal(root, numseq);
    create_tasks(root, t);
    qsort(t->list, t->n_tasks, sizeof(struct task*), sort_tasks_by_priority);
    /* for(i = 0; i < t->n_tasks;i++){ */
    /* fprintf(stdout,"%3d %3d -> %3d (p: %d)\n", t->list[i]->a, t->list[i]->b, t->list[i]->c, t->list[i]->p); */
    /* } */
    //*task_list = t;
    /*exit(0);
    ap->tree[0] = 1;
    ap->tree = readbitree(root, ap->tree);
    for (i = 0; i < (numseq*3);i++){
    tree[i] = tree[i+1];
    }*/
    MFREE(root);
    for(i =0 ; i < msa->numseq;i++){
        _mm_free(dm[i]);
    }
    MFREE(dm);
    DESTROY_TIMER(timer);
    return OK;
ERROR:
    return FAIL;
}

/* Recursively split the sample set in two with k-means (k = 2) in the
   anchor-distance space and build a binary guide tree.  Clusters smaller
   than 100 samples are finished with UPGMA on exact pairwise distances.
   Takes ownership of `samples` (freed here); `dm` is shared and read-only.
   Under OpenMP the two child calls run as tasks when numseq > 2000. */
struct node* bisecting_kmeans(struct msa* msa, struct node* n, float** dm,int* samples,int numseq, int num_anchors,int num_samples,struct rng_state* rng, int d)
{
    struct kmeans_result* res_tmp = NULL;
    struct kmeans_result* best = NULL;
    struct kmeans_result* res_ptr = NULL;
    int tries = 50;            /* number of k-means restarts */
    int t_iter;
    int r;
    int* sl = NULL;            /* sample ids assigned to the left cluster */
    int* sr = NULL;            /* sample ids assigned to the right cluster */
    int num_l,num_r;
    float* w = NULL;           /* scratch: mean vector / accumulation target */
    float* wl = NULL;          /* left centroid being accumulated */
    float* wr = NULL;          /* right centroid being accumulated */
    float* cl = NULL;          /* current left centroid */
    float* cr = NULL;          /* current right centroid */
    float dl = 0.0F;
    float dr = 0.0F;
    float score;
    int i,j,s;
    int num_var;               /* num_anchors rounded up to a multiple of 8 */
    int stop = 0;
    if(num_samples < 100){
        /* Small cluster: compute exact pairwise distances (this local `dm`
           deliberately shadows the anchor-distance matrix) and run UPGMA. */
        float** dm = NULL;
        RUNP(dm = d_estimation(msa, samples, num_samples,1));// anchors, num_anchors,1));
        n = upgma(dm,samples, num_samples);
        gfree(dm);
        MFREE(samples);
        return n;
    }
    num_var = num_anchors / 8;
    if( num_anchors%8){
        num_var++;
    }
    num_var = num_var << 3;
    /* 32-byte aligned buffers so edist_256() can use AVX loads. */
    wr = _mm_malloc(sizeof(float) * num_var,32);
    wl = _mm_malloc(sizeof(float) * num_var,32);
    cr = _mm_malloc(sizeof(float) * num_var,32);
    cl = _mm_malloc(sizeof(float) * num_var,32);
    RUNP(best = alloc_kmeans_result(num_samples));
    RUNP(res_tmp = alloc_kmeans_result(num_samples));
    best->score = FLT_MAX;
    tries = MACRO_MIN(tries, num_samples);
    for(t_iter = 0;t_iter < tries;t_iter++){
        res_tmp->score = FLT_MAX;
        sl = res_tmp->sl;
        sr = res_tmp->sr;
        w = _mm_malloc(sizeof(float) * num_var,32);
        for(i = 0; i < num_var;i++){
            w[i] = 0.0F;
            wr[i] = 0.0F;
            wl[i] = 0.0F;
            cr[i] = 0.0F;
            cl[i] = 0.0F;
        }
        /* w = mean of all samples in anchor space. */
        for(i = 0; i < num_samples;i++){
            s = samples[i];
            for(j = 0; j < num_anchors;j++){
                w[j] += dm[s][j];
            }
        }
        for(j = 0; j < num_anchors;j++){
            w[j] /= (float)num_samples;
        }
        //r = tl_random_int(rng , num_samples);
        //r = sel[t_iter];
        r = t_iter;   /* deterministic restart seed: the t_iter-th sample */
        s = samples[r];
        //LOG_MSG("Selected %d\n",s);
        /* Seed centroids: left = chosen sample; right = its mirror image
           through the mean. */
        for(j = 0; j < num_anchors;j++){
            cl[j] = dm[s][j];
        }
        for(j = 0; j < num_anchors;j++){
            cr[j] = w[j] - (cl[j] - w[j]);
            // fprintf(stdout,"%f %f %f\n", cl[j],cr[j],w[j]);
        }
        _mm_free(w);
        /* check if cr == cl - we have identical sequences */
        s = 0;
        for(j = 0; j < num_anchors;j++){
            if(fabsf(cl[j]-cr[j]) > 1.0E-6){
                s = 1;
                break;
            }
        }
        if(!s){
            /* Degenerate split: first sample left, the rest right. */
            score = 0.0F;
            num_l = 0;
            num_r = 0;
            sl[num_l] = samples[0];
            num_l++;
            for(i =1 ; i <num_samples;i++){
                sr[num_r] = samples[i];
                num_r++;
            }
        }else{
            w = NULL;
            /* Lloyd iterations: assign each sample to the nearer centroid,
               recompute both centroids, stop when neither moved. */
            while(1){
                stop++;
                if(stop == 10000){   /* hard safety cap on total iterations */
                    ERROR_MSG("Failed.");
                }
                num_l = 0;
                num_r = 0;
                for(i = 0; i < num_anchors;i++){
                    wr[i] = 0.0F;
                    wl[i] = 0.0F;
                }
                score = 0.0f;
                for(i = 0; i < num_samples;i++){
                    s = samples[i];
#ifdef HAVE_AVX2
                    edist_256(dm[s], cl, num_anchors, &dl);
                    edist_256(dm[s], cr, num_anchors, &dr);
#else
                    edist_serial(dm[s], cl, num_anchors, &dl);
                    edist_serial(dm[s], cr, num_anchors, &dr);
#endif
                    score += MACRO_MIN(dl,dr);
                    if(dr < dl){
                        w = wr;
                        sr[num_r] = s;
                        num_r++;
                    }else if (dr > dl){
                        w = wl;
                        sl[num_l] = s;
                        num_l++;
                    }else{
                        /* Tie: alternate sides to avoid empty clusters. */
                        if(i & 1){
                            w = wr;
                            sr[num_r] = s;
                            num_r++;
                        }else{
                            w = wl;
                            sl[num_l] = s;
                            num_l++;
                        }
                    }
                    for(j = 0; j < num_anchors;j++){
                        w[j] += dm[s][j];
                    }
                }
                for(j = 0; j < num_anchors;j++){
                    wl[j] /= (float)num_l;
                    wr[j] /= (float)num_r;
                }
                /* Converged when neither centroid changed bit-for-bit. */
                s = 0;
                for(j = 0; j < num_anchors;j++){
                    if(wl[j] != cl[j]){
                        s = 1;
                        break;
                    }
                    if(wr[j] != cr[j]){
                        s = 1;
                        break;
                    }
                }
                if(s){
                    /* Swap old and new centroid buffers and iterate again. */
                    w = cl;
                    cl = wl;
                    wl = w;
                    w = cr;
                    cr = wr;
                    wr = w;
                }else{
                    break;
                }
            }
        }
        res_tmp->nl = num_l;
        res_tmp->nr = num_r;
        res_tmp->score = score;
        if(res_tmp->score < best->score){
            //LOG_MSG("Better!!! %f %f", res_tmp->score,best->score);
            res_ptr = res_tmp;   /* keep the better split by swapping structs */
            res_tmp = best;
            best = res_ptr;
        }
    }
    free_kmeans_results(res_tmp);
    /* Keep the index arrays of the best split and free only the wrapper
       struct; sl/sr are released by the recursive calls (MFREE(samples)). */
    sl = best->sl;
    sr = best->sr;
    num_l = best->nl;
    num_r = best->nr;
    MFREE(best);
    _mm_free(wr);
    _mm_free(wl);
    _mm_free(cr);
    _mm_free(cl);
    MFREE(samples);
    n = alloc_node();
    //LOG_MSG("Done");
#ifdef HAVE_OPENMP
#pragma omp task shared(n) if(numseq > 2000)
#endif
    n->left = bisecting_kmeans(msa,n->left, dm, sl, numseq, num_anchors, num_l,rng,d);
#ifdef HAVE_OPENMP
#pragma omp task shared(n) if(numseq > 2000)
#endif
    n->right = bisecting_kmeans(msa,n->right, dm, sr, numseq, num_anchors, num_r,rng,d);
#ifdef HAVE_OPENMP
#pragma omp taskwait
#endif
    return n;
ERROR:
    return NULL;
}

/* Classic UPGMA clustering on a full (numseq x numseq) distance matrix.
   `samples` supplies the leaf ids.  The matrix dm is modified in place.
   Returns the root of the joined tree, or NULL on allocation failure. */
struct node* upgma(float **dm,int* samples, int numseq)
{
    struct node** tree = NULL;
    struct node* tmp = NULL;
    int i,j;
    int *as = NULL;        /* activity flags; 0 marks an already-joined row */
    float max;             /* tracks the current minimum distance (despite the name) */
    int node_a = 0;
    int node_b = 0;
    int cnode = numseq;    /* id of the next internal node to create */
    int numprofiles;
    numprofiles = (numseq << 1) - 1;
    MMALLOC(as,sizeof(int)*numseq);
    for (i = numseq; i--;){
        as[i] = i+1;
    }
    MMALLOC(tree,sizeof(struct node*)*numseq);
    for (i = 0;i < numseq;i++){
        tree[i] = NULL;
        tree[i] = alloc_node();
        tree[i]->id = samples[i];
    }
    while (cnode != numprofiles){
        /* Find the closest active pair (node_a, node_b). */
        max = FLT_MAX;
        for (i = 0;i < numseq-1; i++){
            if (as[i]){
                for ( j = i + 1;j < numseq;j++){
                    if (as[j]){
                        if (dm[i][j] < max){
                            max = dm[i][j];
                            node_a = i;
                            node_b = j;
                        }
                    }
                }
            }
        }
        /* Join the pair under a new internal node stored in slot node_a. */
        tmp = NULL;
        tmp = alloc_node();
        tmp->left = tree[node_a];
        tmp->right = tree[node_b];
        tree[node_a] = tmp;
        tree[node_b] = NULL;
        /*deactivate sequences to be joined*/
        as[node_a] = cnode+1;
        as[node_b] = 0;
        cnode++;
        /*calculate new distances*/
        for (j = numseq;j--;){
            if (j != node_b){
                dm[node_a][j] = (dm[node_a][j] + dm[node_b][j])*0.5F + 0.001F;
            }
            //fprintf(stdout,"\n");
        }
        dm[node_a][node_a] = 0.0F;
        for (j = numseq;j--;){
            dm[j][node_a] = dm[node_a][j];
        }
    }
    tmp = tree[node_a];
    MFREE(tree);
    MFREE(as);
    return tmp;
ERROR:
    return NULL;
}

/* Allocate a tree node with no children, id -1 and depth 0.
   Returns NULL on allocation failure. */
struct node* alloc_node(void)
{
    struct node* n = NULL;
    MMALLOC(n, sizeof(struct node));
    n->left = NULL;
    n->right = NULL;
    n->id = -1;
    n->d = 0;
    return n;
ERROR:
    return NULL;
}

/* Post-order walk: give every node still carrying id == -1 (internal
   nodes) the next id starting at `label`, and set each internal node's
   depth d to max(child depths) + 1.  Returns the next unused label. */
int label_internal(struct node*n, int label)
{
    //n->d = d;
    if(n->left){
        label = label_internal(n->left, label);
    }
    if(n->right){
        label = label_internal(n->right, label);
    }
    if(n->left && n->right){
        n->d = MACRO_MAX(n->left->d,n->right->d) + 1;
    }
    if(n->id == -1){
        n->id = label;
        label++;
    }
    return label;
}

/* Pre-order walk appending one task per internal node to t->list
   (a = left child id, b = right child id, c = own id, p = depth, used as
   priority).  Assumes t->list was pre-allocated large enough.
   NOTE(review): this function also frees both children of every internal
   node after recursing, so the tree cannot be reused afterwards; the
   caller frees only the root. */
void create_tasks(struct node*n, struct aln_tasks* t)
{
    int i;
    /* Leftover debug indentation loop - body is commented out, no effect. */
    for(i = 0; i < n->d;i++){
        //fprintf(stdout," ");
    }
    if(n->left && n->right){
        struct task*task;
        task = t->list[t->n_tasks];
        task->a = n->left->id;
        task->b = n->right->id;
        task->c = n->id;
        task->p = n->d;
        //fprintf(stdout,"Node %d %d depends on %d %d \n", n->id, n->d , n->left->id, n->right->id);
        t->n_tasks++;
    }
    if(n->left){
        create_tasks(n->left,t);
    }
    if(n->right){
        create_tasks(n->right,t);
    }
    if(n->left){
        if(n->right){
            MFREE(n->left);
            MFREE(n->right);
        }
    }
}

/* Legacy tree flattening: post-order emit (left id, right id, own id)
   triplets into `tree`, using tree[0] as a running write cursor, freeing
   child nodes as it goes.  Only referenced from commented-out code above. */
int* readbitree(struct node* p,int* tree)
{
    if(p->left){
        tree = readbitree(p->left,tree);
    }
    if(p->right){
        tree = readbitree(p->right,tree);
    }
    if(p->left){
        if(p->right){
            tree[tree[0]] = p->left->id;
            tree[tree[0]+1] = p->right->id;
            tree[tree[0]+2] = p->id;
            tree[0] +=3;
            MFREE(p->left);
            MFREE(p->right);
        }
    }
    return tree;
}

/* Allocate a kmeans_result with room for num_samples indices on each side;
   score starts at FLT_MAX.  Returns NULL on allocation failure. */
struct kmeans_result* alloc_kmeans_result(int num_samples)
{
    struct kmeans_result* k = NULL;
    ASSERT(num_samples != 0, "No samples???");
    MMALLOC(k, sizeof(struct kmeans_result));
    k->nl = 0;
    k->nr = 0;
    k->sl = NULL;
    k->sr = NULL;
    MMALLOC(k->sl, sizeof(int) * num_samples);
    MMALLOC(k->sr, sizeof(int) * num_samples);
    k->score = FLT_MAX;
    return k;
ERROR:
    free_kmeans_results(k);
    return NULL;
}

/* Free a kmeans_result and its index arrays; safe to call with NULL. */
void free_kmeans_results(struct kmeans_result* k)
{
    if(k){
        if(k->sl){
            MFREE(k->sl);
        }
        if(k->sr){
            MFREE(k->sr);
        }
        MFREE(k);
    }
}

/* qsort comparator ordering tasks by ascending priority p.
   NOTE(review): this comparator never returns 0 and is not antisymmetric
   for equal priorities (cmp(a,b) and cmp(b,a) both return 1 when the p
   values are equal); the C standard requires a consistent ordering for
   qsort - consider returning 0 on equality.  TODO confirm and fix. */
int sort_tasks_by_priority(const void *a, const void *b)
{
    struct task* const *one = a;
    struct task* const *two = b;
    if((*one)->p >= (*two)->p){
        return 1;
    }else{
        return -1;
    }
}
/* ===== image.c — start of second concatenated source file ===== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/magick-private.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #include "MagickCore/xwindow-private.h" /* Constant 
declaration. */
const char
  BackgroundColor[] = "#ffffff",  /* white */
  BorderColor[] = "#dfdfdf",  /* gray */
  DefaultTileFrame[] = "15x15+3+3",
  DefaultTileGeometry[] = "120x120+4+3>",
  DefaultTileLabel[] = "%f\n%G\n%b",
  ForegroundColor[] = "#000",  /* black */
  LoadImageTag[] = "Load/Image",
  LoadImagesTag[] = "Load/Images",
  MatteColor[] = "#bdbdbd",  /* gray */
  PSDensityGeometry[] = "72.0x72.0",
  PSPageGeometry[] = "612x792",
  SaveImageTag[] = "Save/Image",
  SaveImagesTag[] = "Save/Images",
  TransparentColor[] = "#00000000";  /* transparent black */

const double
  DefaultResolution = 72.0;

/*
  AcquireImage() returns a pointer to an image structure initialized to
  default values.

  Format: Image *AcquireImage(const ImageInfo *image_info,
            ExceptionInfo *exception)

  image_info: many of the image default values are set from this structure
    (filename, compression, depth, background color, ...); may be NULL.
  exception: return any errors or warnings in this structure.
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireMagickMemory(sizeof(*image));
  if (image == (Image *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image,0,sizeof(*image));
  /*
    Initialize Image structure.
  */
  (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;   /* sRGB default gamma */
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
    exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image->transparent_color,exception);
  GetTimerInfo(&image->timer);
  image->cache=AcquirePixelCache(0);
  image->channel_mask=DefaultChannels;
  image->channel_map=AcquirePixelChannelMap();
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=time((time_t *) NULL);
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AcquireSemaphoreInfo();
  image->signature=MagickSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MaxTextExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
  if (image_info->size != (char *) NULL)
    {
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(image_info->density,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      /* A single density value applies to both axes. */
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->matte_color=image_info->matte_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  /*
    Set all global options that map to per-image settings.
  */
  (void) SyncImageSettings(image_info,image,exception);
  /*
    Global options that are only set for new images.
  */
  image->image_info=(ImageInfo *) NULL;
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /* '>' clamps the delay down, '<' clamps it up; otherwise set it. */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}

/*
  AcquireImageInfo() allocates the ImageInfo structure.

  Format: ImageInfo *AcquireImageInfo(void)
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *image_info;

  image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info));
  if (image_info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(image_info);
  return(image_info);
}

/*
  AcquireNextImage() initializes the next image in a sequence to default
  values.  The next member of image points to the newly allocated image.
  If there is a memory shortage, next is assigned NULL.

  Format: void AcquireNextImage(const ImageInfo *image_info,Image *image,
            ExceptionInfo *exception)

  image_info: many of the image default values are set from this structure.
  image: the image.
  exception: return any errors or warnings in this structure.
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info,exception);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MaxTextExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MaxTextExtent);
  /* Share the blob with the new frame and link it into the list. */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}

/*
  AppendImages() takes all images from the current image pointer to the end
  of the image list and appends them to each other top-to-bottom if the
  stack parameter is true, otherwise left-to-right.

  The current gravity setting effects how the image is justified in the
  final image.

  Format: Image *AppendImages(const Image *images,
            const MagickBooleanType stack,ExceptionInfo *exception)

  images: the image sequence.
  stack: a value other than 0 stacks the images top-to-bottom.
  exception: return any errors or warnings in this structure.
% */
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  alpha_trait=images->alpha_trait;
  number_images=1;
  width=images->columns;
  height=images->rows;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /* Result blends alpha if any frame does. */
    if (next->alpha_trait == BlendPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Stacked: width is the max, heights accumulate. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    /* Side by side: widths accumulate, height is the max. */
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
    {
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  append_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(append_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    Image
      *image;

    MagickBooleanType
      proceed;

    image=CloneImage(next,0,0,MagickTrue,exception);
    if (image == (Image *) NULL)
      break;
    (void) TransformImageColorspace(image,append_image->colorspace,exception);
    /* Justify the frame inside the canvas per its gravity setting. */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(status) \
      magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;

      register const Quantum
        *restrict p;

      register Quantum
        *restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        image->columns,1,exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(image,&pixel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if (GetPixelReadMask(image,p) == 0)
          {
            /* Masked-off pixel: paint background instead of copying.
               NOTE(review): spelling follows MagickCore's pixel-accessor.h. */
            SetPixelBackgoundColor(append_image,q);
            p+=GetPixelChannels(image);
            q+=GetPixelChannels(append_image);
            continue;
          }
        GetPixelInfoPixel(image,p,&pixel);
        SetPixelInfoPixel(append_image,&pixel,q);
        p+=GetPixelChannels(image);
        q+=GetPixelChannels(append_image);
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /* Advance the paste origin for the next frame. */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=DestroyImage(image);
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}

/*
  CatchImageException() returns if no exceptions are found in the image
  sequence, otherwise it determines the most severe exception and reports
  it as a warning or error depending on the severity.

  Format: ExceptionType CatchImageException(Image *image)

  image: an image sequence.
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception;

  ExceptionType
    severity;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception=AcquireExceptionInfo();
  CatchException(exception);
  severity=exception->severity;
  exception=DestroyExceptionInfo(exception);
  return(severity);
}

/*
  ClipImagePath() sets the image clip mask based any clipping path
  information if it exists.

  Format: MagickBooleanType ClipImagePath(Image *image,const char *pathname,
            const MagickBooleanType inside,ExceptionInfo *exception)

  image: the image.
  pathname: name of clipping path resource.  If name is preceded by #, use
    clipping path numbered by name.
  inside: if non-zero, later operations take effect inside clipping path.
    Otherwise later operations take effect outside clipping path.
  exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
  /* Convenience wrapper: clip to the first (#1) clipping path, inside. */
  return(ClipImagePath(image,"#1",MagickTrue,exception));
}

MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag  "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  /* Look the path up in the image's 8BIM (Photoshop) properties. */
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property,exception);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /* Render the stored path blob into a mask image. */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask,exception);
      if
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image)); if (clone_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickSignature; clone_image->storage_class=image->storage_class; clone_image->number_channels=image->number_channels; clone_image->number_meta_channels=image->number_meta_channels; clone_image->metacontent_extent=image->metacontent_extent; clone_image->colorspace=image->colorspace; clone_image->read_mask=image->read_mask; clone_image->write_mask=image->write_mask; clone_image->alpha_trait=image->alpha_trait; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; if (image->colormap != (PixelInfo *) NULL) { /* Allocate and copy the image colormap. 
*/ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickMemory(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; clone_image->channel_mask=image->channel_mask; clone_image->channel_map=ClonePixelChannelMap(image->channel_map); (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MaxTextExtent); (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent); (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent); clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AcquireSemaphoreInfo(); if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } scale=1.0; if 
(image->columns != 0) scale=(double) columns/(double) image->columns; clone_image->page.width=(size_t) floor(scale*image->page.width+0.5); clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5); clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5); scale=1.0; if (image->rows != 0) scale=(double) rows/(double) image->rows; clone_image->page.height=(size_t) floor(scale*image->page.height+0.5); clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5); clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5); clone_image->columns=columns; clone_image->rows=rows; clone_image->cache=ClonePixelCache(image->cache); return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
% */
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  clone_info=AcquireImageInfo();
  /* NULL input is legal: return a freshly default-initialized ImageInfo. */
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  /* Scalar/enum members are copied by plain assignment. */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /* String members are deep-copied so the clone owns its own buffers. */
  (void) CloneString(&clone_info->size,image_info->size);
  (void) CloneString(&clone_info->extract,image_info->extract);
  (void) CloneString(&clone_info->scenes,image_info->scenes);
  (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  (void) CloneString(&clone_info->sampling_factor,image_info->sampling_factor);
  (void) CloneString(&clone_info->server_name,image_info->server_name);
  (void) CloneString(&clone_info->font,image_info->font);
  (void) CloneString(&clone_info->texture,image_info->texture);
  (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  (void) CloneString(&clone_info->view,image_info->view);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /* The pixel cache is shared by reference count, not deep-copied. */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->channel=image_info->channel;
  (void) CloneImageOptions(clone_info,image_info);
  /* debug reflects the current logging state, not the source's snapshot. */
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImage() dereferences an image, deallocating memory associated with
%  the image if the reference count becomes zero.
%
%  The format of the DestroyImage method is:
%
%      Image *DestroyImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Decrement the reference count under the image lock; only the holder of
     the final reference performs the actual teardown below. */
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  image->channel_map=DestroyPixelChannelMap(image->channel_map);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info*) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  DestroyBlob(image);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&image->semaphore);
  /* Invalidate the signature so any later use of this freed struct is
     caught by the asserts at the top of every public entry point. */
  image->signature=(~MagickSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageInfo() deallocates memory associated with an ImageInfo
%  structure.
%
%  The format of the DestroyImageInfo method is:
%
%      ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
% */ MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); if (image_info->scenes != (char *) NULL) image_info->scenes=DestroyString(image_info->scenes); if (image_info->page != (char *) NULL) image_info->page=DestroyString(image_info->page); if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); if (image_info->view != (char *) NULL) image_info->view=DestroyString(image_info->view); if (image_info->cache != (void *) NULL) image_info->cache=DestroyPixelCache(image_info->cache); if (image_info->profile != (StringInfo *) NULL) image_info->profile=(void *) DestroyStringInfo((StringInfo *) image_info->profile); DestroyImageOptions(image_info); image_info->signature=(~MagickSignature); image_info=(ImageInfo *) RelinquishMagickMemory(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s a s s o c i a t e I m a g e S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DisassociateImageStream() disassociates the image stream. 
It checks if the
%  blob of the specified image is referenced by other images. If the reference
%  count is higher than 1 a new blob is assigned to the specified image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Thin wrapper: the real work happens in the blob module. */
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfo() initializes image_info to default values.
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  /* Zero the whole structure first, then override the non-zero defaults. */
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* The MAGICK_SYNCHRONIZE environment variable overrides the default. */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfoFile() returns the image info file member.
%
%  The format of the GetImageInfoFile method is:
%
%      FILE *GetImageInfoFile(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Simple accessor; no validation is performed here. */
  return(image_info->file);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M a s k                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMask() returns the mask associated with the image.
% % The format of the GetImageMask method is: % % Image *GetImageMask(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception) { CacheView *mask_view, *image_view; Image *mask_image; MagickBooleanType status; ssize_t y; /* Get image mask. */ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); mask_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (mask_image == (Image *) NULL) return((Image *) NULL); status=MagickTrue; (void) SetImageColorspace(mask_image,GRAYColorspace,exception); mask_image->read_mask=MagickFalse; image_view=AcquireVirtualCacheView(image,exception); mask_view=AcquireAuthenticCacheView(mask_image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *restrict p; register Quantum *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGray(mask_image,GetPixelReadMask(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(mask_image); } if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse) status=MagickFalse; } mask_view=DestroyCacheView(mask_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) mask_image=DestroyImage(mask_image); return(mask_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e R e f e r e n c e C o u n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
GetImageReferenceCount() returns the image reference count. % % The format of the GetReferenceCount method is: % % ssize_t GetImageReferenceCount(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport ssize_t GetImageReferenceCount(Image *image) { ssize_t reference_count; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); LockSemaphoreInfo(image->semaphore); reference_count=image->reference_count; UnlockSemaphoreInfo(image->semaphore); return(reference_count); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageVirtualPixelMethod() gets the "virtual pixels" method for the % image. A virtual pixel is any pixel access that is outside the boundaries % of the image cache. % % The format of the GetImageVirtualPixelMethod() method is: % % VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(GetPixelCacheVirtualMethod(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p r e t I m a g e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpretImageFilename() interprets embedded characters in an image filename. % The filename length is returned. 
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o format: A filename describing the format to use to write the numeric
%      argument. Only the first numeric format identifier is replaced.
%
%    o value: Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  size_t
    length;

  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  /* Scan for '%' escapes; only recognized escapes mark the result
     "canonical", otherwise the format is copied through unchanged. */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /* Literal "%%": skip; it is collapsed in the pass after the loop. */
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          value;

        /* Consume a zero-padded width (e.g. "%03d"); q advances past it.
           NOTE(review): this local intentionally shadows the parameter. */
        value=(ssize_t) strtol(q,&q,10);
        (void) value;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /* Numeric scene escape: format 'value' in place of the escape,
           then append the remainder of the template. */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t) (MaxTextExtent-
          (p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *value;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
        */
        /* FUTURE: Compare update with code from InterpretImageProperties()
           Note that a 'filename:' property should not need depth recursion.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        /* Extract the bracketed pattern, honoring nested brackets. */
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /* Only "filename:" properties are substituted here. */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        value=(const char *) NULL;
#if 0
  // FUTURE: remove this code. -- Anthony 29 April 2012
  // Removed as GetMagickProperty() will never match a "filename:"
  // string as this is not a 'known' image property.
  //
        if ((image_info != (const ImageInfo *) NULL) &&
            (image != (const Image *) NULL))
          value=GetMagickProperty(image_info,image,pattern,exception);
        else
#endif
        /* Look up the value: image property, then artifact, then option. */
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern,exception);
        if ((value == (const char *) NULL) && (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        /* Splice the value over the "%[...]" escape in place. */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),value,(size_t)
          (MaxTextExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /* Collapse any remaining "%%" into a single '%'. */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
%  0..65535.
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* Without HDRI support quantum values cannot exceed the depth bounds. */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* 'status' stays MagickTrue while every sample is an in-range integer;
     any out-of-range or fractional sample flips it to MagickFalse. */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        pixel=(double) p[i];
        /* Out of range, or not exactly representable as an integer. */
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* Inverted: status==MagickTrue means "no HDR sample found". */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* Validate every image in the list by checking its signature. */
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if (p->signature != MagickSignature)
      return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue if any pixel in the image has been
%  altered since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport MagickBooleanType IsTaintImage(const Image *image) { char magick[MaxTextExtent], filename[MaxTextExtent]; register const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); (void) CopyMagickString(magick,image->magick,MaxTextExtent); (void) CopyMagickString(filename,image->filename,MaxTextExtent); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) { if (p->taint != MagickFalse) return(MagickTrue); if (LocaleCompare(p->magick,magick) != 0) return(MagickTrue); if (LocaleCompare(p->filename,filename) != 0) return(MagickTrue); } return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModifyImage() ensures that there is only a single reference to the image % to be modified, updating the provided image pointer to point to a clone of % the original image if necessary. % % The format of the ModifyImage method is: % % MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType ModifyImage(Image **image, ExceptionInfo *exception) { Image *clone_image; assert(image != (Image **) NULL); assert(*image != (Image *) NULL); assert((*image)->signature == MagickSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (GetImageReferenceCount(*image) <= 1) return(MagickTrue); clone_image=CloneImage(*image,0,0,MagickTrue,exception); LockSemaphoreInfo((*image)->semaphore); (*image)->reference_count--; UnlockSemaphoreInfo((*image)->semaphore); *image=clone_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w M a g i c k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewMagickImage() creates a blank image canvas of the specified size and % background color. % % The format of the NewMagickImage method is: % % Image *NewMagickImage(const ImageInfo *image_info,const size_t width, % const size_t height,const PixelInfo *background, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the image width. % % o height: the image height. % % o background: the image color. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const PixelInfo *background, ExceptionInfo *exception) { CacheView *image_view; Image *image; MagickBooleanType status; ssize_t y; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickSignature); assert(background != (const PixelInfo *) NULL); image=AcquireImage(image_info,exception); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->alpha_trait=background->alpha_trait; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelInfoPixel(image,background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. 
% % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *ReferenceImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); LockSemaphoreInfo(image->semaphore); image->reference_count++; UnlockSemaphoreInfo(image->semaphore); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePage() resets the image page canvas and position. % % The format of the ResetImagePage method is: % % MagickBooleanType ResetImagePage(Image *image,const char *page) % % A description of each parameter follows: % % o image: the image. % % o page: the relative page specification. 
% */ MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page) { MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flags=ParseAbsoluteGeometry(page,&geometry); if ((flags & WidthValue) != 0) { if ((flags & HeightValue) == 0) geometry.height=geometry.width; image->page.width=geometry.width; image->page.height=geometry.height; } if ((flags & AspectValue) != 0) { if ((flags & XValue) != 0) image->page.x+=geometry.x; if ((flags & YValue) != 0) image->page.y+=geometry.y; } else { if ((flags & XValue) != 0) { image->page.x=geometry.x; if ((image->page.width == 0) && (geometry.x > 0)) image->page.width=image->columns+geometry.x; } if ((flags & YValue) != 0) { image->page.y=geometry.y; if ((image->page.height == 0) && (geometry.y > 0)) image->page.height=image->rows+geometry.y; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e B a c k g r o u n d C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageBackgroundColor() initializes the image pixels to the image % background color. The background color is defined by the background_color % member of the image structure. % % The format of the SetImage method is: % % MagickBooleanType SetImageBackgroundColor(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SetImageBackgroundColor(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((IsPixelInfoGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) TransformImageColorspace(image,sRGBColorspace,exception); if ((image->background_color.alpha_trait == BlendPixelTrait) && (image->alpha_trait != BlendPixelTrait)) (void) SetImageAlpha(image,OpaqueAlpha,exception); /* Set image background color. */ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelInfoPixel(image,&image->background_color,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannelMask() sets the image channel mask from the specified channel % mask. % % The format of the SetImageChannelMask method is: % % ChannelType SetImageChannelMask(Image *image, % const ChannelType channel_mask) % % A description of each parameter follows: % % o image: the image. % % o channel_mask: the channel mask. 
% */
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  ChannelType
    mask;

  /* Return the previous mask so callers can restore it afterward. */
  mask=image->channel_mask;
  image->channel_mask=channel_mask;
  SetPixelChannelMask(image,channel_mask);
  return(mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() set the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o background: the image color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  assert(color != (const PixelInfo *) NULL);
  /* The canvas adopts the fill color's pixel characteristics. */
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelInfoPixel(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e S t o r a g e C l a s s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageStorageClass() sets the image class: DirectClass for true color
%  images or PseudoClass for colormapped images.
%
%  The format of the SetImageStorageClass method is:
%
%      MagickBooleanType SetImageStorageClass(Image *image,
%        const ClassType storage_class,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o storage_class: The image class.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  /* Changing class requires the pixel cache to be re-synchronized. */
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e E x t e n t                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageExtent() sets the image size (i.e. columns & rows).
%
%  The format of the SetImageExtent method is:
%
%      MagickBooleanType SetImageExtent(Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: The image width in pixels.
%
%    o rows: The image height in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  /* A zero dimension is invalid; refuse rather than corrupt the cache. */
  if ((columns == 0) || (rows == 0))
    return(MagickFalse);
  image->columns=columns;
  image->rows=rows;
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
%  It is set to a type of image format based on the prefix or suffix of the
%  filename.  For example, 'ps:image' returns PS indicating a Postscript image.
%  JPEG is returned for this filename: 'image.jpg'.  The filename prefix has
%  precedence over the suffix.  Use an optional index enclosed in brackets
%  after a file name to specify a desired scene of a multi-resolution image
%  format like Photo CD (e.g. img0001.pcd[4]).  A True (non-zero) return value
%  indicates success.
%
%  The format of the SetImageInfo method is:
%
%      MagickBooleanType SetImageInfo(ImageInfo *image_info,
%        const unsigned int frames,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o frames: the number of images you intend to write.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  unsigned char
    magick[2*MaxTextExtent];

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *subimage='\0';
  GetPathComponent(image_info->filename,SubimagePath,subimage);
  if (*subimage != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
        {
          if (IsGeometry(subimage) != MagickFalse)
            (void) CloneString(&image_info->extract,subimage);
        }
      else
        {
          size_t
            first,
            last;

          /* Parse a comma-separated list of scene numbers/ranges (e.g.
             "3,5-7") and record the overall min scene and max scene. */
          (void) CloneString(&image_info->scenes,subimage);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            /* Skip separators, then read "first" and an optional "-last". */
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /* number_scenes becomes a count: last-first+1. */
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *extension='\0';
  GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* For compressed files (image.gif.gz), look at the inner extension. */
  if (*extension != '\0')
    if ((LocaleCompare(extension,"gz") == 0) ||
        (LocaleCompare(extension,"Z") == 0) ||
        (LocaleCompare(extension,"svgz") == 0) ||
        (LocaleCompare(extension,"wmz") == 0))
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  if (*extension != '\0')
    if (LocaleCompare(extension,"bz2") == 0)
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if (*extension != '\0')
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "EPHEMERAL", "LAUNCH",
          "MPEG:DECODE", "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK",
          "PS:COLOR", "PS:GRAY", "PS:MONO", "SCAN", "SHOW", "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,extension,MaxTextExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        /* First-character compare is a cheap pre-filter for LocaleCompare. */
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    (void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
  else
    {
      /*
        User specified image format.
      */
      LocaleUpper(magic);
      if (IsMagickConflict(magic) == MagickFalse)
        {
          (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          if (LocaleCompare(magic,"EPHEMERAL") != 0)
            image_info->affirm=MagickTrue;
          else
            image_info->temporary=MagickTrue;
        }
    }
  magick_info=GetMagickInfo(magic,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  /* Formats without endian support ignore any user endian request. */
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  GetPathComponent(image_info->filename,CanonicalPath,filename);
  (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,filename,exception);
      if ((LocaleCompare(filename,image_info->filename) != 0) &&
          (strchr(filename,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      /*
        Determine the image format from the first few bytes of the file.
      */
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MaxTextExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy standard input or pipe to temporary file.
          */
          *filename='\0';
          status=ImageToFile(image,filename,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,filename,MaxTextExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
          image_info->temporary=MagickTrue;
        }
      /* Peek at the leading bytes, then rewind so the reader sees them. */
      (void) ResetMagickMemory(magick,0,sizeof(magick));
      count=ReadBlob(image,2*MaxTextExtent,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          (void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
            MaxTextExtent);
          magick_info=GetMagickInfo(image_info->magick,sans_exception);
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o B l o b                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoBlob() sets the image info blob member.
% % The format of the SetImageInfoBlob method is: % % void SetImageInfoBlob(ImageInfo *image_info,const void *blob, % const size_t length) % % A description of each parameter follows: % % o image_info: the image info. % % o blob: the blob. % % o length: the blob length. % */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. % */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const Image *mask, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o mask: the image mask.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (mask == (const Image *) NULL)
    {
      /* NULL mask removes any existing read mask. */
      image->read_mask=MagickFalse;
      return(SyncImagePixelCache(image,exception));
    }
  image->read_mask=MagickTrue;
  /* Resync the cache so the read-mask channel exists before writing it. */
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  mask_view=AcquireVirtualCacheView(mask,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): status is only ever lowered to MagickFalse by the worker
     threads, so the unsynchronized writes are benign (upstream idiom). */
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(mask,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *restrict p;

    register Quantum
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Copy the mask's intensity into the image's read-mask channel. */
      SetPixelReadMask(image,ClampToQuantum(GetPixelIntensity(mask,p)),q);
      p+=GetPixelChannels(mask);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e A l p h a                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlpha() sets the alpha levels of the image.
%
%  The format of the SetImageAlpha method is:
%
%      MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o Alpha: the level of transparency: 0 is fully opaque and QuantumRange is
%      fully transparent.
%
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /* Ensure the image carries an alpha channel from now on. */
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Pixels excluded by the read mask are left untouched. */
      if (GetPixelReadMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
%  image and returns the previous setting.  A virtual pixel is any pixel access
%  that is outside the boundaries of the image cache.
%
%  The format of the SetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  VirtualPixelMethod
    prior_method;

  /*
    Delegate to the pixel cache, which stores the setting and hands back
    the method that was in effect before this call.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  prior_method=SetPixelCacheVirtualMethod(image,virtual_pixel_method,
    exception);
  return(prior_method);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S m u s h I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SmushImages() takes all images from the current image pointer to the end
%  of the image list and smushes them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
%  final image.
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
% */
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /* The first image in the list has nothing to its left: no gap. */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  /* For each scanline, measure the transparent margin on the right edge of
     the left image plus the left edge of the right image; the smallest such
     sum over all rows bounds how far the images may overlap. */
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /* NOTE(review): y always equals smush_image->rows here (the outer loop has
     no break), so this branch looks unreachable -- kept as-is. */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  /* Vertical analogue of SmushXGap: transparent bottom margin of the image
     above plus transparent top margin of the image below. */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /* NOTE(review): x always equals smush_image->columns here (no break in the
     outer loop), so this branch looks unreachable -- kept as-is. */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}

MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  /* First pass: worst-case canvas size assuming no overlap trimming. */
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->alpha_trait == BlendPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  /* Second pass: composite each image, pulled back by the measured gap. */
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to where the final composite actually ended. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t r i p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StripImage() strips an image of all profiles and comments.
%
%  The format of the StripImage method is:
%
%      MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) exception;  /* unused, kept for API symmetry */
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* Tell the PNG encoder not to emit metadata chunks for this image. */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "EXIF,iCCP,iTXt,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImage() initializes the red, green, and blue intensities of each pixel
%  as defined by the colormap index.
%
%  The format of the SyncImage method is:
%
%      MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  /* Validate a colormap index; out-of-range indexes are clamped to 0 and
     flagged through *range_exception so the caller can raise one error. */
  if ((size_t) index < image->colors)
    return(index);
  *range_exception=MagickTrue;
  return((Quantum) 0);
}

MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /* Only PseudoClass (colormapped) images have indexes to expand. */
  if (image->storage_class == DirectClass)
    return(MagickFalse);
  range_exception=MagickFalse;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): range_exception and status are written unsynchronized, but
     all writers store the same value (MagickTrue / MagickFalse), so the race
     is benign (upstream idiom). */
  #pragma omp parallel for schedule(static,4) shared(range_exception,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    register Quantum
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelInfoPixel(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* Report invalid indexes once, after the loop, unless merely pinging. */
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
      "InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e S e t t i n g s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageSettings() syncs any image_info global options into per-image
%  attributes.
%
%  Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
%  that operations and coders can find such settings.  In IMv7 if a desired
%  per-image artifact is not set, then it will directly look for a global
%  option as a fallback, as such this copy is no longer needed, only the
%  link set up.
%
%  The format of the SyncImageSettings method is:
%
%      MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%      MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  /* Apply the settings to every image in the list. */
  image=images;
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image,exception);
  /* "page" is a one-shot option; clear it once consumed. */
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); option=GetImageOption(image_info,"background"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->background_color, exception); option=GetImageOption(image_info,"black-point-compensation"); if (option != (const char *) NULL) image->black_point_compensation=(MagickBooleanType) ParseCommandOption( MagickBooleanOptions,MagickFalse,option); option=GetImageOption(image_info,"blue-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x; } option=GetImageOption(image_info,"bordercolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->border_color, exception); option=GetImageOption(image_info,"channel"); if (option != (const char *) NULL) (void) SetPixelChannelMask(image,(ChannelType) ParseChannelOption(option)); /* FUTURE: do not sync compose to per-image compose setting here */ option=GetImageOption(image_info,"compose"); if (option != (const char *) NULL) image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,option); /* -- */ option=GetImageOption(image_info,"compress"); if (option != (const char *) NULL) image->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,option); option=GetImageOption(image_info,"debug"); if (option != (const char *) NULL) image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"density"); if (option 
!= (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; } option=GetImageOption(image_info,"depth"); if (option != (const char *) NULL) image->depth=StringToUnsignedLong(option); option=GetImageOption(image_info,"endian"); if (option != (const char *) NULL) image->endian=(EndianType) ParseCommandOption(MagickEndianOptions, MagickFalse,option); option=GetImageOption(image_info,"filter"); if (option != (const char *) NULL) image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions, MagickFalse,option); option=GetImageOption(image_info,"fuzz"); if (option != (const char *) NULL) image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0); option=GetImageOption(image_info,"gravity"); if (option != (const char *) NULL) image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(image_info,"green-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y=image->chromaticity.green_primary.x; } option=GetImageOption(image_info,"intent"); if (option != (const char *) NULL) image->rendering_intent=(RenderingIntent) ParseCommandOption( MagickIntentOptions,MagickFalse,option); option=GetImageOption(image_info,"intensity"); if (option != (const char *) NULL) image->intensity=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,option); option=GetImageOption(image_info,"interlace"); if (option != (const char *) NULL) image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions, MagickFalse,option); option=GetImageOption(image_info,"interpolate"); if (option != (const 
char *) NULL) image->interpolate=(PixelInterpolateMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,option); option=GetImageOption(image_info,"loop"); if (option != (const char *) NULL) image->iterations=StringToUnsignedLong(option); option=GetImageOption(image_info,"mattecolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->matte_color, exception); option=GetImageOption(image_info,"orient"); if (option != (const char *) NULL) image->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,option); option=GetImageOption(image_info,"page"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) image->quality=StringToUnsignedLong(option); option=GetImageOption(image_info,"red-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; image->chromaticity.red_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.red_primary.y=image->chromaticity.red_primary.x; } if (image_info->quality != UndefinedCompressionQuality) image->quality=image_info->quality; option=GetImageOption(image_info,"scene"); if (option != (const char *) NULL) image->scene=StringToUnsignedLong(option); option=GetImageOption(image_info,"taint"); if (option != (const char *) NULL) image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"tile-offset"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"transparent-color"); if (option != (const char *) NULL) (void) 
QueryColorCompliance(option,AllCompliance,&image->transparent_color, exception); option=GetImageOption(image_info,"type"); if (option != (const char *) NULL) image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, option); option=GetImageOption(image_info,"units"); units=image_info->units; if (option != (const char *) NULL) units=(ResolutionType) ParseCommandOption(MagickResolutionOptions, MagickFalse,option); if (units != UndefinedResolution) { if (image->units != units) switch (image->units) { case PixelsPerInchResolution: { if (units == PixelsPerCentimeterResolution) { image->resolution.x/=2.54; image->resolution.y/=2.54; } break; } case PixelsPerCentimeterResolution: { if (units == PixelsPerInchResolution) { image->resolution.x=(double) ((size_t) (100.0*2.54* image->resolution.x+0.5))/100.0; image->resolution.y=(double) ((size_t) (100.0*2.54* image->resolution.y+0.5))/100.0; } break; } default: break; } image->units=units; } option=GetImageOption(image_info,"virtual-pixel"); if (option != (const char *) NULL) (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod) ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option), exception); option=GetImageOption(image_info,"white-point"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.white_point.x=geometry_info.rho; image->chromaticity.white_point.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.white_point.y=image->chromaticity.white_point.x; } ResetImageOptionIterator(image_info); #if 0 { /* IMv6: Copy freeform global options into per-image artifacts, so * various operations and coders can access them. * * This has a problem, as per-image artefacts may have been set in * parenthesis, but may not be unset when parenthesis ends. 
*/ char property[MaxTextExtent]; const char *value; for (option=GetNextImageOption(image_info); option != (const char *) NULL; ) { value=GetImageOption(image_info,option); if (value != (const char *) NULL) { (void) FormatLocaleString(property,MaxTextExtent,"%s",option); (void) SetImageArtifact(image,property,value); } option=GetNextImageOption(image_info); } } #else /* IMv7: pointer to allow the lookup of pre-image artefact will fallback to a global option setting/define. This saves a lot of duplication of global options into per-image artifacts, while ensuring only specifically set per-image artifacts are preverved when parenthesis ends. This pointer is never explictally freed, as it is only used as a back reference, not as the main pointer to the image_info structure. Images being removed from a image_info image list (or yet to be added to such), should have this pointer reset to NULL. */ image->image_info=image_info; #endif return(MagickTrue); }
displacement_op_cuda.h
// ----------------------------------------------------------------------------- // // Copyright (C) The BioDynaMo Project. // All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef DISPLACEMENT_OP_CUDA_H_ #define DISPLACEMENT_OP_CUDA_H_ #include <vector> #include "bound_space_op.h" #include "gpu/displacement_op_cuda_kernel.h" #include "log.h" #include "resource_manager.h" #include "shape.h" #include "simulation.h" #include "type_util.h" namespace bdm { /// Defines the 3D physical interactions between physical objects template <typename TSimulation = Simulation<>> class DisplacementOpCuda { public: DisplacementOpCuda() {} ~DisplacementOpCuda() {} template <typename TContainer> typename std::enable_if<is_soa_sphere<TContainer>::value>::type operator()( TContainer* cells, uint16_t type_idx) { auto* sim = TSimulation::GetActive(); auto* grid = sim->GetGrid(); auto* param = sim->GetParam(); std::vector<std::array<double, 3>> cell_movements(cells->size()); std::vector<double> mass(cells->size()); std::vector<uint32_t> starts; std::vector<uint16_t> lengths; std::vector<uint32_t> successors(cells->size()); uint32_t box_length; uint32_t num_objects = cells->size(); std::array<uint32_t, 3> num_boxes_axis; std::array<int32_t, 3> grid_dimensions; double squared_radius = grid->GetLargestObjectSize() * grid->GetLargestObjectSize(); // We need to create a mass vector, because it is not stored by default in // a cell container cells->FillMassVector(&mass); grid->GetSuccessors(&successors); grid->GetBoxInfo(&starts, &lengths); grid->GetGridInfo(&box_length, &num_boxes_axis, &grid_dimensions); // If this is the 
first time we perform physics on GPU using CUDA if (cdo_ == nullptr) { // Allocate 25% more memory so we don't need to reallocate GPU memory // for every (small) change uint32_t new_num_objects = static_cast<uint32_t>(1.25 * num_objects); uint32_t new_num_boxes = static_cast<uint32_t>(1.25 * starts.size()); // Store these extende buffer sizes for future reference num_objects_ = new_num_objects; num_boxes_ = new_num_boxes; // Allocate required GPU memory cdo_ = new DisplacementOpCudaKernel(new_num_objects, new_num_boxes); } else { // If the number of simulation objects increased if (num_objects >= num_objects_) { Log::Info("DisplacementOpCuda", "\nThe number of cells increased signficantly (from ", num_objects_, " to ", num_objects, "), so we allocate bigger GPU buffers\n"); uint32_t new_num_objects = static_cast<uint32_t>(1.25 * num_objects); num_objects_ = new_num_objects; cdo_->ResizeCellBuffers(new_num_objects); } // If the neighbor grid size increased if (starts.size() >= num_boxes_) { Log::Info("DisplacementOpCuda", "\nThe number of boxes increased signficantly (from ", num_boxes_, " to ", "), so we allocate bigger GPU buffers\n"); uint32_t new_num_boxes = static_cast<uint32_t>(1.25 * starts.size()); num_boxes_ = new_num_boxes; cdo_->ResizeGridBuffers(new_num_boxes); } } cdo_->LaunchDisplacementKernel( cells->GetPositionPtr(), cells->GetDiameterPtr(), cells->GetTractorForcePtr(), cells->GetAdherencePtr(), cells->GetBoxIdPtr(), mass.data(), &(param->simulation_time_step_), &(param->simulation_max_displacement_), &squared_radius, &num_objects, starts.data(), lengths.data(), successors.data(), &box_length, num_boxes_axis.data(), grid_dimensions.data(), cell_movements.data()->data()); // set new positions after all updates have been calculated // otherwise some cells would see neighbors with already updated positions // which would lead to inconsistencies #pragma omp parallel for for (size_t i = 0; i < cells->size(); i++) { auto&& cell = (*cells)[i]; 
cell.UpdatePosition(cell_movements[i]); if (param->bound_space_) { ApplyBoundingBox(&cell, param->min_bound_, param->max_bound_); } cell.SetPosition(cell.GetPosition()); // Reset biological movement to 0. cell.SetTractorForce({0, 0, 0}); } } template <typename TContainer> typename std::enable_if<!is_soa_sphere<TContainer>::value>::type operator()( TContainer* cells, uint16_t type_idx) { Fatal("DisplacementOpCuda", "You tried to compile GPU-specific function calls for a non-SOA data " "structure or non-spherical simulation object."); } private: DisplacementOpCudaKernel* cdo_ = nullptr; uint32_t num_boxes_ = 0; uint32_t num_objects_ = 0; }; } // namespace bdm #endif // DISPLACEMENT_OP_CUDA_H_
atomic_helper.h
#pragma once #include <array.h> /** @file atomic_helper.h @section Provides definitions of atomic functions that are used in QUDA. */ namespace quda { template <bool is_device> struct atomic_fetch_add_impl { template <typename T> inline void operator()(T *addr, T val) { #pragma omp atomic update *addr += val; } }; template <> struct atomic_fetch_add_impl<true> { template <typename T> __device__ inline void operator()(T *addr, T val) { atomicAdd(addr, val); } }; /** @brief atomic_fetch_add function performs similarly as atomic_ref::fetch_add @param[in,out] addr The memory address of the variable we are updating atomically @param[in] val The value we summing to the value at addr */ template <typename T> __device__ __host__ inline void atomic_fetch_add(T *addr, T val) { target::dispatch<atomic_fetch_add_impl>(addr, val); } template <typename T> __device__ __host__ inline void atomic_fetch_add(complex<T> *addr, complex<T> val) { atomic_fetch_add(reinterpret_cast<T *>(addr) + 0, val.real()); atomic_fetch_add(reinterpret_cast<T *>(addr) + 1, val.imag()); } template <typename T, int n> __device__ __host__ inline void atomic_fetch_add(array<T, n> *addr, array<T, n> val) { for (int i = 0; i < n; i++) atomic_fetch_add(&(*addr)[i], val[i]); } template <bool is_device> struct atomic_fetch_abs_max_impl { template <typename T> inline void operator()(T *addr, T val) { #pragma omp atomic update *addr = std::max(*addr, val); } }; template <> struct atomic_fetch_abs_max_impl<true> { /** @brief Implementation of single-precision atomic max specialized for positive-definite numbers. Here we take advantage of the property that when positive floating point numbers are reinterpretted as unsigned integers, they have the same unique sorted order. 
@param addr Address that stores the atomic variable to be updated @param val Value to be added to the atomic */ __device__ inline void operator()(float *addr, float val) { uint32_t val_ = __float_as_uint(val); uint32_t *addr_ = reinterpret_cast<uint32_t *>(addr); atomicMax(addr_, val_); } }; /** @brief atomic_fetch_max function that does an atomic max. @param[in,out] addr The memory address of the variable we are updating atomically @param[in] val The value we are comparing against. Must be positive valued else result is undefined. */ template <typename T> __device__ __host__ inline void atomic_fetch_abs_max(T *addr, T val) { target::dispatch<atomic_fetch_abs_max_impl>(addr, val); } } // namespace quda
smg_setup_interp.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.16 $
 ***********************************************************************EHEADER*/

#include "_hypre_struct_ls.h"
#include "smg.h"

/*--------------------------------------------------------------------------
 * Create the (transpose) interpolation matrix P^T on the coarse grid.
 * Its stencil has exactly two entries, at offsets -1 and +1 along the
 * coarsening direction cdir; all coefficients are filled in later by
 * hypre_SMGSetupInterpOp.
 *--------------------------------------------------------------------------*/

hypre_StructMatrix *
hypre_SMGCreateInterpOp( hypre_StructMatrix *A,
                         hypre_StructGrid   *cgrid,
                         HYPRE_Int           cdir  )
{
   hypre_StructMatrix  *PT;
   hypre_StructStencil *interp_stencil;
   hypre_Index         *offsets;
   HYPRE_Int            dim;
   HYPRE_Int            num_ghost[] = {1, 1, 1, 1, 1, 1};

   /* Build the two-point stencil straddling the coarse point along cdir */
   dim = hypre_StructStencilDim(hypre_StructMatrixStencil(A));
   offsets = hypre_CTAlloc(hypre_Index, 2);
   hypre_SetIndex(offsets[0], 0, 0, 0);
   hypre_SetIndex(offsets[1], 0, 0, 0);
   hypre_IndexD(offsets[0], cdir) = -1;
   hypre_IndexD(offsets[1], cdir) =  1;
   interp_stencil = hypre_StructStencilCreate(dim, 2, offsets);

   /* Create the matrix on the coarse grid with one ghost layer all around;
    * the stencil object is no longer needed once the matrix holds it */
   PT = hypre_StructMatrixCreate(hypre_StructMatrixComm(A), cgrid,
                                 interp_stencil);
   hypre_StructMatrixSetNumGhost(PT, num_ghost);
   hypre_StructStencilDestroy(interp_stencil);

   return PT;
}

/*--------------------------------------------------------------------------
 * This routine uses SMGRelax to set up the interpolation operator.
 *
 * To illustrate how it proceeds, consider setting up the {0, 0, -1}
 * stencil coefficient of P^T.  This coefficient corresponds to the
 * {0, 0, 1} coefficient of P.
Do one sweep of plane relaxation on the
 * fine grid points for the system, A_mask x = b, with initial guess
 * x_0 = all ones and right-hand-side b = all zeros.  The A_mask matrix
 * contains all coefficients of A except for those in the same direction
 * as {0, 0, -1}.
 *
 * The relaxation data for the multigrid algorithm is passed in and used.
 * When this routine returns, the only modified relaxation parameters
 * are MaxIter, RegSpace and PreSpace info, the right-hand-side and
 * solution info.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SMGSetupInterpOp( void               *relax_data,
                        hypre_StructMatrix *A,
                        hypre_StructVector *b,
                        hypre_StructVector *x,
                        hypre_StructMatrix *PT,
                        HYPRE_Int           cdir,
                        hypre_Index         cindex,
                        hypre_Index         findex,
                        hypre_Index         stride )
{
   hypre_StructMatrix   *A_mask;

   hypre_StructStencil  *A_stencil;
   hypre_Index          *A_stencil_shape;
   HYPRE_Int             A_stencil_size;
   hypre_StructStencil  *PT_stencil;
   hypre_Index          *PT_stencil_shape;
   HYPRE_Int             PT_stencil_size;

   HYPRE_Int            *stencil_indices;
   HYPRE_Int             num_stencil_indices;

   hypre_StructGrid     *fgrid;

   hypre_StructStencil  *compute_pkg_stencil;
   hypre_Index          *compute_pkg_stencil_shape;
   HYPRE_Int             compute_pkg_stencil_size = 1;
   HYPRE_Int             compute_pkg_stencil_dim = 1;
   hypre_ComputePkg     *compute_pkg;
   hypre_ComputeInfo    *compute_info;

   hypre_CommHandle     *comm_handle;

   hypre_BoxArrayArray  *compute_box_aa;
   hypre_BoxArray       *compute_box_a;
   hypre_Box            *compute_box;

   hypre_Box            *PT_data_box;
   hypre_Box            *x_data_box;
   double               *PTp;
   double               *xp;
   HYPRE_Int             PTi;
   HYPRE_Int             xi;

   hypre_Index           loop_size;
   hypre_Index           start;
   hypre_Index           startc;
   hypre_Index           stridec;

   HYPRE_Int             si, sj, d;
   HYPRE_Int             compute_i, i, j;

   /*--------------------------------------------------------
    * Initialize some things
    *--------------------------------------------------------*/

   hypre_SetIndex(stridec, 1, 1, 1);

   fgrid = hypre_StructMatrixGrid(A);

   A_stencil = hypre_StructMatrixStencil(A);
   A_stencil_shape = hypre_StructStencilShape(A_stencil);
   A_stencil_size = hypre_StructStencilSize(A_stencil);
   PT_stencil = hypre_StructMatrixStencil(PT);
   PT_stencil_shape = hypre_StructStencilShape(PT_stencil);
   PT_stencil_size = hypre_StructStencilSize(PT_stencil);

   /* Set up relaxation parameters: a single sweep over one register space */
   hypre_SMGRelaxSetMaxIter(relax_data, 1);
   hypre_SMGRelaxSetNumPreSpaces(relax_data, 0);
   hypre_SMGRelaxSetNumRegSpaces(relax_data, 1);
   hypre_SMGRelaxSetRegSpaceRank(relax_data, 0, 1);

   /* One-entry stencil reused below to build a compute package per
    * P^T stencil entry (its shape is overwritten inside the si loop) */
   compute_pkg_stencil_shape =
      hypre_CTAlloc(hypre_Index, compute_pkg_stencil_size);
   compute_pkg_stencil = hypre_StructStencilCreate(compute_pkg_stencil_dim,
                                                   compute_pkg_stencil_size,
                                                   compute_pkg_stencil_shape);

   for (si = 0; si < PT_stencil_size; si++)
   {
      /*-----------------------------------------------------
       * Compute A_mask matrix: This matrix contains all
       * stencil coefficients of A except for the coefficients
       * in the opposite direction of the current P stencil
       * coefficient being computed (same direction for P^T).
       *-----------------------------------------------------*/

      stencil_indices = hypre_TAlloc(HYPRE_Int, A_stencil_size);
      num_stencil_indices = 0;
      for (sj = 0; sj < A_stencil_size; sj++)
      {
         if (hypre_IndexD(A_stencil_shape[sj],  cdir) !=
             hypre_IndexD(PT_stencil_shape[si], cdir)   )
         {
            stencil_indices[num_stencil_indices] = sj;
            num_stencil_indices++;
         }
      }
      A_mask =
         hypre_StructMatrixCreateMask(A, num_stencil_indices, stencil_indices);
      hypre_TFree(stencil_indices);

      /*-----------------------------------------------------
       * Do relaxation sweep to compute coefficients:
       * x starts as all ones, b as all zeros, so after one
       * sweep x holds the interpolation coefficients.
       *-----------------------------------------------------*/

      hypre_StructVectorClearGhostValues(x);
      hypre_StructVectorSetConstantValues(x, 1.0);
      hypre_StructVectorSetConstantValues(b, 0.0);
      hypre_SMGRelaxSetNewMatrixStencil(relax_data, PT_stencil);
      hypre_SMGRelaxSetup(relax_data, A_mask, b, x);
      hypre_SMGRelax(relax_data, A_mask, b, x);

      /*-----------------------------------------------------
       * Free up A_mask matrix
       *-----------------------------------------------------*/

      hypre_StructMatrixDestroy(A_mask);

      /*-----------------------------------------------------
       * Set up compute package for communication of
       * coefficients from fine to coarse across processor
       * boundaries.
       *-----------------------------------------------------*/

      hypre_CopyIndex(PT_stencil_shape[si], compute_pkg_stencil_shape[0]);
      hypre_CreateComputeInfo(fgrid, compute_pkg_stencil, &compute_info);
      hypre_ComputeInfoProjectSend(compute_info, findex, stride);
      hypre_ComputeInfoProjectRecv(compute_info, findex, stride);
      hypre_ComputeInfoProjectComp(compute_info, cindex, stride);
      hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(x), 1,
                             fgrid, &compute_pkg);

      /*-----------------------------------------------------
       * Copy coefficients from x into P^T.  Pass 0 works on
       * boxes independent of communication while it is in
       * flight; pass 1 finishes communication and handles the
       * dependent boxes.
       *-----------------------------------------------------*/

      for (compute_i = 0; compute_i < 2; compute_i++)
      {
         switch(compute_i)
         {
            case 0:
            {
               xp = hypre_StructVectorData(x);
               hypre_InitializeIndtComputations(compute_pkg, xp, &comm_handle);
               compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
            }
            break;

            case 1:
            {
               hypre_FinalizeIndtComputations(comm_handle);
               compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
            }
            break;
         }

         hypre_ForBoxArrayI(i, compute_box_aa)
         {
            compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

            x_data_box  = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
            PT_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(PT), i);

            xp  = hypre_StructVectorBoxData(x, i);
            PTp = hypre_StructMatrixBoxData(PT, i, si);

            hypre_ForBoxI(j, compute_box_a)
            {
               compute_box = hypre_BoxArrayBox(compute_box_a, j);

               hypre_CopyIndex(hypre_BoxIMin(compute_box), start);
               hypre_StructMapFineToCoarse(start, cindex, stride, startc);

               /* shift start index to appropriate F-point */
               for (d = 0; d < 3; d++)
               {
                  hypre_IndexD(start, d) +=
                     hypre_IndexD(PT_stencil_shape[si], d);
               }

               hypre_BoxGetStrideSize(compute_box, stride, loop_size);
               hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                                   x_data_box,  start,  stride,  xi,
                                   PT_data_box, startc, stridec, PTi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,PTi) HYPRE_SMP_SCHEDULE
#endif
               hypre_BoxLoop2For(xi, PTi)
               {
                  PTp[PTi] = xp[xi];
               }
               hypre_BoxLoop2End(xi, PTi);
            }
         }
      }

      /*-----------------------------------------------------
       * Free up compute package info
       *-----------------------------------------------------*/

      hypre_ComputePkgDestroy(compute_pkg);
   }

   /* Tell SMGRelax that the stencil has changed */
   hypre_SMGRelaxSetNewMatrixStencil(relax_data, PT_stencil);

   hypre_StructStencilDestroy(compute_pkg_stencil);

#if 0
   hypre_StructMatrixAssemble(PT);
#else
   hypre_StructInterpAssemble(A, PT, 1, cdir, cindex, stride);
#endif

   return hypre_error_flag;
}
ompfor3.c
/*
 * Decremental loop iteration,
 * Default loop scheduling
 */
#include <stdio.h>

#ifdef _OPENMP
#include <omp.h>
#else
/* Serial fallbacks so the example also builds without OpenMP support.
 * Previously only the header include was guarded, so the calls below
 * failed to compile when _OPENMP was not defined. */
static int omp_get_num_threads(void) { return 1; }
static int omp_get_thread_num(void)  { return 0; }
#endif

/* Shared result array; the loop below writes indices 19, 16, ..., 1 only. */
int a[20];

int main(void)
{
  int i;
  int j = 100;

#pragma omp parallel
  {
#pragma omp single
    printf ("Using %d threads.\n",omp_get_num_threads());

    /* firstprivate(j): each thread's private j starts at 100.
     * lastprivate(j): after the loop, j holds the private value from the
     * sequentially last iteration (i == 1); since no iteration modifies j,
     * that value is still 100. */
#pragma omp for nowait firstprivate(j) lastprivate(j)
    for (i = 19; i > -1; i -= 3)
    {
      a[i] = i*2 + j;
      printf("Iteration %2d is carried out by thread %2d\n",\
             i, omp_get_thread_num());
    }
  }
  return 0;
}
GB_transpose_bucket.c
//------------------------------------------------------------------------------ // GB_transpose_bucket: transpose and optionally typecast and/or apply operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C = A' or op(A'). Optionally typecasts from A->type to the new type ctype, // and/or optionally applies a unary operator. // If an operator z=op(x) is provided, the type of z must be the same as the // type of C. The type of A must be compatible with the type of of x (A is // typecasted into the type of x). These conditions must be checked in the // caller. // This function is agnostic for the CSR/CSC format of C and A. C_is_csc is // defined by the caller and assigned to C->is_csc, but otherwise unused. // A->is_csc is ignored. // The input can be hypersparse or non-hypersparse. The output C is always // non-hypersparse, and never shallow. On input, C is a static header. // If A is m-by-n in CSC format, with e nonzeros, the time and memory taken is // O(m+n+e) if A is non-hypersparse, or O(m+e) if hypersparse. This is fine if // most rows and columns of A are non-empty, but can be very costly if A or A' // is hypersparse. In particular, if A is a non-hypersparse column vector with // m >> e, the time and memory is O(m), which can be huge. Thus, for // hypersparse matrices, or for very sparse matrices, the qsort method should // be used instead (see GB_transpose). // This method is parallel, but not highly scalable. At most O(e/m) threads // are used. 
#include "GB_transpose.h"

//------------------------------------------------------------------------------
// workspace management macros
//------------------------------------------------------------------------------

// free the per-thread bucket workspaces and the A_slice / size arrays
#define GB_FREE_WORKSPACE                                               \
{                                                                       \
    if (Workspaces != NULL && Workspaces_size != NULL)                  \
    {                                                                   \
        for (int tid = 0 ; tid < nworkspaces ; tid++)                   \
        {                                                               \
            GB_FREE_WORK (&(Workspaces [tid]), Workspaces_size [tid]) ; \
        }                                                               \
    }                                                                   \
    GB_WERK_POP (A_slice, int64_t) ;                                    \
    GB_WERK_POP (Workspaces_size, size_t) ;                             \
    GB_WERK_POP (Workspaces, int64_t *) ;                               \
}

// free all workspace and the output matrix C (used on error paths)
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_phbix_free (C) ;                                                 \
    GB_FREE_WORKSPACE ;                                                 \
}

GrB_Info GB_transpose_bucket    // bucket transpose; typecast and apply op
(
    GrB_Matrix C,               // output matrix (static header)
    const GB_iso_code C_code_iso,   // iso code for C
    const GrB_Type ctype,       // type of output matrix C
    const bool C_is_csc,        // format of output matrix C
    const GrB_Matrix A,         // input matrix
    // no operator is applied if op is NULL
    const GB_Operator op,       // unary/idxunop/binop to apply
    const GrB_Scalar scalar,    // scalar to bind to binary operator
    bool binop_bind1st,         // if true, binop(x,A) else binop(A,y)
    const int nworkspaces,      // # of workspaces to use
    const int nthreads,         // # of threads to use
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (C != NULL) ;
    ASSERT (C->static_header) ;

    ASSERT_TYPE_OK (ctype, "ctype for transpose", GB0) ;
    ASSERT_MATRIX_OK (A, "A input for transpose_bucket", GB0) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;

    // if op is NULL, then no operator is applied

    // This method is only used when A is sparse or hypersparse.
    // The full and bitmap cases are handled in GB_transpose.
    ASSERT (!GB_IS_FULL (A)) ;
    ASSERT (!GB_IS_BITMAP (A)) ;
    ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;

    GB_WERK_DECLARE (A_slice, int64_t) ;            // size nthreads+1
    GB_WERK_DECLARE (Workspaces, int64_t *) ;       // size nworkspaces
    GB_WERK_DECLARE (Workspaces_size, size_t) ;     // size nworkspaces

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    int64_t anz = GB_nnz (A) ;
    int64_t vlen = A->vlen ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    // # of threads to use in the O(vlen) loops below
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nth = GB_nthreads (vlen, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // allocate C: always sparse
    //--------------------------------------------------------------------------

    // The bucket transpose only works when C is sparse.
    // A can be sparse or hypersparse.

    // C->p is allocated but not initialized.
    GrB_Info info ;
    // C is iso if and only if the iso code is not GB_NON_ISO
    bool C_iso = (C_code_iso != GB_NON_ISO) ;
    GB_OK (GB_new_bix (&C, true, // sparse, static header
        ctype, A->vdim, vlen, GB_Ap_malloc, C_is_csc, GxB_SPARSE, true,
        A->hyper_switch, vlen, anz, true, C_iso, Context)) ;

    int64_t *restrict Cp = C->p ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_WERK_PUSH (Workspaces, nworkspaces, int64_t *) ;
    GB_WERK_PUSH (Workspaces_size, nworkspaces, size_t) ;
    if (Workspaces == NULL || Workspaces_size == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    bool ok = true ;
    for (int tid = 0 ; tid < nworkspaces ; tid++)
    {
        Workspaces [tid] = GB_MALLOC_WORK (vlen + 1, int64_t,
            &Workspaces_size [tid]) ;
        ok = ok && (Workspaces [tid] != NULL) ;
    }

    if (!ok)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //==========================================================================
    // phase1: symbolic analysis
    //==========================================================================

    // slice the A matrix, perfectly balanced for one task per thread
    GB_WERK_PUSH (A_slice, nthreads + 1, int64_t) ;
    if (A_slice == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    GB_pslice (A_slice, A->p, A->nvec, nthreads, true) ;

    // sum up the row counts and find C->p
    if (nthreads == 1)
    {

        //----------------------------------------------------------------------
        // sequential method: A is not sliced
        //----------------------------------------------------------------------

        // Only requires a single int64 workspace of size vlen for a single
        // thread.  The resulting C matrix is not jumbled.

        // compute the row counts of A.  No need to scan the A->p pointers
        ASSERT (nworkspaces == 1) ;
        int64_t *restrict workspace = Workspaces [0] ;
        memset (workspace, 0, (vlen + 1) * sizeof (int64_t)) ;
        const int64_t *restrict Ai = A->i ;
        for (int64_t p = 0 ; p < anz ; p++)
        {
            int64_t i = Ai [p] ;
            workspace [i]++ ;
        }

        // cumulative sum of the workspace, and copy back into C->p
        GB_cumsum (workspace, vlen, &(C->nvec_nonempty), 1, NULL) ;
        memcpy (Cp, workspace, (vlen + 1) * sizeof (int64_t)) ;

    }
    else if (nworkspaces == 1)
    {

        //----------------------------------------------------------------------
        // atomic method: A is sliced but workspace is shared
        //----------------------------------------------------------------------

        // Only requires a single int64 workspace of size vlen, shared by all
        // threads.  Scales well, but requires atomics.  If the # of rows is
        // very small and the average row degree is high, this can be very slow
        // because of contention on the atomic workspace.  Otherwise, it is
        // typically faster than the non-atomic method.  The resulting C matrix
        // is jumbled.

        // compute the row counts of A.  No need to scan the A->p pointers
        int64_t *restrict workspace = Workspaces [0] ;
        GB_memset (workspace, 0, (vlen + 1) * sizeof (int64_t), nth) ;
        const int64_t *restrict Ai = A->i ;
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t i = Ai [p] ;
            // update workspace [i]++ atomically:
            GB_ATOMIC_UPDATE
            workspace [i]++ ;
        }
        C->jumbled = true ; // atomic transpose leaves C jumbled

        // cumulative sum of the workspace, and copy back into C->p
        GB_cumsum (workspace, vlen, &(C->nvec_nonempty), nth, Context) ;
        GB_memcpy (Cp, workspace, (vlen + 1) * sizeof (int64_t), nth) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // non-atomic method
        //----------------------------------------------------------------------

        // compute the row counts of A for each slice, one per thread; This
        // method is parallel, but not highly scalable.  Each thread requires
        // int64 workspace of size vlen, but no atomics are required.  The
        // resulting C matrix is not jumbled, so this can save work if C needs
        // to be unjumbled later.

        ASSERT (nworkspaces == nthreads) ;
        const int64_t *restrict Ap = A->p ;
        const int64_t *restrict Ah = A->h ;
        const int64_t *restrict Ai = A->i ;

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (tid = 0 ; tid < nthreads ; tid++)
        {
            // get the row counts for this slice, of size A->vlen
            int64_t *restrict workspace = Workspaces [tid] ;
            memset (workspace, 0, (vlen + 1) * sizeof (int64_t)) ;
            for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++)
            {
                // iterate over the entries in A(:,j)
                int64_t j = GBH (Ah, k) ;
                int64_t pA_start = Ap [k] ;
                int64_t pA_end = Ap [k+1] ;
                for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                {
                    // count one more entry in C(i,:) for this slice
                    int64_t i = Ai [pA] ;
                    workspace [i]++ ;
                }
            }
        }

        // cumulative sum of the workspaces across the slices: after this
        // loop, workspace [i] of slice tid holds the offset of that slice's
        // first entry within row i, and Cp [i] holds the total count of row i
        int64_t i ;
        #pragma omp parallel for num_threads(nth) schedule(static)
        for (i = 0 ; i < vlen ; i++)
        {
            int64_t s = 0 ;
            for (int tid = 0 ; tid < nthreads ; tid++)
            {
                int64_t *restrict workspace = Workspaces [tid] ;
                int64_t c = workspace [i] ;
                workspace [i] = s ;
                s += c ;
            }
            Cp [i] = s ;
        }
        Cp [vlen] = 0 ;

        // compute the vector pointers for C
        GB_cumsum (Cp, vlen, &(C->nvec_nonempty), nth, Context) ;

        // add Cp back to all Workspaces, turning each per-slice offset into
        // an absolute position in C->i / C->x
        #pragma omp parallel for num_threads(nth) schedule(static)
        for (i = 0 ; i < vlen ; i++)
        {
            int64_t s = Cp [i] ;
            int64_t *restrict workspace = Workspaces [0] ;
            workspace [i] = s ;
            for (int tid = 1 ; tid < nthreads ; tid++)
            {
                int64_t *restrict workspace = Workspaces [tid] ;
                workspace [i] += s ;
            }
        }
    }

    C->magic = GB_MAGIC ;

    //==========================================================================
    // phase2: transpose A into C
    //==========================================================================

    // transpose both the pattern and the values
    if (op == NULL)
    {
        // do not apply an operator; optional typecast to C->type
        GB_transpose_ix (C, A, Workspaces, A_slice, nworkspaces, nthreads) ;
    }
    else
    {
        // apply an operator, C has type op->ztype
        GB_transpose_op (C, C_code_iso, op, scalar, binop_bind1st, A,
            Workspaces, A_slice, nworkspaces, nthreads) ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    ASSERT_MATRIX_OK (C, "C transpose of A", GB0) ;
    ASSERT (C->h == NULL) ;
    return (GrB_SUCCESS) ;
}
morphology.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y % % MM MM O O R R P P H H O O L O O G Y Y % % M M M O O RRRR PPPP HHHHH O O L O O G GGG Y % % M M O O R R P H H O O L O O G G Y % % M M OOO R R P H H OOO LLLLL OOO GGG Y % % % % % % MagickCore Morphology Methods % % % % Software Design % % Anthony Thyssen % % January 2010 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Morphology is the application of various kernels, of any size or shape, to an % image in various ways (typically binary, but not always). % % Convolution (weighted sum or average) is just one specific type of % morphology. Just one that is very common for image bluring and sharpening % effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring. % % This module provides not only a general morphology function, and the ability % to apply more advanced or iterative morphologies, but also functions for the % generation of many different types of kernel arrays from user supplied % arguments. Prehaps even the generation of a kernel from a small image. */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/color-private.h" #include "magick/channel.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor-private.h" #include "magick/morphology.h" #include "magick/morphology-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/prepress.h" #include "magick/quantize.h" #include "magick/registry.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" /* Other global definitions used by module. 
*/ #define Minimize(assign,value) assign=MagickMin(assign,value) #define Maximize(assign,value) assign=MagickMax(assign,value) /* Integer Factorial Function - for a Binomial kernel */ #if 1 static inline size_t fact(size_t n) { size_t l,f; for(f=1, l=2; l <= n; f=f*l, l++); return(f); } #elif 1 /* glibc floating point alternatives */ #define fact(n) ((size_t)tgamma((double)n+1)) #else #define fact(n) ((size_t)lgamma((double)n+1)) #endif /* Currently these are only internal to this module */ static void CalcKernelMetaData(KernelInfo *), ExpandMirrorKernelInfo(KernelInfo *), ExpandRotateKernelInfo(KernelInfo *, const double), RotateKernelInfo(KernelInfo *, double); /* Quick function to find last kernel in a kernel list */ static inline KernelInfo *LastKernelInfo(KernelInfo *kernel) { while (kernel->next != (KernelInfo *) NULL) kernel=kernel->next; return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelInfo() takes the given string (generally supplied by the % user) and converts it into a Morphology/Convolution Kernel. This allows % users to specify a kernel from a number of pre-defined kernels, or to fully % specify their own kernel for a specific Convolution or Morphology % Operation. % % The kernel so generated can be any rectangular array of floating point % values (doubles) with the 'control point' or 'pixel being affected' % anywhere within that array of values. % % Previously IM was restricted to a square of odd size using the exact % center as origin, this is no longer the case, and any rectangular kernel % with any value being declared the origin. This in turn allows the use of % highly asymmetrical kernels. 
%
%  The floating point values in the kernel can also include a special value
%  known as 'nan' or 'not a number' to indicate that this value is not part
%  of the kernel array. This allows you to shape the kernel within its
%  rectangular area. That is 'nan' values provide a 'mask' for the kernel
%  shape.  However at least one non-nan value must be provided for correct
%  working of a kernel.
%
%  The returned kernel should be freed using the DestroyKernelInfo method
%  when you are finished with it.  Do not free this memory yourself.
%
%  Input kernel definition strings can consist of any of three types.
%
%    "name:args[[@><]"
%         Select from one of the built in kernels, using the name and
%         geometry arguments supplied.  See AcquireKernelBuiltIn()
%
%    "WxH[+X+Y][@><]:num, num, num ..."
%         a kernel of size W by H, with W*H floating point numbers following.
%         the 'center' can optionally be defined at +X+Y (such that +0+0
%         is top left corner). If not defined the pixel in the center, for
%         odd sizes, or to the immediate top or left of center for even sizes
%         is automatically selected.
%
%    "num, num, num, num, ..."
%         list of floating point numbers defining an 'old style' odd sized
%         square kernel.  At least 9 values should be provided for a 3x3
%         square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
%         Values can be space or comma separated.  This is not recommended.
%
%  You can define a 'list of kernels' which can be used by some morphology
%  operators. A list is defined as a semi-colon separated list of kernels.
%
%     " kernel ; kernel ; kernel ; "
%
%  Any extra ';' characters, at start, end or between kernel definitions are
%  simply ignored.
%
%  The special flags will expand a single kernel, into a list of rotated
%  kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
%  cyclic rotations, while a '>' will generate a list of 90-degree rotations.
%  The '<' also expands using 90-degree rotates, but giving a 180-degree
%  reflected kernel before the +/- 90-degree rotations, which can be important
%  for Thinning operations.
%
%  Note that 'name' kernels will start with an alphabetic character while the
%  new kernel specification has a ':' character in its specification string.
%  If neither is the case, it is assumed an old style of a simple list of
%  numbers generating a odd-sized square kernel has been given.
%
%  The format of the AcquireKernelInfo method is:
%
%      KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
%  A description of each parameter follows:
%
%    o kernel_string: the Morphology/Convolution kernel wanted.
%
*/

/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix.
**
** Parses one user-defined kernel specification ("WxH[+X+Y][@><]:v,v,..."
** or a bare list of numbers forming an odd-sized square) into a freshly
** allocated KernelInfo.  Parsing stops at the first ';' (or the end of the
** string).  On any error the partially built kernel is released via
** DestroyKernelInfo() and its return value (presumably NULL, matching the
** explicit NULL returns elsewhere in this file) is propagated.  The caller
** owns the returned kernel and must free it with DestroyKernelInfo().
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  /* allocate and zero a kernel; a NULL input yields this empty kernel */
  kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) memset(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature = MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
   */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      /* NOTE(review): (p-kernel_string) is not checked against
         MaxTextExtent before this memcpy into token[]; a very long
         geometry prefix could overflow -- verify callers bound this. */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
         args.rho = 1.0;               /* then  width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks */
      if ( args.xi  < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      /* origin defaults to the (top-left biased) center when not given */
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
                                        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
                                        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        GetNextToken(p,&p,MaxTextExtent,token);
        if (*token == ',')
          GetNextToken(p,&p,MaxTextExtent,token);
      }
      /* set the size of the kernel - old sized square
         (derive square side from the token count, e.g. 9 values -> 3x3) */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      /* rewind to re-scan the values from the start */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (double *) NULL)
    return(DestroyKernelInfo(kernel));
  /* seed min/max so the first real value always replaces them */
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    GetNextToken(p,&p,MaxTextExtent,token);
    if (*token == ',')
      GetNextToken(p,&p,MaxTextExtent,token);
    if (    LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      /* accumulate the signed ranges and track min/max as we go */
      ( kernel->values[i] < 0)
          ?  ( kernel->negative_range += kernel->values[i] )
          :  ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }

  /* sanity check -- no more values in kernel definition */
  GetNextToken(p,&p,MaxTextExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif

  /* check that we received at least one real (non-nan) value!
     (minimum is untouched only when every value parsed as nan) */
  if ( kernel->minimum == MagickMaximumValue )
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */

  return(kernel);
}

/* Parse a 'named' built-in kernel specification ("name:args[@><]"),
** filling in per-type argument defaults before delegating the actual
** construction to AcquireKernelBuiltIn().  Returns NULL when the leading
** token is not a recognised kernel name (or names UserDefinedKernel).
** Caller owns the returned kernel (list).
*/
static KernelInfo *ParseKernelName(const char *kernel_string)
{
  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  GetNextToken(kernel_string,&p,MaxTextExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */

  /* skip separators between the name and its arguments */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  /* NOTE(review): (end-p) is likewise unchecked against MaxTextExtent
     before this memcpy into token[] -- verify upstream bounds. */
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;       /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:    /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
          args.rho = 3;                /* then  width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }

  return(kernel);
}

/* Public entry point: convert a kernel string (or, with a leading '@',
** the contents of the named file) into a semi-colon separated list of
** kernels.  Named specifications go through ParseKernelName(), everything
** else through ParseKernelArray().  On any sub-parse failure the whole
** list built so far is destroyed and NULL is returned.
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    *kernel_cache,
    token[MaxTextExtent];

  const char
    *p;

  /* NULL input yields an empty user-defined kernel
     (ParseKernelArray() handles the NULL itself) */
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@file' form: read the kernel definition from the named file */
      ExceptionInfo *exception=AcquireExceptionInfo();
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      exception=DestroyExceptionInfo(exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  /* comma operator: peek at the next token without consuming the string */
  while (GetNextToken(p,(const char **) NULL,MaxTextExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);

        /* Error handling -- this is not proper error handling! */
        if (new_kernel == (KernelInfo *) NULL)
          {
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            return((KernelInfo *) NULL);
          }

        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }

    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     A c q u i r e   K e r n e l   B u i l t I n                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelBuiltIn() returned one of the 'named' built-in types of
%  kernels used for special purposes such as gaussian blurring, skeleton
%  pruning, and edge distance determination.
%
%  They take a KernelType, and a set of geometry style arguments, which were
%  typically decoded from a user supplied string, or from a more complex
%  Morphology Method that was requested.
%
%  The format of the AcquireKernelBuiltIn method is:
%
%      KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
%           const GeometryInfo args)
%
%  A description of each parameter follows:
%
%    o type: the pre-defined type of kernel wanted
%
%    o args: arguments defining or modifying the kernel
%
%  Convolution Kernels
%
%    Unity
%       The a No-Op or Scaling single element kernel.
%
%    Gaussian:{radius},{sigma}
%       Generate a two-dimensional gaussian kernel, as used by -gaussian.
%       The sigma for the curve is required.  The resulting kernel is
%       normalized,
%
%       If 'sigma' is zero, you get a single pixel on a field of zeros.
%
%       NOTE: that the 'radius' is optional, but if provided can limit (clip)
%       the final size of the resulting kernel to a square 2*radius+1 in size.
%       The radius should be at least 2 times that of the sigma value, or
%       severe clipping and aliasing may result.
If not given or set to 0 the % radius will be determined so as to produce the best minimal error % result, which is usally much larger than is normally needed. % % LoG:{radius},{sigma} % "Laplacian of a Gaussian" or "Mexician Hat" Kernel. % The supposed ideal edge detection, zero-summing kernel. % % An alturnative to this kernel is to use a "DoG" with a sigma ratio of % approx 1.6 (according to wikipedia). % % DoG:{radius},{sigma1},{sigma2} % "Difference of Gaussians" Kernel. % As "Gaussian" but with a gaussian produced by 'sigma2' subtracted % from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1. % The result is a zero-summing kernel. % % Blur:{radius},{sigma}[,{angle}] % Generates a 1 dimensional or linear gaussian blur, at the angle given % (current restricted to orthogonal angles). If a 'radius' is given the % kernel is clipped to a width of 2*radius+1. Kernel can be rotated % by a 90 degree angle. % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % Note that two convolutions with two "Blur" kernels perpendicular to % each other, is equivalent to a far larger "Gaussian" kernel with the % same sigma value, However it is much faster to apply. This is how the % "-blur" operator actually works. % % Comet:{width},{sigma},{angle} % Blur in one direction only, much like how a bright object leaves % a comet like trail. The Kernel is actually half a gaussian curve, % Adding two such blurs in opposite directions produces a Blur Kernel. % Angle can be rotated in multiples of 90 degrees. % % Note that the first argument is the width of the kernel and not the % radius of the kernel. % % Binomial:[{radius}] % Generate a discrete kernel using a 2 dimentional Pascel's Triangle % of values. Used for special forma of image filters % % # Still to be implemented... % # % # Filter2D % # Filter1D % # Set kernel values using a resize filter, and given scale (sigma) % # Cylindrical or Linear. Is this possible with an image? 
% # % % Named Constant Convolution Kernels % % All these are unscaled, zero-summing kernels by default. As such for % non-HDRI version of ImageMagick some form of normalization, user scaling, % and biasing the results is recommended, to prevent the resulting image % being 'clipped'. % % The 3x3 kernels (most of these) can be circularly rotated in multiples of % 45 degrees to generate the 8 angled varients of each of the kernels. % % Laplacian:{type} % Discrete Lapacian Kernels, (without normalization) % Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood) % Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood) % Type 2 : 3x3 with center:4 edge:1 corner:-2 % Type 3 : 3x3 with center:4 edge:-2 corner:1 % Type 5 : 5x5 laplacian % Type 7 : 7x7 laplacian % Type 15 : 5x5 LoG (sigma approx 1.4) % Type 19 : 9x9 LoG (sigma approx 1.4) % % Sobel:{angle} % Sobel 'Edge' convolution kernel (3x3) % | -1, 0, 1 | % | -2, 0, 2 | % | -1, 0, 1 | % % Roberts:{angle} % Roberts convolution kernel (3x3) % | 0, 0, 0 | % | -1, 1, 0 | % | 0, 0, 0 | % % Prewitt:{angle} % Prewitt Edge convolution kernel (3x3) % | -1, 0, 1 | % | -1, 0, 1 | % | -1, 0, 1 | % % Compass:{angle} % Prewitt's "Compass" convolution kernel (3x3) % | -1, 1, 1 | % | -1,-2, 1 | % | -1, 1, 1 | % % Kirsch:{angle} % Kirsch's "Compass" convolution kernel (3x3) % | -3,-3, 5 | % | -3, 0, 5 | % | -3,-3, 5 | % % FreiChen:{angle} % Frei-Chen Edge Detector is based on a kernel that is similar to % the Sobel Kernel, but is designed to be isotropic. That is it takes % into account the distance of the diagonal in the kernel. % % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | % | 1, 0, -1 | % % FreiChen:{type},{angle} % % Frei-Chen Pre-weighted kernels... % % Type 0: default un-nomalized version shown above. % % Type 1: Orthogonal Kernel (same as type 11 below) % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 2: Diagonal form of Kernel... 
% | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. 
% % WARNING: The above was layed out as per % http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf % But rotated 90 degrees so direction is from left rather than the top. % I have yet to find any secondary confirmation of the above. The only % other source found was actual source code at % http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf % Neigher paper defineds the kernels in a way that looks locical or % correct when taken as a whole. % % Boolean Kernels % % Diamond:[{radius}[,{scale}]] % Generate a diamond shaped kernel with given radius to the points. % Kernel size will again be radius*2+1 square and defaults to radius 1, % generating a 3x3 kernel that is slightly larger than a square. % % Square:[{radius}[,{scale}]] % Generate a square shaped kernel of size radius*2+1, and defaulting % to a 3x3 (radius 1). % % Octagon:[{radius}[,{scale}]] % Generate octagonal shaped kernel of given radius and constant scale. % Default radius is 3 producing a 7x7 kernel. A radius of 1 will result % in "Diamond" kernel. % % Disk:[{radius}[,{scale}]] % Generate a binary disk, thresholded at the radius given, the radius % may be a float-point value. Final Kernel size is floor(radius)*2+1 % square. A radius of 5.3 is the default. % % NOTE: That a low radii Disk kernels produce the same results as % many of the previously defined kernels, but differ greatly at larger % radii. Here is a table of equivalences... % "Disk:1" => "Diamond", "Octagon:1", or "Cross:1" % "Disk:1.5" => "Square" % "Disk:2" => "Diamond:2" % "Disk:2.5" => "Octagon" % "Disk:2.9" => "Square:2" % "Disk:3.5" => "Octagon:3" % "Disk:4.5" => "Octagon:4" % "Disk:5.4" => "Octagon:5" % "Disk:6.4" => "Octagon:6" % All other Disk shapes are unique to this kernel, but because a "Disk" % is more circular when using a larger radius, using a larger radius is % preferred over iterating the morphological operation. % % Rectangle:{geometry} % Simply generate a rectangle of 1's with the size given. 
You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernel is not a good general morphological kernel, but is used % more for highlighting and marking any single pixels in an image using, % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % a each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximataly 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel, % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels the fall between the two radii. % The default ring of pixels is as per "Ring". 
% Edges % Find flat orthogonal edges of a binary shape % Corners % Find 90 degree corners of a binary shape % Diagonals:type % A special kernel to thin the 'outside' of diagonals % LineEnds:type % Find end points of lines (for pruning a skeletion) % Two types of lines ends (default to both) can be searched for % Type 0: All line ends % Type 1: single kernel for 4-conneected line ends % Type 2: single kernel for simple line ends % LineJunctions % Find three line junctions (within a skeletion) % Type 0: all line junctions % Type 1: Y Junction kernel % Type 2: Diagonal T Junction kernel % Type 3: Orthogonal T Junction kernel % Type 4: Diagonal X Junction kernel % Type 5: Orthogonal + Junction kernel % Ridges:type % Find single pixel ridges or thin lines % Type 1: Fine single pixel thick lines and ridges % Type 2: Find two pixel thick lines and ridges % ConvexHull % Octagonal Thickening Kernel, to generate convex hulls of 45 degrees % Skeleton:type % Traditional skeleton generating kernels. % Type 1: Tradional Skeleton kernel (4 connected skeleton) % Type 2: HIPR2 Skeleton kernel (8 connected skeleton) % Type 3: Thinning skeleton based on a ressearch paper by % Dan S. Bloomberg (Default Type) % ThinSE:type % A huge variety of Thinning Kernels designed to preserve conectivity. % many other kernel sets use these kernels as source definitions. % Type numbers are 41-49, 81-89, 481, and 482 which are based on % the super and sub notations used in the source research paper. % % Distance Measuring Kernels % % Different types of distance measuring methods, which are used with the % a 'Distance' morphology method for generating a gradient based on % distance from an edge of a binary shape, though there is a technique % for handling a anti-aliased shape. % % See the 'Distance' Morphological Method, for information of how it is % applied. 
% % Chebyshev:[{radius}][x{scale}[%!]] % Chebyshev Distance (also known as Tchebychev or Chessboard distance) % is a value of one to any neighbour, orthogonal or diagonal. One why % of thinking of it is the number of squares a 'King' or 'Queen' in % chess needs to traverse reach any other position on a chess board. % It results in a 'square' like distance function, but one where % diagonals are given a value that is closer than expected. % % Manhattan:[{radius}][x{scale}[%!]] % Manhattan Distance (also known as Rectilinear, City Block, or the Taxi % Cab distance metric), it is the distance needed when you can only % travel in horizontal or vertical directions only. It is the % distance a 'Rook' in chess would have to travel, and results in a % diamond like distances, where diagonals are further than expected. % % Octagonal:[{radius}][x{scale}[%!]] % An interleving of Manhatten and Chebyshev metrics producing an % increasing octagonally shaped distance. Distances matches those of % the "Octagon" shaped kernel of the same radius. The minimum radius % and default is 2, producing a 5x5 kernel. % % Euclidean:[{radius}][x{scale}[%!]] % Euclidean distance is the 'direct' or 'as the crow flys' distance. % However by default the kernel size only has a radius of 1, which % limits the distance to 'Knight' like moves, with only orthogonal and % diagonal measurements being correct. As such for the default kernel % you will get octagonal like distance function. % % However using a larger radius such as "Euclidean:4" you will get a % much smoother distance gradient from the edge of the shape. Especially % if the image is pre-processed to include any anti-aliasing pixels. % Of course a larger kernel is slower to use, and not always needed. % % The first three Distance Measuring Kernels will only generate distances % of exact multiples of {scale} in binary images. As such you can use a % scale of 1 without loosing any information. 
However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *) NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; 
kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature = MagickCoreSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1, sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. * What we really want is a 'discrete gaussian' kernel. 
* * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, producing a smaller 
(darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. ** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". 
*/ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (< 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. 
*/ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. ** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) memset(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special case - generate a unity kernel */ { (void) 
memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ kernel=ParseKernelArray( 
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? 
*/ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +MagickSQ2; kernel->values[5] = -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +MagickSQ2; kernel->values[5] = kernel->values[7]= -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19"); if (kernel == (KernelInfo *) NULL) return(kernel); break; case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +MagickSQ2; kernel->values[5] = -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +MagickSQ2; kernel->values[7] = +MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +MagickSQ2; kernel->values[8] = -MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[2] = -MagickSQ2; kernel->values[6] = +MagickSQ2; 
CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) kernel->positive_range += kernel->values[i] = args->sigma; else 
kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] 
= nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? 
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 
0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>")); case 1: /* kernel for 4-connected line ends - no rotation */ kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; 
case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>")); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunatally we can not yet rotate a non-square kernel */ /* But then we can't flip a 
non-symetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* append the mirror versions too - no flip function yet */ 
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but the retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;"); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo( "ThinSE:41; ThinSE:42; ThinSE:43"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations. 
*/ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel */ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } 
if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 
= MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that its % can be modified without effecting the original. The cloned kernel should % be destroyed using DestoryKernelInfo() when no longer needed. 
%
%  The format of the CloneKernelInfo method is:
%
%      KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /*
    Detach the pointers shared with the source by the structure copy above.
    If either allocation below fails, DestroyKernelInfo(new_kernel) would
    otherwise free the ORIGINAL kernel's value buffer and destroy the
    original's 'next' list, leaving the caller with dangling pointers.
  */
  new_kernel->values=(double *) NULL;
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];

  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }

  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyKernelInfo() frees the memory used by a Convolution/Morphology
%  kernel.
% % The format of the DestroyKernelInfo method is: % % KernelInfo *DestroyKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to be destroyed % */ MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel) { assert(kernel != (KernelInfo *) NULL); if (kernel->next != (KernelInfo *) NULL) kernel->next=DestroyKernelInfo(kernel->next); kernel->values=(double *) RelinquishAlignedMemory(kernel->values); kernel=(KernelInfo *) RelinquishMagickMemory(kernel); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d M i r r o r K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandMirrorKernelInfo() takes a single kernel, and expands it into a % sequence of 90-degree rotated kernels but providing a reflected 180 % rotatation, before the -/+ 90-degree rotations. % % This special rotation order produces a better, more symetrical thinning of % objects. % % The format of the ExpandMirrorKernelInfo method is: % % void ExpandMirrorKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. */ #if 0 static void FlopKernelInfo(KernelInfo *kernel) { /* Do a Flop by reversing each row. 
*/ size_t y; register ssize_t x,r; register double *k,t; for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width) for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--) t=k[x], k[x]=k[r], k[r]=t; kernel->x = kernel->width - kernel->x - 1; angle = fmod(angle+180.0, 360.0); } #endif static void ExpandMirrorKernelInfo(KernelInfo *kernel) { KernelInfo *clone, *last; last = kernel; clone = CloneKernelInfo(last); if (clone == (KernelInfo *) NULL) return; RotateKernelInfo(clone, 180); /* flip */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); if (clone == (KernelInfo *) NULL) return; RotateKernelInfo(clone, 90); /* transpose */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); if (clone == (KernelInfo *) NULL) return; RotateKernelInfo(clone, 180); /* flop */ LastKernelInfo(last)->next = clone; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating % incrementally by the angle given, until the kernel repeats. % % WARNING: 45 degree rotations only works for 3x3 kernels. % While 90 degree roatations only works for linear and square kernels % % The format of the ExpandRotateKernelInfo method is: % % void ExpandRotateKernelInfo(KernelInfo *kernel, double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. 
*/

/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
     const KernelInfo *kernel2)
{
  register size_t
    i;

  /* check size and origin location */
  if (    kernel1->width != kernel2->width
       || kernel1->height != kernel2->height
       || kernel1->x != kernel2->x
       || kernel1->y != kernel2->y )
    return MagickFalse;

  /* check actual kernel values */
  for (i=0; i < (kernel1->width*kernel1->height); i++) {
    /* Test for Nan equivalence: unequal only when exactly one side is NaN.
       Two NaNs fall through and pass the fabs() test below, since any
       comparison against NaN is false. */
    if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
      return MagickFalse;
    /* Test actual values are equivalent (within MagickEpsilon tolerance) */
    if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
      return MagickFalse;
  }

  return MagickTrue;
}

/* Expand the kernel list by appending successive rotations of the last
   kernel, stopping as soon as a rotation matches the first kernel again
   (one full cycle) or a clone fails. */
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *clone_info,
    *last;

  last=kernel;
  /* DisableMSCWarning suppresses MSVC C4127 (constant conditional) for the
     intentional while(1); the loop always exits via one of the breaks. */
  DisableMSCWarning(4127)
  while (1) {
  RestoreMSCWarning
    clone_info=CloneKernelInfo(last);
    if (clone_info == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(clone_info,angle);
    if (SameKernelInfo(kernel,clone_info) != MagickFalse)
      break;  /* rotation cycle complete - discard this duplicate below */
    LastKernelInfo(last)->next=clone_info;
    last=clone_info;
  }
  if (clone_info != (KernelInfo *) NULL)
    clone_info=DestroyKernelInfo(clone_info); /* kernel repeated - junk */

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     C a l c M e t a K e r n a l I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel only,
%  using the kernel values.  This should only be used if it is not possible to
%  calculate that meta-data in some easier way.
%
%  It is important that the meta-data is correct before ScaleKernelInfo() is
%  used to perform kernel normalization.
%
%  The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%  WARNING: Minimum and Maximum values are assumed to include zero, even if
%  zero is not part of the kernel (as in Gaussian Derived kernels).  This
%  however is not true for flat-shaped morphological kernels.
%
%  WARNING: Only the specific kernel pointed to is modified, not a list of
%  multiple kernels.
%
%  This is an internal function and not expected to be useful outside this
%  module.  This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  /*
    Re-derive minimum/maximum and negative/positive range sums directly
    from the kernel values, flushing near-zero values to exactly zero.
  */
  register size_t
    n;

  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    double
      value;

    /* values within epsilon of zero are treated as exactly zero */
    if ( fabs(kernel->values[n]) < MagickEpsilon )
      kernel->values[n] = 0.0;
    value=kernel->values[n];
    if ( value < 0 )
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y A p p l y                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyApply() applies a morphological method, multiple times using
%  a list of multiple kernels.  This is the method that should be called by
%  other 'operators' that internally use morphology operations as part of
%  their processing.
%
%  It is basically equivalent to as MorphologyImage() (see below) but
%  without any user controls.  This allows internal programs to use this
%  function, to actually perform a specific task without possible interference
%  by any API user supplied settings.
%
%  It is MorphologyImage()'s task to extract any such user controls, and
%  pass them to this function for processing.
%
%  More specifically all given kernels should already be scaled, normalised,
%  and blended appropriately before being passed to this routine.  The
%  appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
%  The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,MorphologyMethod method,
%        const ChannelType channel, const ssize_t iterations,
%        const KernelInfo *kernel, const CompositeMethod compose,
%        const double bias, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the source image
%
%    o method: the morphology method to be applied.
%
%    o channel: the channels to which the operations are applied
%               The channel 'sync' flag determines if 'alpha weighting' is
%               applied for convolution style operations.
%
%    o iterations: apply the operation this many times (or no change).
%                  A value of -1 means loop until no change found.
%                  How this is applied may depend on the morphology method.
%                  Typically this is a value of 1.
%
%    o channel: the channel type.
%
%    o kernel: An array of double representing the morphology kernel.
%
%    o compose: How to handle or merge multi-kernel results.
%          If 'UndefinedCompositeOp' use default for the Morphology method.
%          If 'NoCompositeOp' force image to be re-iterated by each kernel.
%          Otherwise merge the results using the compose method given.
%
%    o bias: Convolution Output Bias.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
     const MorphologyMethod method, const ChannelType channel,
     const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag  "Morphology/Image"

  CacheView
    *p_view,    /* virtual (read) view of the source image */
    *q_view;    /* authentic (write) view of the result image */

  register ssize_t
    i;

  size_t
    *changes,     /* per-thread change counters, summed at the end */
    changed,
    virt_width;   /* source row width padded for kernel overhang */

  ssize_t
    y, offx, offy;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(result_image != (Image *) NULL);
  assert(result_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  status=MagickTrue;
  progress=0;

  p_view=AcquireVirtualCacheView(image,exception);
  q_view=AcquireAuthenticCacheView(result_image,exception);
  virt_width=image->columns+kernel->width-1;

  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop though kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
      /* kernel needs to used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
      /* kernel is used as is, without reflection */
      break;
    default:
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
  }

  changed=0;
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changes[i]=0;

  if ( method == ConvolveMorphology && kernel->width == 1 )
  { /* Special handling (for speed) of vertical (blur) kernels.
    ** This performs its handling in columns rather than in rows.
    ** This is only done for convolve as it is the only method that
    ** generates very large 1-D vertical kernels (such as a 'BlurKernel')
    **
    ** Timing tests (on single CPU laptop)
    ** Using a vertical 1-d Blue with normal row-by-row (below)
    **   time convert logo: -morphology Convolve Blur:0x10+90 null:
    **     0.807u
    ** Using this column method
    **   time convert logo: -morphology Convolve Blur:0x10+90 null:
    **     0.620u
    **
    ** Anthony Thyssen, 14 June 2010
    */
    register ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,result_image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      register const PixelPacket
        *magick_restrict p;

      register const IndexPacket
        *magick_restrict p_indexes;

      register PixelPacket
        *magick_restrict q;

      register IndexPacket
        *magick_restrict q_indexes;

      register ssize_t
        y;

      ssize_t
        r;

      if (status == MagickFalse)
        continue;
      /* read a full column, tall enough for the kernel to overhang */
      p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1,
        exception);
      q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      p_indexes=GetCacheViewVirtualIndexQueue(p_view);
      q_indexes=GetCacheViewAuthenticIndexQueue(q_view);

      /* offset to origin in 'p'.  while 'q' points to it directly */
      r = offy;

      for (y=0; y < (ssize_t) image->rows; y++)
      {
        DoublePixelPacket
          result;

        register ssize_t
          v;

        register const double
          *magick_restrict k;

        register const PixelPacket
          *magick_restrict k_pixels;

        register const IndexPacket
          *magick_restrict k_indexes;

        /* Copy input image to the output image for unused channels
         * This removes need for 'cloning' a new image every iteration
         */
        *q = p[r];
        if (image->colorspace == CMYKColorspace)
          SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+y+r));

        /* Set the bias of the weighted average output */
        result.red     =
        result.green   =
        result.blue    =
        result.opacity =
        result.index   = bias;

        /* Weighted Average of pixels using reflected kernel
        **
        ** NOTE for correct working of this operation for asymetrical
        ** kernels, the kernel needs to be applied in its reflected form.
        ** That is its values needs to be reversed.
        */
        k = &kernel->values[ kernel->height-1 ];
        k_pixels = p;
        k_indexes = p_indexes+y;
        if ( ((channel & SyncChannels) == 0 ) ||
             (image->matte == MagickFalse) )
          { /* No 'Sync' involved.
            ** Convolution is simple greyscale channel operation
            */
            for (v=0; v < (ssize_t) kernel->height; v++) {
              if ( IsNaN(*k) ) continue;  /* NaN kernel entries are skipped */
              result.red     += (*k)*GetPixelRed(k_pixels);
              result.green   += (*k)*GetPixelGreen(k_pixels);
              result.blue    += (*k)*GetPixelBlue(k_pixels);
              result.opacity += (*k)*GetPixelOpacity(k_pixels);
              if ( image->colorspace == CMYKColorspace)
                result.index += (*k)*(*k_indexes);
              k--;
              k_pixels++;
              k_indexes++;
            }
            if ((channel & RedChannel) != 0)
              SetPixelRed(q,ClampToQuantum(result.red));
            if ((channel & GreenChannel) != 0)
              SetPixelGreen(q,ClampToQuantum(result.green));
            if ((channel & BlueChannel) != 0)
              SetPixelBlue(q,ClampToQuantum(result.blue));
            if (((channel & OpacityChannel) != 0) &&
                (image->matte != MagickFalse))
              SetPixelOpacity(q,ClampToQuantum(result.opacity));
            if (((channel & IndexChannel) != 0) &&
                (image->colorspace == CMYKColorspace))
              SetPixelIndex(q_indexes+y,ClampToQuantum(result.index));
          }
        else
          { /* Channel 'Sync' Flag, and Alpha Channel enabled.
            ** Weight the color channels with Alpha Channel so that
            ** transparent pixels are not part of the results.
            */
            double
              gamma;  /* divisor, sum of color alpha weighting */

            MagickRealType
              alpha;  /* alpha weighting for colors : alpha  */

            size_t
              count;  /* alpha valus collected, number kernel values */

            count=0;
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              if ( IsNaN(*k) ) continue;
              alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels));
              count++;           /* number of alpha values collected */
              alpha*=(*k);       /* include kernel weighting now */
              gamma += alpha;    /* normalize alpha weights only */
              result.red     += alpha*GetPixelRed(k_pixels);
              result.green   += alpha*GetPixelGreen(k_pixels);
              result.blue    += alpha*GetPixelBlue(k_pixels);
              result.opacity += (*k)*GetPixelOpacity(k_pixels);
              if ( image->colorspace == CMYKColorspace)
                result.index += alpha*(*k_indexes);
              k--;
              k_pixels++;
              k_indexes++;
            }
            /* Sync'ed channels, all channels are modified */
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              gamma*=(double) kernel->height/count;
            SetPixelRed(q,ClampToQuantum(gamma*result.red));
            SetPixelGreen(q,ClampToQuantum(gamma*result.green));
            SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
            SetPixelOpacity(q,ClampToQuantum(result.opacity));
            if (image->colorspace == CMYKColorspace)
              SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index));
          }

        /* Count up changed pixels */
        if (   ( p[r].red != GetPixelRed(q))
            || ( p[r].green != GetPixelGreen(q))
            || ( p[r].blue != GetPixelBlue(q))
            || ( (image->matte != MagickFalse) &&
                 (p[r].opacity != GetPixelOpacity(q)))
            || ( (image->colorspace == CMYKColorspace) &&
                 (GetPixelIndex(p_indexes+y+r) != GetPixelIndex(q_indexes+y))) )
          changes[id]++;
        p++;
        q++;
      } /* y */
      if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          /* NOTE(review): SetImageProgress is invoked here from inside the
          ** OpenMP parallel region without a critical section, and progress
          ** counts columns against an image->rows total -- confirm against
          ** upstream, which guards this with an omp critical. */
          proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    } /* x */
    result_image->type=image->type;
    q_view=DestroyCacheView(q_view);
    p_view=DestroyCacheView(p_view);
    for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
      changed+=changes[i];
    changes=(size_t *) RelinquishMagickMemory(changes);
    /* NOTE(review): this fast path returns 0 on failure while the general
    ** path below returns -1 -- confirm callers treat both consistently. */
    return(status ? (ssize_t) changed : 0);
  }

  /*
  ** Normal handling of horizontal or rectangular kernels (row by row)
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,result_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    size_t
      r;

    if (status == MagickFalse)
      continue;
    /* read a row band tall enough for the kernel, padded for overhang */
    p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
      kernel->height, exception);
    q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    p_indexes=GetCacheViewVirtualIndexQueue(p_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(q_view);

    /* offset to origin in 'p'.  while 'q' points to it directly */
    r = virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict k_pixels;

      register const IndexPacket
        *magick_restrict k_indexes;

      DoublePixelPacket
        result,
        min,
        max;

      /* Copy input image to the output image for unused channels
       * This removes need for 'cloning' a new image every iteration
       */
      *q = p[r];
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+x+r));

      /* Defaults */
      min.red     =
      min.green   =
      min.blue    =
      min.opacity =
      min.index   = (double) QuantumRange;
      max.red     =
      max.green   =
      max.blue    =
      max.opacity =
      max.index   = 0.0;
      /* default result is the original pixel value */
      result.red     = (double) p[r].red;
      result.green   = (double) p[r].green;
      result.blue    = (double) p[r].blue;
      result.opacity = QuantumRange - (double) p[r].opacity;
      result.index   = 0.0;
      if ( image->colorspace == CMYKColorspace)
         result.index = (double) GetPixelIndex(p_indexes+x+r);

      switch (method) {
        case ConvolveMorphology:
          /* Set the bias of the weighted average output */
          result.red     =
          result.green   =
          result.blue    =
          result.opacity =
          result.index   = bias;
          break;
        case DilateIntensityMorphology:
        case ErodeIntensityMorphology:
          /* use a boolean flag indicating when first match found */
          result.red = 0.0;  /* result is not used otherwise */
          break;
        default:
          break;
      }

      switch ( method ) {
        case ConvolveMorphology:
            /* Weighted Average of pixels using reflected kernel
            **
            ** NOTE for correct working of this operation for asymetrical
            ** kernels, the kernel needs to be applied in its reflected form.
            ** That is its values needs to be reversed.
            **
            ** Correlation is actually the same as this but without reflecting
            ** the kernel, and thus 'lower-level' that Convolution.  However
            ** as Convolution is the more common method used, and it does not
            ** really cost us much in terms of processing to use a reflected
            ** kernel, so it is Convolution that is implemented.
            **
            ** Correlation will have its kernel reflected before calling
            ** this function to do a Convolve.
            **
            ** For more details of Correlation vs Convolution see
            **   http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            if ( ((channel & SyncChannels) == 0 ) ||
                 (image->matte == MagickFalse) )
              { /* No 'Sync' involved.
                ** Convolution is simple greyscale channel operation
                */
                for (v=0; v < (ssize_t) kernel->height; v++) {
                  for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                    if ( IsNaN(*k) ) continue;
                    result.red     += (*k)*k_pixels[u].red;
                    result.green   += (*k)*k_pixels[u].green;
                    result.blue    += (*k)*k_pixels[u].blue;
                    result.opacity += (*k)*k_pixels[u].opacity;
                    if ( image->colorspace == CMYKColorspace)
                      result.index += (*k)*GetPixelIndex(k_indexes+u);
                  }
                  k_pixels += virt_width;
                  k_indexes += virt_width;
                }
                if ((channel & RedChannel) != 0)
                  SetPixelRed(q,ClampToQuantum((MagickRealType) result.red));
                if ((channel & GreenChannel) != 0)
                  SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green));
                if ((channel & BlueChannel) != 0)
                  SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue));
                if (((channel & OpacityChannel) != 0) &&
                    (image->matte != MagickFalse))
                  SetPixelOpacity(q,ClampToQuantum((MagickRealType)
                    result.opacity));
                if (((channel & IndexChannel) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
              }
            else
              { /* Channel 'Sync' Flag, and Alpha Channel enabled.
                ** Weight the color channels with Alpha Channel so that
                ** transparent pixels are not part of the results.
                */
                double
                  alpha,  /* alpha weighting for colors : alpha  */
                  gamma;  /* divisor, sum of color alpha weighting */

                size_t
                  count;  /* alpha valus collected, number kernel values */

                count=0;
                gamma=0.0;
                for (v=0; v < (ssize_t) kernel->height; v++) {
                  for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                    if ( IsNaN(*k) ) continue;
                    alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity);
                    count++;           /* number of alpha values collected */
                    alpha*=(*k);       /* include kernel weighting now */
                    gamma += alpha;    /* normalize alpha weights only */
                    result.red     += alpha*k_pixels[u].red;
                    result.green   += alpha*k_pixels[u].green;
                    result.blue    += alpha*k_pixels[u].blue;
                    result.opacity += (*k)*k_pixels[u].opacity;
                    if ( image->colorspace == CMYKColorspace)
                      result.index+=alpha*GetPixelIndex(k_indexes+u);
                  }
                  k_pixels += virt_width;
                  k_indexes += virt_width;
                }
                /* Sync'ed channels, all channels are modified */
                gamma=PerceptibleReciprocal(gamma);
                if (count != 0)
                  gamma*=(double) kernel->height*kernel->width/count;
                SetPixelRed(q,ClampToQuantum((MagickRealType)
                  (gamma*result.red)));
                SetPixelGreen(q,ClampToQuantum((MagickRealType)
                  (gamma*result.green)));
                SetPixelBlue(q,ClampToQuantum((MagickRealType)
                  (gamma*result.blue)));
                SetPixelOpacity(q,ClampToQuantum(result.opacity));
                if (image->colorspace == CMYKColorspace)
                  SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType)
                    (gamma* result.index)));
              }
            break;

        case ErodeMorphology:
            /* Minimum Value within kernel neighbourhood
            **
            ** NOTE that the kernel is not reflected for this operation!
            **
            ** NOTE: in normal Greyscale Morphology, the kernel value should
            ** be added to the real value, this is currently not done, due to
            ** the nature of the boolean kernels being used.
            */
            k = kernel->values;
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k++) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue;
                Minimize(min.red,     (double) k_pixels[u].red);
                Minimize(min.green,   (double) k_pixels[u].green);
                Minimize(min.blue,    (double) k_pixels[u].blue);
                Minimize(min.opacity, QuantumRange-(double)
                  k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Minimize(min.index,(double) GetPixelIndex(k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case DilateMorphology:
            /* Maximum Value within kernel neighbourhood
            **
            ** NOTE for correct working of this operation for asymetrical
            ** kernels, the kernel needs to be applied in its reflected form.
            ** That is its values needs to be reversed.
            **
            ** NOTE: in normal Greyscale Morphology, the kernel value should
            ** be added to the real value, this is currently not done, due to
            ** the nature of the boolean kernels being used.
            **
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue;
                Maximize(max.red,     (double) k_pixels[u].red);
                Maximize(max.green,   (double) k_pixels[u].green);
                Maximize(max.blue,    (double) k_pixels[u].blue);
                Maximize(max.opacity, QuantumRange-(double)
                  k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Maximize(max.index, (double) GetPixelIndex( k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case HitAndMissMorphology:
        case ThinningMorphology:
        case ThickenMorphology:
            /* Minimum of Foreground Pixel minus Maxumum of Background Pixels
            **
            ** NOTE that the kernel is not reflected for this operation,
            ** and consists of both foreground and background pixel
            ** neighbourhoods, 0.0 for background, and 1.0 for foreground
            ** with either Nan or 0.5 values for don't care.
            **
            ** Note that this will never produce a meaningless negative
            ** result.  Such results can cause Thinning/Thicken to not work
            ** correctly when used against a greyscale image.
            */
            k = kernel->values;
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k++) {
                if ( IsNaN(*k) ) continue;
                if ( (*k) > 0.7 )
                { /* minimim of foreground pixels */
                  Minimize(min.red,     (double) k_pixels[u].red);
                  Minimize(min.green,   (double) k_pixels[u].green);
                  Minimize(min.blue,    (double) k_pixels[u].blue);
                  Minimize(min.opacity, QuantumRange-(double)
                    k_pixels[u].opacity);
                  if ( image->colorspace == CMYKColorspace)
                    Minimize(min.index,(double) GetPixelIndex( k_indexes+u));
                }
                else if ( (*k) < 0.3 )
                { /* maximum of background pixels */
                  Maximize(max.red,     (double) k_pixels[u].red);
                  Maximize(max.green,   (double) k_pixels[u].green);
                  Maximize(max.blue,    (double) k_pixels[u].blue);
                  Maximize(max.opacity, QuantumRange-(double)
                    k_pixels[u].opacity);
                  if ( image->colorspace == CMYKColorspace)
                    Maximize(max.index, (double) GetPixelIndex( k_indexes+u));
                }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            /* Pattern Match if difference is positive */
            min.red     -= max.red;     Maximize( min.red,     0.0 );
            min.green   -= max.green;   Maximize( min.green,   0.0 );
            min.blue    -= max.blue;    Maximize( min.blue,    0.0 );
            min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
            min.index   -= max.index;   Maximize( min.index,   0.0 );
            break;

        case ErodeIntensityMorphology:
            /* Select Pixel with Minimum Intensity within kernel neighbourhood
            **
            ** WARNING: the intensity test fails for CMYK and does not
            ** take into account the moderating effect of the alpha channel
            ** on the intensity.
            **
            ** NOTE that the kernel is not reflected for this operation!
            */
            k = kernel->values;
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k++) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue;
                if ( result.red == 0.0 ||
                     GetPixelIntensity(image,&(k_pixels[u])) <
                     GetPixelIntensity(result_image,q) ) {
                  /* copy the whole pixel - no channel selection */
                  *q = k_pixels[u];
                  if ( result.red > 0.0 ) changes[id]++;
                  result.red = 1.0;
                }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case DilateIntensityMorphology:
            /* Select Pixel with Maximum Intensity within kernel neighbourhood
            **
            ** WARNING: the intensity test fails for CMYK and does not
            ** take into account the moderating effect of the alpha channel
            ** on the intensity (yet).
            **
            ** NOTE for correct working of this operation for asymetrical
            ** kernels, the kernel needs to be applied in its reflected form.
            ** That is its values needs to be reversed.
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
                if ( result.red == 0.0 ||
                     GetPixelIntensity(image,&(k_pixels[u])) >
                     GetPixelIntensity(result_image,q) ) {
                  /* copy the whole pixel - no channel selection */
                  *q = k_pixels[u];
                  if ( result.red > 0.0 ) changes[id]++;
                  result.red = 1.0;
                }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case IterativeDistanceMorphology:
            /* Work out an iterative distance from black edge of a white image
            ** shape.  Essentually white values are decreased to the smallest
            ** 'distance from edge' it can find.
            **
            ** It works by adding kernel values to the neighbourhood, and and
            ** select the minimum value found. The kernel is rotated before
            ** use, so kernel distances match resulting distances, when a user
            ** provided asymmetric kernel is applied.
            **
            **
            ** This code is almost identical to True GrayScale Morphology But
            ** not quite.
            **
            ** GreyDilate  Kernel values added, maximum value found Kernel is
            ** rotated before use.
            **
            ** GrayErode:  Kernel values subtracted and minimum value found No
            ** kernel rotation used.
            **
            ** Note the the Iterative Distance method is essentially a
            ** GrayErode, but with negative kernel values, and kernel
            ** rotation applied.
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) ) continue;
                Minimize(result.red,     (*k)+k_pixels[u].red);
                Minimize(result.green,   (*k)+k_pixels[u].green);
                Minimize(result.blue,    (*k)+k_pixels[u].blue);
                Minimize(result.opacity, (*k)+QuantumRange-
                  k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case UndefinedMorphology:
        default:
            break; /* Do nothing */
      }

      /* Final mathematics of results (combine with original image?)
      **
      ** NOTE: Difference Morphology operators Edge* and *Hat could also
      ** be done here but works better with iteration as a image difference
      ** in the controlling function (below).  Thicken and Thinning however
      ** should be done here so thay can be iterated correctly.
      */
      switch ( method ) {
        case HitAndMissMorphology:
        case ErodeMorphology:
          result = min;    /* minimum of neighbourhood */
          break;
        case DilateMorphology:
          result = max;    /* maximum of neighbourhood */
          break;
        case ThinningMorphology:
          /* subtract pattern match from original */
          result.red     -= min.red;
          result.green   -= min.green;
          result.blue    -= min.blue;
          result.opacity -= min.opacity;
          result.index   -= min.index;
          break;
        case ThickenMorphology:
          /* Add the pattern matchs to the original */
          result.red     += min.red;
          result.green   += min.green;
          result.blue    += min.blue;
          result.opacity += min.opacity;
          result.index   += min.index;
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }

      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case UndefinedMorphology:
        case ConvolveMorphology:
        case DilateIntensityMorphology:
        case ErodeIntensityMorphology:
          break;  /* full pixel was directly assigned - not a channel method */
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          /* result.opacity holds alpha here (QuantumRange-opacity), so the
             alpha setter is used rather than SetPixelOpacity */
          if ((channel & OpacityChannel) != 0
              && image->matte != MagickFalse )
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }

      /* Count up changed pixels */
      if (   ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( (image->matte != MagickFalse) &&
               (p[r].opacity != GetPixelOpacity(q)))
          || ( (image->colorspace == CMYKColorspace) &&
               (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changes[id]++;
      p++;
      q++;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
      proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
      if (proceed == MagickFalse)
        status=MagickFalse;
    }
  } /* y */
  q_view=DestroyCacheView(q_view);
  p_view=DestroyCacheView(p_view);
  /* Reduce the per-thread change counters into a single grand total. */
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changed+=changes[i];
  changes=(size_t *) RelinquishMagickMemory(changes);
  /* Return total number of changed pixels, or -1 on failure. */
  return(status ? (ssize_t)changed : -1);
}

/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CacheView
    *auth_view,   /* authentic (writable) view into 'image' */
    *virt_view;   /* virtual (read, with edge handling) view into 'image' */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y, offx, offy;

  size_t
    changed, virt_width;

  status=MagickTrue;
  changed=0;
  progress=0;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  /* Some methods (including convolve) needs use a reflected kernel.
   * Adjust 'origin' offsets to loop though kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case DistanceMorphology:
    case VoronoiMorphology:
      /* kernel needs to used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
#if 0
    case ?????Morphology:
      /* kernel is used as is, without reflection */
      break;
#endif
    default:
      /* NOTE(review): a string literal is never NULL, so this assert can
      ** never fire; presumably an always-fail guard was intended
      ** (e.g. assert(!"...")) -- confirm before changing. */
      assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
      break;
  }

  /* DO NOT THREAD THIS CODE! */
  /* two views into same image (virtual, and actual) */
  virt_view=AcquireVirtualCacheView(image,exception);
  auth_view=AcquireAuthenticCacheView(image,exception);
  virt_width=image->columns+kernel->width-1;

  /* Pass 1: top-down, left-to-right. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only top half of kernel is processed as we do a single pass downward
    ** through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      break;
    p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width,
      (size_t) offy+1, exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* offset to origin in 'p'.  while 'q' points to it directly */
    r = (ssize_t) virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict k_pixels;

      register const IndexPacket
        *magick_restrict k_indexes;

      MagickPixelPacket
        result;

      /* Starting Defaults */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found. */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red,     (*k)+k_pixels[u].red);
              Minimize(result.green,   (*k)+k_pixels[u].green);
              Minimize(result.blue,    (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            Minimize(result.red,     (*k)+k_pixels[u].red);
            Minimize(result.green,   (*k)+k_pixels[u].green);
            Minimize(result.blue,    (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, while coping the color
          ** values of the closest pixel.
          **
          ** This is experimental, and realy the 'alpha' component should
          ** be completely separate 'masking' channel so that alpha can
          ** also be used as part of the results.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) ) ||
           ( p[r].green != GetPixelGreen(q) ) ||
           ( p[r].blue != GetPixelBlue(q) ) ||
           ( (image->matte != MagickFalse) &&
             (p[r].opacity != GetPixelOpacity(q))) ||
           ( (image->colorspace == CMYKColorspace) &&
             (GetPixelIndex(p_indexes+x+r) !=
              GetPixelIndex(q_indexes+x))) )
        changed++;  /* The pixel was changed in some way! */

      p++;  /* increment pixel buffers */
      q++;
    } /* x */

    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
                == MagickFalse )
        status=MagickFalse;
  } /* y */

  /* Do the reversed pass through the image */
  for (y=(ssize_t)image->rows-1; y >= 0; y--)
  {
    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    if (status == MagickFalse)
      break;
    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only the bottom half of the kernel will be processes as we
    ** up the image.
    */
    p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width,
      (size_t) kernel->y+1, exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* adjust positions to end of row */
    p += image->columns-1;
    q += image->columns-1;

    /* offset to origin in 'p'.  while 'q' points to it directly */
    r = offx;

    for (x=(ssize_t)image->columns-1; x >= 0; x--)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict k_pixels;

      register const IndexPacket
        *magick_restrict k_indexes;

      MagickPixelPacket
        result;

      /* Default - previously modified pixel */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found. */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red,     (*k)+k_pixels[u].red);
              Minimize(result.green,   (*k)+k_pixels[u].green);
              Minimize(result.blue,    (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNaN(*k) ) continue;
            Minimize(result.red,     (*k)+k_pixels[u].red);
            Minimize(result.green,   (*k)+k_pixels[u].green);
            Minimize(result.blue,    (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, coping the closest color.
          **
          ** This is experimental, and realy the 'alpha' component should
          ** be completely separate 'masking' channel.
          */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) ) ||
           ( p[r].green != GetPixelGreen(q) ) ||
           ( p[r].blue != GetPixelBlue(q) ) ||
           ( (image->matte != MagickFalse) &&
             (p[r].opacity != GetPixelOpacity(q))) ||
           ( (image->colorspace == CMYKColorspace) &&
             (GetPixelIndex(p_indexes+x+r) !=
              GetPixelIndex(q_indexes+x))) )
        changed++;  /* The pixel was changed in some way! */

      p--;  /* go backward through pixel buffers */
      q--;
    } /* x */

    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
                == MagickFalse )
        status=MagickFalse;
  } /* y */

  auth_view=DestroyCacheView(auth_view);
  virt_view=DestroyCacheView(virt_view);
  /* Return total number of changed pixels, or -1 on failure. */
  return(status ? (ssize_t) changed : -1);
}

/* Apply a Morphology by calling one of the above low level primitive
** application functions.  This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that is based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations, const KernelInfo *kernel,
  const CompositeOperator compose, const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function?
*/ verbose; /* verbose output of results */ size_t method_loop, /* Loop 1: number of compound method iterations (norm 1) */ method_limit, /* maximum number of compound method iterations */ kernel_number, /* Loop 2: the kernel number being applied */ stage_loop, /* Loop 3: primitive loop for compound morphology */ stage_limit, /* how many primitives are in this compound */ kernel_loop, /* Loop 4: iterate the kernel over image */ kernel_limit, /* number of times to iterate kernel */ count, /* total count of primitive steps applied */ kernel_changed, /* total count of changed using iterated kernel */ method_changed; /* total count of changed over method iteration */ ssize_t changed; /* number pixels changed by last primitive operation */ char v_info[MaxTextExtent]; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); count = 0; /* number of low-level morphology primitives performed */ if ( iterations == 0 ) return((Image *) NULL); /* null operation - nothing to do! */ kernel_limit = (size_t) iterations; if ( iterations < 0 ) /* negative interations = infinite (well alomst) */ kernel_limit = image->columns>image->rows ? 
image->columns : image->rows; verbose = IsMagickTrue(GetImageArtifact(image,"debug")); /* initialise for cleanup */ curr_image = (Image *) image; curr_compose = image->compose; (void) curr_compose; work_image = save_image = rslt_image = (Image *) NULL; reflected_kernel = (KernelInfo *) NULL; /* Initialize specific methods * + which loop should use the given iteratations * + how many primitives make up the compound morphology * + multi-kernel compose method to use (by default) */ method_limit = 1; /* just do method once, unless otherwise set */ stage_limit = 1; /* assume method is not a compound */ special = MagickFalse; /* assume it is NOT a direct modify primitive */ rslt_compose = compose; /* and we are composing multi-kernels as given */ switch( method ) { case SmoothMorphology: /* 4 primitive compound morphology */ stage_limit = 4; break; case OpenMorphology: /* 2 primitive compound morphology */ case OpenIntensityMorphology: case TopHatMorphology: case CloseMorphology: case CloseIntensityMorphology: case BottomHatMorphology: case EdgeMorphology: stage_limit = 2; break; case HitAndMissMorphology: rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */ /* FALL THUR */ case ThinningMorphology: case ThickenMorphology: method_limit = kernel_limit; /* iterate the whole method */ kernel_limit = 1; /* do not do kernel iteration */ break; case DistanceMorphology: case VoronoiMorphology: special = MagickTrue; /* use special direct primative */ break; default: break; } /* Apply special methods with special requirments ** For example, single run only, or post-processing requirements */ if ( special != MagickFalse ) { rslt_image=CloneImage(image,0,0,MagickTrue,exception); if (rslt_image == (Image *) NULL) goto error_cleanup; if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse) { InheritException(exception,&rslt_image->exception); goto error_cleanup; } changed = MorphologyPrimitiveDirect(rslt_image, method, channel, kernel, exception); if ( 
verbose != MagickFalse ) (void) (void) FormatLocaleFile(stderr, "%s:%.20g.%.20g #%.20g => Changed %.20g\n", CommandOptionToMnemonic(MagickMorphologyOptions, method), 1.0,0.0,1.0, (double) changed); if ( changed < 0 ) goto error_cleanup; if ( method == VoronoiMorphology ) { /* Preserve the alpha channel of input image - but turned off */ (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel); (void) CompositeImageChannel(rslt_image, DefaultChannels, CopyOpacityCompositeOp, image, 0, 0); (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel); } goto exit_cleanup; } /* Handle user (caller) specified multi-kernel composition method */ if ( compose != UndefinedCompositeOp ) rslt_compose = compose; /* override default composition for method */ if ( rslt_compose == UndefinedCompositeOp ) rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */ /* Some methods require a reflected kernel to use with primitives. * Create the reflected kernel for those methods. */ switch ( method ) { case CorrelateMorphology: case CloseMorphology: case CloseIntensityMorphology: case BottomHatMorphology: case SmoothMorphology: reflected_kernel = CloneKernelInfo(kernel); if (reflected_kernel == (KernelInfo *) NULL) goto error_cleanup; RotateKernelInfo(reflected_kernel,180); break; default: break; } /* Loops around more primitive morpholgy methods ** erose, dilate, open, close, smooth, edge, etc... 
*/ /* Loop 1: iterate the compound method */ method_loop = 0; method_changed = 1; while ( method_loop < method_limit && method_changed > 0 ) { method_loop++; method_changed = 0; /* Loop 2: iterate over each kernel in a multi-kernel list */ norm_kernel = (KernelInfo *) kernel; this_kernel = (KernelInfo *) kernel; rflt_kernel = reflected_kernel; kernel_number = 0; while ( norm_kernel != NULL ) { /* Loop 3: Compound Morphology Staging - Select Primative to apply */ stage_loop = 0; /* the compound morphology stage number */ while ( stage_loop < stage_limit ) { stage_loop++; /* The stage of the compound morphology */ /* Select primitive morphology for this stage of compound method */ this_kernel = norm_kernel; /* default use unreflected kernel */ primitive = method; /* Assume method is a primitive */ switch( method ) { case ErodeMorphology: /* just erode */ case EdgeInMorphology: /* erode and image difference */ primitive = ErodeMorphology; break; case DilateMorphology: /* just dilate */ case EdgeOutMorphology: /* dilate and image difference */ primitive = DilateMorphology; break; case OpenMorphology: /* erode then dialate */ case TopHatMorphology: /* open and image difference */ primitive = ErodeMorphology; if ( stage_loop == 2 ) primitive = DilateMorphology; break; case OpenIntensityMorphology: primitive = ErodeIntensityMorphology; if ( stage_loop == 2 ) primitive = DilateIntensityMorphology; break; case CloseMorphology: /* dilate, then erode */ case BottomHatMorphology: /* close and image difference */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateMorphology; if ( stage_loop == 2 ) primitive = ErodeMorphology; break; case CloseIntensityMorphology: this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateIntensityMorphology; if ( stage_loop == 2 ) primitive = ErodeIntensityMorphology; break; case SmoothMorphology: /* open, close */ switch ( stage_loop ) { case 1: /* start an open method, which starts with Erode */ 
primitive = ErodeMorphology; break; case 2: /* now Dilate the Erode */ primitive = DilateMorphology; break; case 3: /* Reflect kernel a close */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateMorphology; break; case 4: /* Finish the Close */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = ErodeMorphology; break; } break; case EdgeMorphology: /* dilate and erode difference */ primitive = DilateMorphology; if ( stage_loop == 2 ) { save_image = curr_image; /* save the image difference */ curr_image = (Image *) image; primitive = ErodeMorphology; } break; case CorrelateMorphology: /* A Correlation is a Convolution with a reflected kernel. ** However a Convolution is a weighted sum using a reflected ** kernel. It may seem stange to convert a Correlation into a ** Convolution as the Correlation is the simplier method, but ** Convolution is much more commonly used, and it makes sense to ** implement it directly so as to avoid the need to duplicate the ** kernel when it is not required (which is typically the ** default). 
*/ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = ConvolveMorphology; break; default: break; } assert( this_kernel != (KernelInfo *) NULL ); /* Extra information for debugging compound operations */ if ( verbose != MagickFalse ) { if ( stage_limit > 1 ) (void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ", CommandOptionToMnemonic(MagickMorphologyOptions,method),(double) method_loop,(double) stage_loop); else if ( primitive != method ) (void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ", CommandOptionToMnemonic(MagickMorphologyOptions, method),(double) method_loop); else v_info[0] = '\0'; } /* Loop 4: Iterate the kernel with primitive */ kernel_loop = 0; kernel_changed = 0; changed = 1; while ( kernel_loop < kernel_limit && changed > 0 ) { kernel_loop++; /* the iteration of this kernel */ /* Create a clone as the destination image, if not yet defined */ if ( work_image == (Image *) NULL ) { work_image=CloneImage(image,0,0,MagickTrue,exception); if (work_image == (Image *) NULL) goto error_cleanup; if (SetImageStorageClass(work_image,DirectClass) == MagickFalse) { InheritException(exception,&work_image->exception); goto error_cleanup; } /* work_image->type=image->type; ??? */ } /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */ count++; changed = MorphologyPrimitive(curr_image, work_image, primitive, channel, this_kernel, bias, exception); if ( verbose != MagickFalse ) { if ( kernel_loop > 1 ) (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */ (void) (void) FormatLocaleFile(stderr, "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g", v_info,CommandOptionToMnemonic(MagickMorphologyOptions, primitive),(this_kernel == rflt_kernel ) ? 
"*" : "", (double) (method_loop+kernel_loop-1),(double) kernel_number, (double) count,(double) changed); } if ( changed < 0 ) goto error_cleanup; kernel_changed += changed; method_changed += changed; /* prepare next loop */ { Image *tmp = work_image; /* swap images for iteration */ work_image = curr_image; curr_image = tmp; } if ( work_image == image ) work_image = (Image *) NULL; /* replace input 'image' */ } /* End Loop 4: Iterate the kernel with primitive */ if ( verbose != MagickFalse && kernel_changed != (size_t)changed ) (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed); if ( verbose != MagickFalse && stage_loop < stage_limit ) (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */ #if 0 (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image); (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image); (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image); (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image); (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image); #endif } /* End Loop 3: Primative (staging) Loop for Coumpound Methods */ /* Final Post-processing for some Compound Methods ** ** The removal of any 'Sync' channel flag in the Image Compositon ** below ensures the methematical compose method is applied in a ** purely mathematical way, and only to the selected channels. ** Turn off SVG composition 'alpha blending'. 
*/ switch( method ) { case EdgeOutMorphology: case EdgeInMorphology: case TopHatMorphology: case BottomHatMorphology: if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, "\n%s: Difference with original image", CommandOptionToMnemonic(MagickMorphologyOptions,method)); (void) CompositeImageChannel(curr_image,(ChannelType) (channel & ~SyncChannels),DifferenceCompositeOp,image,0,0); break; case EdgeMorphology: if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode", CommandOptionToMnemonic(MagickMorphologyOptions,method)); (void) CompositeImageChannel(curr_image,(ChannelType) (channel & ~SyncChannels),DifferenceCompositeOp,save_image,0,0); save_image = DestroyImage(save_image); /* finished with save image */ break; default: break; } /* multi-kernel handling: re-iterate, or compose results */ if ( kernel->next == (KernelInfo *) NULL ) rslt_image = curr_image; /* just return the resulting image */ else if ( rslt_compose == NoCompositeOp ) { if ( verbose != MagickFalse ) { if ( this_kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " (re-iterate)"); else (void) FormatLocaleFile(stderr, " (done)"); } rslt_image = curr_image; /* return result, and re-iterate */ } else if ( rslt_image == (Image *) NULL) { if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, " (save for compose)"); rslt_image = curr_image; curr_image = (Image *) image; /* continue with original image */ } else { /* Add the new 'current' result to the composition ** ** The removal of any 'Sync' channel flag in the Image Compositon ** below ensures the methematical compose method is applied in a ** purely mathematical way, and only to the selected channels. ** IE: Turn off SVG composition 'alpha blending'. 
*/ if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, " (compose \"%s\")", CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) ); (void) CompositeImageChannel(rslt_image, (ChannelType) (channel & ~SyncChannels), rslt_compose, curr_image, 0, 0); curr_image = DestroyImage(curr_image); curr_image = (Image *) image; /* continue with original image */ } if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, "\n"); /* loop to the next kernel in a multi-kernel list */ norm_kernel = norm_kernel->next; if ( rflt_kernel != (KernelInfo *) NULL ) rflt_kernel = rflt_kernel->next; kernel_number++; } /* End Loop 2: Loop over each kernel */ } /* End Loop 1: compound method interation */ goto exit_cleanup; /* Yes goto's are bad, but it makes cleanup lot more efficient */ error_cleanup: if ( curr_image == rslt_image ) curr_image = (Image *) NULL; if ( rslt_image != (Image *) NULL ) rslt_image = DestroyImage(rslt_image); exit_cleanup: if ( curr_image == rslt_image || curr_image == image ) curr_image = (Image *) NULL; if ( curr_image != (Image *) NULL ) curr_image = DestroyImage(curr_image); if ( work_image != (Image *) NULL ) work_image = DestroyImage(work_image); if ( save_image != (Image *) NULL ) save_image = DestroyImage(save_image); if ( reflected_kernel != (KernelInfo *) NULL ) reflected_kernel = DestroyKernelInfo(reflected_kernel); return(rslt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o r p h o l o g y I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MorphologyImageChannel() applies a user supplied kernel to the image % according to the given mophology method. % % This function applies any and all user defined settings before calling % the above internal function MorphologyApply(). % % User defined settings include... 
% * Output Bias for Convolution and correlation ("-bias" or "-define convolve:bias=??") % * Kernel Scale/normalize settings ("-set 'option:convolve:scale'") % This can also includes the addition of a scaled unity kernel. % * Show Kernel being applied ("-set option:showKernel 1") % % The format of the MorphologyImage method is: % % Image *MorphologyImage(const Image *image,MorphologyMethod method, % const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception) % % Image *MorphologyImageChannel(const Image *image, const ChannelType % channel,MorphologyMethod method,const ssize_t iterations, % KernelInfo *kernel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: the morphology method to be applied. % % o iterations: apply the operation this many times (or no change). % A value of -1 means loop until no change found. % How this is applied may depend on the morphology method. % Typically this is a value of 1. % % o channel: the channel type. % % o kernel: An array of double representing the morphology kernel. % Warning: kernel may be normalized for the Convolve method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod method,const ssize_t iterations, const KernelInfo *kernel,ExceptionInfo *exception) { Image *morphology_image; morphology_image=MorphologyImageChannel(image,DefaultChannels,method, iterations,kernel,exception); return(morphology_image); } MagickExport Image *MorphologyImageChannel(const Image *image, const ChannelType channel,const MorphologyMethod method, const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception) { KernelInfo *curr_kernel; CompositeOperator compose; double bias; Image *morphology_image; /* Apply Convolve/Correlate Normalization and Scaling Factors. 
* This is done BEFORE the ShowKernelInfo() function is called so that * users can see the results of the 'option:convolve:scale' option. */ curr_kernel = (KernelInfo *) kernel; bias=image->bias; if ((method == ConvolveMorphology) || (method == CorrelateMorphology)) { const char *artifact; artifact = GetImageArtifact(image,"convolve:bias"); if (artifact != (const char *) NULL) bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0); artifact = GetImageArtifact(image,"convolve:scale"); if ( artifact != (const char *) NULL ) { if ( curr_kernel == kernel ) curr_kernel = CloneKernelInfo(kernel); if (curr_kernel == (KernelInfo *) NULL) { curr_kernel=DestroyKernelInfo(curr_kernel); return((Image *) NULL); } ScaleGeometryKernelInfo(curr_kernel, artifact); } } /* display the (normalized) kernel via stderr */ if ( IsMagickTrue(GetImageArtifact(image,"showKernel")) || IsMagickTrue(GetImageArtifact(image,"convolve:showKernel")) || IsMagickTrue(GetImageArtifact(image,"morphology:showKernel")) ) ShowKernelInfo(curr_kernel); /* Override the default handling of multi-kernel morphology results * If 'Undefined' use the default method * If 'None' (default for 'Convolve') re-iterate previous result * Otherwise merge resulting images using compose method given. * Default for 'HitAndMiss' is 'Lighten'. 
*/ { const char *artifact; compose = UndefinedCompositeOp; /* use default for method */ artifact = GetImageArtifact(image,"morphology:compose"); if ( artifact != (const char *) NULL) compose = (CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,artifact); } /* Apply the Morphology */ morphology_image = MorphologyApply(image, channel, method, iterations, curr_kernel, compose, bias, exception); /* Cleanup and Exit */ if ( curr_kernel != kernel ) curr_kernel=DestroyKernelInfo(curr_kernel); return(morphology_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotateKernelInfo() rotates the kernel by the angle given. % % Currently it is restricted to 90 degree angles, of either 1D kernels % or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels. % It will ignore usless rotations for specific 'named' built-in kernels. % % The format of the RotateKernelInfo method is: % % void RotateKernelInfo(KernelInfo *kernel, double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is currently internal to this module only, but can be exported % to other modules if needed. */ static void RotateKernelInfo(KernelInfo *kernel, double angle) { /* angle the lower kernels first */ if ( kernel->next != (KernelInfo *) NULL) RotateKernelInfo(kernel->next, angle); /* WARNING: Currently assumes the kernel (rightly) is horizontally symetrical ** ** TODO: expand beyond simple 90 degree rotates, flips and flops */ /* Modulus the angle */ angle = fmod(angle, 360.0); if ( angle < 0 ) angle += 360.0; if ( 337.5 < angle || angle <= 22.5 ) return; /* Near zero angle - no change! 
- At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle:
           * a cyclic shift of the 8 ring cells around the center cell
           * (assumes row-major layout of the 3x3 values array) */
          double t = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            /* step the (center-relative) origin one place around the ring */
            if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees */
          { /* in-place four-way cyclic swap of ring elements,
             * shrinking inwards ring by ring */
            register size_t
              i,j,x,y;
            register double
              *k,t;
            k=kernel->values;
            for( i=0, x=kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=kernel->height-1; j<y; j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also know as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origon
       */
      double
        t;
      register double
        *k;
      size_t
        i, j;

      k=kernel->values;
      for ( i=0, j=kernel->width*kernel->height-1;  i<j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, posibily with a linear kernel restriction.
   */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e G e o m e t r y K e r n e l I n f o                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleGeometryKernelInfo() takes a geometry argument string, typically
%  provided as a  "-set option:convolve:scale {geometry}"  user setting,
%  and modifies the kernel according to the parsed arguments of that setting.
%
%  The first argument (and any normalization flags) are passed to
%  ScaleKernelInfo() to scale/normalize the kernel.  The second argument
%  is then passed to UnityAddKernelInfo() to add a scled unity kernel
%  into the scaled/normalized kernel.
%
%  The format of the ScaleGeometryKernelInfo method is:
%
%      void ScaleGeometryKernelInfo(KernelInfo *kernel,
%        const double scaling_factor,const MagickStatusType normalize_flags)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%    o geometry:
%          The geometry string to parse, typically from the user provided
%          "-set option:convolve:scale {geometry}" setting.
% */ MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel, const char *geometry) { GeometryFlags flags; GeometryInfo args; SetGeometryInfo(&args); flags = (GeometryFlags) ParseGeometry(geometry, &args); #if 0 /* For Debugging Geometry Input */ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", flags, args.rho, args.sigma, args.xi, args.psi ); #endif if ( (flags & PercentValue) != 0 ) /* Handle Percentage flag*/ args.rho *= 0.01, args.sigma *= 0.01; if ( (flags & RhoValue) == 0 ) /* Set Defaults for missing args */ args.rho = 1.0; if ( (flags & SigmaValue) == 0 ) args.sigma = 0.0; /* Scale/Normalize the input kernel */ ScaleKernelInfo(kernel, args.rho, flags); /* Add Unity Kernel, for blending with original */ if ( (flags & SigmaValue) != 0 ) UnityAddKernelInfo(kernel, args.sigma); return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleKernelInfo() scales the given kernel list by the given amount, with or % without normalization of the sum of the kernel values (as per given flags). % % By default (no flags given) the values within the kernel is scaled % directly using given scaling factor without change. % % If either of the two 'normalize_flags' are given the kernel will first be % normalized and then further scaled by the scaling factor value given. % % Kernel normalization ('normalize_flags' given) is designed to ensure that % any use of the kernel scaling factor with 'Convolve' or 'Correlate' % morphology methods will fall into -1.0 to +1.0 range. Note that for % non-HDRI versions of IM this may cause images to have any negative results % clipped, unless some 'bias' is used. % % More specifically. 
Kernels which only contain positive values (such as a % 'Gaussian' kernel) will be scaled so that those values sum to +1.0, % ensuring a 0.0 to +1.0 output range for non-HDRI images. % % For Kernels that contain some negative values, (such as 'Sharpen' kernels) % the kernel will be scaled by the absolute of the sum of kernel values, so % that it will generally fall within the +/- 1.0 range. % % For kernels whose values sum to zero, (such as 'Laplician' kernels) kernel % will be scaled by just the sum of the postive values, so that its output % range will again fall into the +/- 1.0 range. % % For special kernels designed for locating shapes using 'Correlate', (often % only containing +1 and -1 values, representing foreground/brackground % matching) a special normalization method is provided to scale the positive % values separately to those of the negative values, so the kernel will be % forced to become a zero-sum kernel better suited to such searches. % % WARNING: Correct normalization of the kernel assumes that the '*_range' % attributes within the kernel structure have been correctly set during the % kernels creation. % % NOTE: The values used for 'normalize_flags' have been selected specifically % to match the use of geometry options, so that '!' means NormalizeValue, '^' % means CorrelateNormalizeValue. All other GeometryFlags values are ignored. % % The format of the ScaleKernelInfo method is: % % void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor, % const MagickStatusType normalize_flags ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scaling_factor: % multiply all values (after normalization) by this factor if not % zero. If the kernel is normalized regardless of any flags. % % o normalize_flags: % GeometryFlags defining normalization method to use. 
% specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register ssize_t i; register double pos_scale, neg_scale; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finialize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if ( ! IsNaN(kernel->values[i]) ) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? 
pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = 1; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel defination to % standard error, generally due to a users 'showKernel' option request. % % The format of the ShowKernelInfo method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickExport void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( 
fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
           GetMagickPrecision(), k->positive_range+k->negative_range);

    /* dump the kernel values row by row; 'nan' marks don't-care cells */
    for (i=v=0; v < k->height; v++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
      for (u=0; u < k->width; u++, i++)
        if ( IsNaN(k->values[i]) )
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
               GetMagickPrecision(), k->values[i]);
      (void) FormatLocaleFile(stderr,"\n");
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n i t y A d d K e r n a l I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
%  to the given pre-scaled and normalized Kernel.  This in effect adds that
%  amount of the original image into the resulting convolution kernel.  This
%  value is usually provided by the user as a percentage value in the
%  'convolve:scale' setting.
%
%  The resulting effect is to convert the defined kernels into blended
%  soft-blurs, unsharp kernels or into sharpening kernels.
%
%  The format of the UnityAdditionKernelInfo method is:
%
%      void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o scale:
%         scaling factor for the unity kernel to be added to
%         the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    UnityAddKernelInfo(kernel->next, scale);

  /* Add the scaled unity kernel to the existing kernel:
   * the 'unity' kernel is a single 1 at the kernel origin (x,y) */
  kernel->values[kernel->x+kernel->y*kernel->width] += scale;
  CalcKernelMetaData(kernel);  /* recalculate the meta-data */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Z e r o K e r n e l N a n s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroKernelNans() replaces any special 'nan' value that may be present in
%  the kernel with a zero value.  This is typically done when the kernel will
%  be used in special hardware (GPU) convolution processors, to simply
%  matters.
%
%  The format of the ZeroKernelNans method is:
%
%      void ZeroKernelNans (KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  register size_t
    i;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ZeroKernelNans(kernel->next);

  for (i=0; i < (kernel->width*kernel->height); i++)
    if ( IsNaN(kernel->values[i]) )
      kernel->values[i] = 0.0;

  return;
}
/* ===== file boundary: ten_tusscher_2004_epi_S1_4.c ===== */
//Original Ten Tusscher #include <assert.h> #include <stdlib.h> #include "ten_tusscher_2004_epi_S1_4.h" GET_CELL_MODEL_DATA(init_cell_model_data) { assert(cell_model); if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } //TODO: this should be called only once for the whole mesh, like in the GPU code SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5808390037434,0.00128660871673998,0.780017411965445,0.779866089420134,0.000174427830947983,0.485221044706665,0.00293766951726531,0.999998352403933,1.92945075222659e-08,1.88789743418140e-05,0.999774028269383,1.00656274895341,0.999980305363904,5.75119942688369e-05,0.652562498130868,9.24127402937561,140.252453661949}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { assert(sv); real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = 
sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL /// real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito //#ifdef EPI real Gto=0.294; //#endif // #ifdef ENDO // real Gto=0.073; //#endif //#ifdef MCELL // real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.1821693920716,0.000262369857178200,0.000171567529876738,0.000414005106483591,0.297500226048348,0.162622717394298,0.207515183338143,3.39980849488085,0.0224798791846427,2.56467648820225,1096.76282222310,0.000572145335603343,0.124382279366777,0.0197003709329121,0.00191117528600119,6.10868623397025e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; 
knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); 
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) 
{ AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 
0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
/* ===== file boundary: GB_unop__identity_int8_int64.c ===== */
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int8_int64) // op(A') function: GB (_unop_tran__identity_int8_int64) // C type: int8_t // A type: int64_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int8_t z = (int8_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = (int8_t) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int8_int64) ( int8_t *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; int8_t z = (int8_t) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int64_t aij = Ax [p] ; int8_t z = (int8_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int8_int64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ===== file boundary: hadvuv.h ===== */
#ifndef HADVUV_H
#define HADVUV_H

// Horizontal advection of the wind components u and v, provided in four
// variants with identical numerics but different loop structures:
//   hadvuv               - 8 separate loop nests with materialized temporaries
//   hadvuv_partialfusion - 2 fused nests (u-update, v-update)
//   hadvuv_fullfusion    - 1 fully fused nest
//   hadvuv_openmp        - fully fused nest, k-loop parallelized with OpenMP

// Third-order upwind-biased advection of `field` at point (i,j,k), using the
// wind averages uavg/vavg to pick the upwind stencil in each direction.
// Returns the combined x/y advective tendency weighted by the inverse grid
// spacings eddlat/eddlon.
// NOTE(review): `field` is taken by value while all other Storage3D arguments
// in this header are passed by reference — presumably Storage3D is a cheap
// view type; confirm, otherwise this copies per call.
ElementType advectionDriver(const Storage3D field, const int64_t i, const int64_t j,
                            const int64_t k, const ElementType uavg,
                            const ElementType vavg, const ElementType eddlat,
                            const ElementType eddlon) {
  ElementType result_x = 0.0;
  ElementType result_y = 0.0;
  // x-direction: choose the upwind-biased stencil based on the sign of uavg
  if (uavg > 0) {
    result_x = uavg * (ElementType(-1.0) / ElementType(6.0) * field(i - 2, j, k) +
                       field(i - 1, j, k) + ElementType(-0.5) * field(i, j, k) +
                       ElementType(-1.0) / ElementType(3.0) * field(i + 1, j, k));
  } else {
    result_x = -uavg * (ElementType(-1.0) / ElementType(3.0) * field(i - 1, j, k) +
                        ElementType(-0.5) * field(i, j, k) + field(i + 1, j, k) +
                        ElementType(-1.0) / ElementType(6.0) * field(i + 2, j, k));
  }
  // y-direction: mirrored stencil selected by the sign of vavg
  if (vavg > 0) {
    result_y = vavg * (ElementType(-1.0) / ElementType(6.0) * field(i, j - 2, k) +
                       field(i, j - 1, k) + ElementType(-0.5) * field(i, j, k) +
                       ElementType(-1.0) / ElementType(3.0) * field(i, j + 1, k));
  } else {
    result_y = -vavg * (ElementType(-1.0) / ElementType(3.0) * field(i, j - 1, k) +
                        ElementType(-0.5) * field(i, j, k) + field(i, j + 1, k) +
                        ElementType(-1.0) / ElementType(6.0) * field(i, j + 2, k));
  }
  return eddlat * result_x + eddlon * result_y;
}

// Unfused reference version: each intermediate field (wind interpolations,
// averages, advective tendencies) is computed in its own loop nest and
// materialized into the scratch Storage3D arguments.
// Outputs: uout, vout. Scratch: uatupos..vres. All other inputs are read-only.
void hadvuv(Storage3D& uout, Storage3D& vout, const Storage3D& uin,
            const Storage3D& vin, const Storage1D& acrlat0, const Storage1D& acrlat1,
            const Storage1D& tgrlatda0, const Storage1D& tgrlatda1,
            Storage3D& uatupos, Storage3D& vatupos, Storage3D& uatvpos,
            Storage3D& vatvpos, Storage3D& uavg, Storage3D& vavg, Storage3D& ures,
            Storage3D& vres, const ElementType eddlat, const ElementType eddlon) {
  // Interpolate u and v to the u-position of the staggered grid.
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        uatupos(i, j, k) = (ElementType(1.0) / ElementType(3.0)) *
                           (uin(i - 1, j, k) + uin(i, j, k) + uin(i + 1, j, k));
        vatupos(i, j, k) = ElementType(0.25) *
                           (vin(i + 1, j, k) + vin(i + 1, j - 1, k) + vin(i, j, k) +
                            vin(i, j - 1, k));
      }
    }
  }
  // Scale to metric wind averages at the u-position.
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        uavg(i, j, k) = acrlat0(j) * uatupos(i, j, k);
        vavg(i, j, k) = EARTH_RADIUS_RECIP * vatupos(i, j, k);
      }
    }
  }
  // Advective tendency of u.
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        ures(i, j, k) = advectionDriver(uin, i, j, k, uavg(i, j, k), vavg(i, j, k),
                                        eddlat, eddlon);
      }
    }
  }
  // u output: advection plus the metric (curvature) term.
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        uout(i, j, k) =
            ures(i, j, k) + tgrlatda0(j) * uin(i, j, k) * vatupos(i, j, k);
      }
    }
  }
  // Interpolate u and v to the v-position of the staggered grid.
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        uatvpos(i, j, k) = ElementType(0.25) *
                           (uin(i - 1, j, k) + uin(i, j, k) + uin(i, j + 1, k) +
                            uin(i - 1, j + 1, k));
        vatvpos(i, j, k) = ElementType(1.0) / ElementType(3.0) *
                           (vin(i, j - 1, k) + vin(i, j, k) + vin(i, j + 1, k));
      }
    }
  }
  // Scale to metric wind averages at the v-position (uavg/vavg are reused).
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        uavg(i, j, k) = acrlat1(j) * uatvpos(i, j, k);
        vavg(i, j, k) = EARTH_RADIUS_RECIP * vatvpos(i, j, k);
      }
    }
  }
  // Advective tendency of v.
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        vres(i, j, k) = advectionDriver(vin, i, j, k, uavg(i, j, k), vavg(i, j, k),
                                        eddlat, eddlon);
      }
    }
  }
  // v output: advection minus the metric term (quadratic in u at v-position).
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        vout(i, j, k) =
            vres(i, j, k) - tgrlatda1(j) * uatvpos(i, j, k) * uatvpos(i, j, k);
      }
    }
  }
}

// Partially fused version: the u-update and v-update are each computed in a
// single loop nest using scalar temporaries; the Storage3D scratch arguments
// are kept for signature compatibility but are not written.
void hadvuv_partialfusion(Storage3D& uout, Storage3D& vout, const Storage3D& uin,
                          const Storage3D& vin, const Storage1D& acrlat0,
                          const Storage1D& acrlat1, const Storage1D& tgrlatda0,
                          const Storage1D& tgrlatda1, Storage3D& uatupos,
                          Storage3D& vatupos, Storage3D& uatvpos,
                          Storage3D& vatvpos, Storage3D& uavg, Storage3D& vavg,
                          Storage3D& ures, Storage3D& vres,
                          const ElementType eddlat, const ElementType eddlon) {
  // Fused u-update (nests 1-4 of hadvuv).
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        auto _uatupos = (ElementType(1.0) / ElementType(3.0)) *
                        (uin(i - 1, j, k) + uin(i, j, k) + uin(i + 1, j, k));
        auto _vatupos = ElementType(0.25) *
                        (vin(i + 1, j, k) + vin(i + 1, j - 1, k) + vin(i, j, k) +
                         vin(i, j - 1, k));
        auto _uavg = acrlat0(j) * _uatupos;
        auto _vavg = EARTH_RADIUS_RECIP * _vatupos;
        auto _ures = advectionDriver(uin, i, j, k, _uavg, _vavg, eddlat, eddlon);
        uout(i, j, k) = _ures + tgrlatda0(j) * uin(i, j, k) * _vatupos;
      }
    }
  }
  // Fused v-update (nests 5-8 of hadvuv).
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        auto _uatvpos = ElementType(0.25) *
                        (uin(i - 1, j, k) + uin(i, j, k) + uin(i, j + 1, k) +
                         uin(i - 1, j + 1, k));
        auto _vatvpos = ElementType(1.0) / ElementType(3.0) *
                        (vin(i, j - 1, k) + vin(i, j, k) + vin(i, j + 1, k));
        auto _uavg = acrlat1(j) * _uatvpos;
        auto _vavg = EARTH_RADIUS_RECIP * _vatvpos;
        auto _vres = advectionDriver(vin, i, j, k, _uavg, _vavg, eddlat, eddlon);
        vout(i, j, k) = _vres - tgrlatda1(j) * _uatvpos * _uatvpos;
      }
    }
  }
}

// Fully fused version: u- and v-updates computed together in one loop nest.
void hadvuv_fullfusion(Storage3D& uout, Storage3D& vout, const Storage3D& uin,
                       const Storage3D& vin, const Storage1D& acrlat0,
                       const Storage1D& acrlat1, const Storage1D& tgrlatda0,
                       const Storage1D& tgrlatda1, Storage3D& uatupos,
                       Storage3D& vatupos, Storage3D& uatvpos, Storage3D& vatvpos,
                       Storage3D& uavg, Storage3D& vavg, Storage3D& ures,
                       Storage3D& vres, const ElementType eddlat,
                       const ElementType eddlon) {
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        // u-update
        auto _uatupos = (ElementType(1.0) / ElementType(3.0)) *
                        (uin(i - 1, j, k) + uin(i, j, k) + uin(i + 1, j, k));
        auto _vatupos = ElementType(0.25) *
                        (vin(i + 1, j, k) + vin(i + 1, j - 1, k) + vin(i, j, k) +
                         vin(i, j - 1, k));
        auto _uavg = acrlat0(j) * _uatupos;
        auto _vavg = EARTH_RADIUS_RECIP * _vatupos;
        auto _ures = advectionDriver(uin, i, j, k, _uavg, _vavg, eddlat, eddlon);
        uout(i, j, k) = _ures + tgrlatda0(j) * uin(i, j, k) * _vatupos;
        // v-update (reuses the _uavg/_vavg temporaries)
        auto _uatvpos = ElementType(0.25) *
                        (uin(i - 1, j, k) + uin(i, j, k) + uin(i, j + 1, k) +
                         uin(i - 1, j + 1, k));
        auto _vatvpos = ElementType(1.0) / ElementType(3.0) *
                        (vin(i, j - 1, k) + vin(i, j, k) + vin(i, j + 1, k));
        _uavg = acrlat1(j) * _uatvpos;
        _vavg = EARTH_RADIUS_RECIP * _vatvpos;
        auto _vres = advectionDriver(vin, i, j, k, _uavg, _vavg, eddlat, eddlon);
        vout(i, j, k) = _vres - tgrlatda1(j) * _uatvpos * _uatvpos;
      }
    }
  }
}

// Same as hadvuv_fullfusion but with the outer k-loop parallelized.
// Safe to parallelize over k: each (i,j,k) writes only uout/vout at its own
// point and reads only the read-only inputs.
void hadvuv_openmp(Storage3D& uout, Storage3D& vout, const Storage3D& uin,
                   const Storage3D& vin, const Storage1D& acrlat0,
                   const Storage1D& acrlat1, const Storage1D& tgrlatda0,
                   const Storage1D& tgrlatda1, Storage3D& uatupos,
                   Storage3D& vatupos, Storage3D& uatvpos, Storage3D& vatvpos,
                   Storage3D& uavg, Storage3D& vavg, Storage3D& ures,
                   Storage3D& vres, const ElementType eddlat,
                   const ElementType eddlon) {
#pragma omp parallel for
  for (int64_t k = 0; k < domain_height; ++k) {
    for (int64_t i = 0; i < domain_size; ++i) {
      for (int64_t j = 0; j < domain_size; ++j) {
        // u-update
        auto _uatupos = (ElementType(1.0) / ElementType(3.0)) *
                        (uin(i - 1, j, k) + uin(i, j, k) + uin(i + 1, j, k));
        auto _vatupos = ElementType(0.25) *
                        (vin(i + 1, j, k) + vin(i + 1, j - 1, k) + vin(i, j, k) +
                         vin(i, j - 1, k));
        auto _uavg = acrlat0(j) * _uatupos;
        auto _vavg = EARTH_RADIUS_RECIP * _vatupos;
        auto _ures = advectionDriver(uin, i, j, k, _uavg, _vavg, eddlat, eddlon);
        uout(i, j, k) = _ures + tgrlatda0(j) * uin(i, j, k) * _vatupos;
        // v-update
        auto _uatvpos = ElementType(0.25) *
                        (uin(i - 1, j, k) + uin(i, j, k) + uin(i, j + 1, k) +
                         uin(i - 1, j + 1, k));
        auto _vatvpos = ElementType(1.0) / ElementType(3.0) *
                        (vin(i, j - 1, k) + vin(i, j, k) + vin(i, j + 1, k));
        _uavg = acrlat1(j) * _uatvpos;
        _vavg = EARTH_RADIUS_RECIP * _vatvpos;
        auto _vres = advectionDriver(vin, i, j, k, _uavg, _vavg, eddlat, eddlon);
        vout(i, j, k) = _vres - tgrlatda1(j) * _uatvpos * _uatvpos;
      }
    }
  }
}

#endif  // HADVUV_H
pyfr_gemm_rm.c
/****************************************************************************** ** Copyright (c) 2016-2017, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <mkl.h> #include <libxsmm.h> static double sec(struct timeval start, struct timeval end) { return ((double)(((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)))) / 1.0e6; } int main(int argc, char *argv[]) { int n,m,k; int lda,ldb,ldc; double* a; double* b; double* c1; double* c2; struct timeval l_start, l_end; double l_total = 0.0; int reps, i, j; const int nblock = 16; double alpha = 1.0, beta = 1.0; char transa = 'N', transb = 'N'; libxsmm_gemm_prefetch_type l_prefetch_op = LIBXSMM_PREFETCH_NONE; libxsmm_dmmfunction kernel = NULL; if (argc != 5) { fprintf(stderr, "Invalid ./a,out M N K reps\n"); //fprintf(stderr, "Invalid ./a,out M N K\n"); exit(-1); } m = atoi(argv[1]); n = atoi(argv[2]); k = atoi(argv[3]); reps = atoi(argv[4]); // this is col-major what you want to use // for the sizes in question lda = k; ldb = n; ldc = n; if (n % nblock != 0) { fprintf(stderr, "N needs to be divisable by %i\n", nblock); exit(-1); } a = (double*)_mm_malloc(lda*m*sizeof(double), 64); b = (double*)_mm_malloc(ldb*k*sizeof(double), 64); c1 = (double*)_mm_malloc(ldc*m*sizeof(double), 64); c2 = (double*)_mm_malloc(ldc*m*sizeof(double), 64); #pragma omp parallel for for (i = 0; i < lda*m; i++) { a[i] = drand48(); } #pragma omp parallel for for (i = 0; i < ldb*k; i++) { b[i] = drand48(); } #pragma omp parallel for for (i = 0; i < ldc*m; i++) { c1[i] = 0; c2[i] = 0; } // JIT Kernel kernel = libxsmm_dmmdispatch(nblock, m, k, &ldb, &lda, &ldc, NULL, NULL, NULL, &l_prefetch_op ); if (kernel == 0) { printf("JIT failed, exiting\n"); exit(-1); } // init MKL dgemm(&transb, &transa, &n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc); #pragma omp parallel for for (i = 0; i < ldc*m; i++) { c1[i] = 0; c2[i] = 0; } gettimeofday(&l_start, NULL); for ( j = 0; j < reps; j++ ) { dgemm(&transb, &transa, &n, &m, &k, 
&alpha, b, &ldb, a, &lda, &beta, c1, &ldc); } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps ); fprintf(stdout, "GFLOPS MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total ); gettimeofday(&l_start, NULL); for ( j = 0; j < reps; j++ ) { #pragma omp parallel for private(i) for ( i = 0; i < n; i+=nblock) { kernel( b+i, a, c2+i, NULL, NULL, NULL ); } gettimeofday(&l_end, NULL); } l_total = sec(l_start, l_end); fprintf(stdout, "time[s] libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps ); fprintf(stdout, "GFLOPS libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total ); // test result double max_error = 0.0; for ( i = 0; i < ldc*m; i++) { if (max_error < fabs(c1[i] - c2[i])) { max_error = fabs(c1[i] - c2[i]); } } printf("max error: %f\n\n", max_error); }
place_report.c
#define _GNU_SOURCE #include "quo.h" #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <string.h> #include <sched.h> #include <mpi.h> #include <omp.h> /* Heavily modified from xthi.c code */ /* xthi.c code is used in examples for hybrid MPI/OpenMP affinity from a few HPC sites */ /* xthi.c originally borrowed some of this code from util-linux-2.13-pre7/schedutils/taskset.c */ static char *cpuset_to_cstr(cpu_set_t *mask, char *str) { char *ptr = str; int i, j, entry_made = 0; for (i = 0; i < CPU_SETSIZE; i++) { if (CPU_ISSET(i, mask)) { int run = 0; entry_made = 1; for (j = i + 1; j < CPU_SETSIZE; j++) { if (CPU_ISSET(j, mask)) run++; else break; } if (!run) sprintf(ptr, "%d,", i); else if (run == 1) { sprintf(ptr, "%d,%d,", i, i + 1); i++; } else { sprintf(ptr, "%d-%d,", i, i + run); i += run; } while (*ptr != 0) ptr++; } } ptr -= entry_made; *ptr = 0; return(str); } #ifdef _OPENMP void place_report_mpi_omp(void) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); int socket_global[144]; char clbuf_global[144][7 * CPU_SETSIZE]; #pragma omp parallel { if (omp_get_thread_num() == 0 && rank == 0){ printf("Running with %d thread(s)\n",omp_get_num_threads()); int bind_policy = omp_get_proc_bind(); switch (bind_policy) { case omp_proc_bind_false: printf(" proc_bind is false\n"); break; case omp_proc_bind_true: printf(" proc_bind is true\n"); break; case omp_proc_bind_master: printf(" proc_bind is master\n"); break; case omp_proc_bind_close: printf(" proc_bind is close\n"); break; case omp_proc_bind_spread: printf(" proc_bind is spread\n"); } printf(" proc_num_places is %d\n",omp_get_num_places()); } int thread = omp_get_thread_num(); cpu_set_t coremask; char clbuf[7 * CPU_SETSIZE], hnbuf[64]; memset(clbuf, 0, sizeof(clbuf)); memset(hnbuf, 0, sizeof(hnbuf)); gethostname(hnbuf, sizeof(hnbuf)); sched_getaffinity(0, sizeof(coremask), &coremask); cpuset_to_cstr(&coremask, clbuf); strcpy(clbuf_global[thread],clbuf); socket_global[omp_get_thread_num()] = 
omp_get_place_num(); #pragma omp barrier #pragma omp master for (int i=0; i<omp_get_num_threads(); i++){ printf("Hello from rank %02d, thread %02d, on %s. (core affinity = %2s) OpenMP socket is %2d\n", rank, i, hnbuf, clbuf_global[i], socket_global[i]); } } } #endif void place_report_mpi(void) { int rank, nprocs; cpu_set_t coremask; char clbuf[7 * CPU_SETSIZE], hnbuf[64]; memset(clbuf, 0, sizeof(clbuf)); memset(hnbuf, 0, sizeof(hnbuf)); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); int pid = (int)getpid(); gethostname(hnbuf, sizeof(hnbuf)); sched_getaffinity(0, sizeof(coremask), &coremask); cpuset_to_cstr(&coremask, clbuf); int *pid_global = (int *)malloc(nprocs*sizeof(int)); char *hnbuf_global = (char *)malloc(nprocs*sizeof(char)*64); char *clbuf_global = (char *)malloc(nprocs*sizeof(char)*7*CPU_SETSIZE); MPI_Gather(hnbuf,64,MPI_CHARACTER,hnbuf_global,64,MPI_CHAR,0,MPI_COMM_WORLD); MPI_Gather(clbuf,7*CPU_SETSIZE,MPI_CHARACTER,clbuf_global,7*CPU_SETSIZE,MPI_CHAR,0,MPI_COMM_WORLD); MPI_Gather(&pid,1,MPI_INT,pid_global,1,MPI_INT,0,MPI_COMM_WORLD); if (rank == 0){ for (int irank = 0; irank < nprocs; irank++){ printf("Hello from process %d, rank %d, on %s. 
(core affinity = %s)\n", pid_global[irank],irank,hnbuf_global+64*irank,clbuf_global+7*CPU_SETSIZE*irank); } } free(hnbuf_global); free(clbuf_global); free(pid_global); } void place_report_mpi_quo(QUO_context quo) { int rank, nprocs; char *cbindstr = NULL; cpu_set_t coremask; char clbuf[7 * CPU_SETSIZE], hnbuf[64]; memset(clbuf, 0, sizeof(clbuf)); memset(hnbuf, 0, sizeof(hnbuf)); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); int pid = (int)getpid(); QUO_stringify_cbind(quo, &cbindstr); int clen = strlen(cbindstr)+1; int clen_max; MPI_Allreduce(&clen, &clen_max, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); gethostname(hnbuf, sizeof(hnbuf)); sched_getaffinity(0, sizeof(coremask), &coremask); cpuset_to_cstr(&coremask, clbuf); int *pid_global = NULL; char *hnbuf_global = NULL; char *clbuf_global = NULL; char *cbind_global = NULL; if (rank == 0){ pid_global = (int *)malloc(nprocs*sizeof(int)); hnbuf_global = (char *)malloc(nprocs*sizeof(char)*64); clbuf_global = (char *)malloc(nprocs*sizeof(char)*7*CPU_SETSIZE); cbind_global = (char *)malloc(nprocs*sizeof(char)*clen_max); } MPI_Gather(hnbuf,64,MPI_CHARACTER,hnbuf_global,64,MPI_CHAR,0,MPI_COMM_WORLD); MPI_Gather(clbuf,7*CPU_SETSIZE,MPI_CHARACTER,clbuf_global,7*CPU_SETSIZE,MPI_CHAR,0,MPI_COMM_WORLD); MPI_Gather(&pid,1,MPI_INT,pid_global,1,MPI_INT,0,MPI_COMM_WORLD); MPI_Gather(cbindstr,clen_max,MPI_CHARACTER,cbind_global,clen_max,MPI_CHARACTER,0,MPI_COMM_WORLD); if (rank == 0){ for (int irank = 0; irank < nprocs; irank++){ printf("Hello from process %d, rank %d, on %s. (core affinity = %s) cbind [%s]\n", pid_global[irank],irank,hnbuf_global+64*irank,clbuf_global+7*CPU_SETSIZE*irank, cbind_global+irank*clen_max); } } free(cbindstr); if (rank == 0){ free(hnbuf_global); free(clbuf_global); free(pid_global); free(cbind_global); } }
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ``fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ``classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
% % */ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { MagickRealType center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { MagickRealType tau; ssize_t left, right; MagickRealType mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { MagickRealType tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. 
*/ static MagickRealType OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void FreeNodes(IntervalTree *), InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *), ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const MagickRealType cluster_threshold, % const MagickRealType weighting_exponent, % const MagickBooleanType verbose) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. 
% */ static MagickBooleanType Classify(Image *image,short **extrema, const MagickRealType cluster_threshold, const MagickRealType weighting_exponent,const MagickBooleanType verbose) { #define SegmentImageTag "Segment/Image" #define ThrowClassifyException(severity,tag,label) \ {\ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \ { \ next_cluster=cluster->next; \ cluster=(Cluster *) RelinquishMagickMemory(cluster); \ } \ if (squares != (MagickRealType *) NULL) \ { \ squares-=255; \ free_squares=squares; \ free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares); \ } \ ThrowBinaryException(severity,tag,label); \ } CacheView *image_view; Cluster *cluster, *head, *last_cluster, *next_cluster; ExceptionInfo *exception; ExtentPacket blue, green, red; MagickOffsetType progress; MagickRealType *free_squares; MagickStatusType status; ssize_t i; MagickRealType *squares; size_t number_clusters; ssize_t count, y; /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; squares=(MagickRealType *) NULL; (void) memset(&red,0,sizeof(red)); (void) memset(&green,0,sizeof(green)); (void) memset(&blue,0,sizeof(blue)); exception=(&image->exception); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireQuantumMemory(1, sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ (void) memset(cluster,0,sizeof(*cluster)); cluster->red=red; cluster->green=green; cluster->blue=blue; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. 
*/ cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster)); if (cluster == (Cluster *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ (void) memset(cluster,0,sizeof(*cluster)); cluster->red=red; cluster->green=green; cluster->blue=blue; head=cluster; } /* Count the pixels for each cluster. */ status=MagickTrue; count=0; progress=0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket pixel; pixel.red=(double) ScaleQuantumToChar(p->red); pixel.green=(double) ScaleQuantumToChar(p->green); pixel.blue=(double) ScaleQuantumToChar(p->blue); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) && (pixel.red <= (double) (cluster->red.right+SafeMargin)) && (pixel.green >= (double) (cluster->green.left-SafeMargin)) && (pixel.green <= (double) (cluster->green.right+SafeMargin)) && (pixel.blue >= (double) (cluster->blue.left-SafeMargin)) && (pixel.blue <= (double) (cluster->blue.right+SafeMargin))) { /* Count this pixel. */ count++; cluster->red.center+=pixel.red; cluster->green.center+=pixel.green; cluster->blue.center+=pixel.blue; cluster->count++; break; } p++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); /* Remove clusters that do not meet minimum cluster threshold. 
*/ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } number_clusters=(size_t) count; if (verbose != MagickFalse) { /* Print cluster statistics. */ (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n"); (void) FormatLocaleFile(stdout,"===================\n\n"); (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double) cluster_threshold); (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double) weighting_exponent); (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n", (double) number_clusters); /* Print the total number of points per cluster. */ (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n"); (void) FormatLocaleFile(stdout,"=============================\n\n"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double) cluster->id,(double) cluster->count); /* Print the cluster extents. 
*/ (void) FormatLocaleFile(stdout, "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"================"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout, "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double) cluster->red.left,(double) cluster->red.right,(double) cluster->green.left,(double) cluster->green.right,(double) cluster->blue.left,(double) cluster->blue.right); } /* Print the cluster center values. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"====================="); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout,"%g %g %g\n",(double) cluster->red.center,(double) cluster->green.center,(double) cluster->blue.center); } (void) FormatLocaleFile(stdout,"\n"); } if (number_clusters > 256) ThrowClassifyException(ImageError,"TooManyClusters",image->filename); /* Speed up distance calculations. */ squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares)); if (squares == (MagickRealType *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); squares+=255; for (i=(-255); i <= 255; i++) squares[i]=(MagickRealType) i*(MagickRealType) i; /* Allocate image colormap. 
*/ if (AcquireImageColormap(image,number_clusters) == MagickFalse) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); i=0; for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) (cluster->red.center+0.5)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) (cluster->green.center+0.5)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) (cluster->blue.center+0.5)); i++; } /* Do course grain classes. */ exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Cluster *cluster; const PixelPacket *magick_restrict p; IndexPacket *magick_restrict indexes; ssize_t x; PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket pixel; SetPixelIndex(indexes+x,0); pixel.red=(double) ScaleQuantumToChar(q->red); pixel.green=(double) ScaleQuantumToChar(q->green); pixel.blue=(double) ScaleQuantumToChar(q->blue); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) && (pixel.red <= (double) (cluster->red.right+SafeMargin)) && (pixel.green >= (double) (cluster->green.left-SafeMargin)) && (pixel.green <= (double) (cluster->green.right+SafeMargin)) && (pixel.blue >= (double) (cluster->blue.left-SafeMargin)) && (pixel.blue <= (double) (cluster->blue.right+SafeMargin))) { /* Classify this pixel. 
*/ SetPixelIndex(indexes+x,cluster->id); break; } } if (cluster == (Cluster *) NULL) { MagickRealType distance_squared, local_minima, numerator, ratio, sum; ssize_t j, k; /* Compute fuzzy membership. */ local_minima=0.0; for (j=0; j < (ssize_t) image->colors; j++) { sum=0.0; p=image->colormap+j; distance_squared= squares[(ssize_t) (pixel.red-ScaleQuantumToChar(p->red))]+ squares[(ssize_t) (pixel.green-ScaleQuantumToChar(p->green))]+ squares[(ssize_t) (pixel.blue-ScaleQuantumToChar(p->blue))]; numerator=distance_squared; for (k=0; k < (ssize_t) image->colors; k++) { p=image->colormap+k; distance_squared= squares[(ssize_t) (pixel.red-ScaleQuantumToChar(p->red))]+ squares[(ssize_t) (pixel.green-ScaleQuantumToChar(p->green))]+ squares[(ssize_t) (pixel.blue-ScaleQuantumToChar(p->blue))]; ratio=numerator/distance_squared; sum+=SegmentPower(ratio); } if ((sum != 0.0) && ((1.0/sum) > local_minima)) { /* Classify this pixel. */ local_minima=1.0/sum; SetPixelIndex(indexes+x,j); } } } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status&=SyncImage(image); /* Relinquish resources. 
*/
  /* Free the cluster list (tail of Classify). */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /* Undo the +255 bias applied when the table was built before freeing. */
  squares-=255;
  free_squares=squares;
  free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C o n s o l i d a t e   C r o s s i n g s                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings: walk the scale levels from coarsest to
    finest so that each level's crossings are aligned against the next
    coarser level (i+1).
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.  left/center/right are the candidate positions at
        the coarser level i+1.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j at the current level i.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
*/
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /* Move the crossing to the chosen slot (or drop it if none fits). */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e f i n e   R e g i o n                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineRegion() defines the left and right boundaries of a peak region.
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents:  This pointer to an ExtentPacket represent the extends
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.  Scanning resumes from extents->index,
    so successive calls enumerate successive regions.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima): first positive extrema entry.
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima): first negative extrema entry.
*/
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e r i v a t i v e   H i s t o g r a m                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const MagickRealType *histogram,
%        MagickRealType *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of MagickRealTypes representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of MagickRealTypes is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.
%
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation
    (one-sided three-point differences at 0 and 255).
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+  G e t I m a g e D y n a m i c T h r e s h o l d                            %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
% % The format of the GetImageDynamicThreshold method is: % % MagickBooleanType GetImageDynamicThreshold(const Image *image, % const double cluster_threshold,const double smooth_threshold, % MagickPixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. % % o pixel: return the dynamic threshold here. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image, const double cluster_threshold,const double smooth_threshold, MagickPixelPacket *pixel,ExceptionInfo *exception) { Cluster *background, *cluster, *object, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickBooleanType proceed; MagickRealType threshold; const PixelPacket *p; ssize_t i, x; short *extrema[MaxDimension]; ssize_t count, *histogram[MaxDimension], y; /* Allocate histogram and extrema. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); GetMagickPixelPacket(image,pixel); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } } /* Initialize histogram. */ InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]); /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) memset(&red,0,sizeof(red)); (void) memset(&green,0,sizeof(green)); (void) memset(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. 
*/ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireQuantumMemory(1, sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster)); if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ count=0; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket pixel; pixel.red=(double) ScaleQuantumToChar(p->red); pixel.green=(double) ScaleQuantumToChar(p->green); pixel.blue=(double) ScaleQuantumToChar(p->blue); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) && (pixel.red <= (double) (cluster->red.right+SafeMargin)) && (pixel.green >= (double) (cluster->green.left-SafeMargin)) && (pixel.green <= (double) (cluster->green.right+SafeMargin)) && (pixel.blue >= (double) (cluster->blue.left-SafeMargin)) && (pixel.blue <= (double) (cluster->blue.right+SafeMargin))) { /* Count this pixel. 
*/ count++; cluster->red.center+=pixel.red; cluster->green.center+=pixel.green; cluster->blue.center+=pixel.blue; cluster->count++; break; } p++; } proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y, 2*image->rows); if (proceed == MagickFalse) break; } /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } object=head; background=head; if (count > 1) { object=head->next; for (cluster=object; cluster->next != (Cluster *) NULL; ) { if (cluster->count < object->count) object=cluster; cluster=cluster->next; } background=head->next; for (cluster=background; cluster->next != (Cluster *) NULL; ) { if (cluster->count > background->count) background=cluster; cluster=cluster->next; } } if (background != (Cluster *) NULL) { threshold=(background->red.center+object->red.center)/2.0; pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->green.center+object->green.center)/2.0; pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->blue.center+object->blue.center)/2.0; pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); } /* Relinquish resources. 
*/ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeHistogram() computes the histogram for an image. % % The format of the InitializeHistogram method is: % % InitializeHistogram(const Image *image,ssize_t **histogram) % % A description of each parameter follows. % % o image: Specifies a pointer to an Image structure; returned from % ReadImage. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % */ static void InitializeHistogram(const Image *image,ssize_t **histogram, ExceptionInfo *exception) { const PixelPacket *p; ssize_t i, x; ssize_t y; /* Initialize histogram. 
*/ for (i=0; i <= 255; i++) { histogram[Red][i]=0; histogram[Green][i]=0; histogram[Blue][i]=0; } for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++; histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++; histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++; p++; } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e I n t e r v a l T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeIntervalTree() initializes an interval tree from the lists of % zero crossings. % % The format of the InitializeIntervalTree method is: % % InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes, % IntervalTree *node) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. 
%
*/

/* Collect (into list) every leaf node of the tree rooted at node. */
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    list[(*number_nodes)++]=node;
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}

/* Set each node's mean_stability to the average stability of its children. */
static void MeanStability(IntervalTree *node)
{
  IntervalTree
    *child;

  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  child=node->child;
  if (child != (IntervalTree *) NULL)
    {
      ssize_t
        count;

      MagickRealType
        sum;

      sum=0.0;
      count=0;
      for ( ; child != (IntervalTree *) NULL; child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(MagickRealType) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}

/* Set each node's stability to the tau difference with its first child. */
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    node->stability=0.0;
  else
    node->stability=node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}

static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /* i starts at -1 so that zero_crossing[i+1] covers index 0. */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list: partition each current leaf at the next level's
      zero-crossing positions, attaching the pieces as children.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireQuantumMemory(1,
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireQuantumMemory(1,
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /* Close the final sub-interval if any split occurred. */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireQuantumMemory(1,
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+  O p t i m a l T a u                                                        %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OptimalTau() finds the optimal tau for each band of the histogram.
%
%  The format of the OptimalTau method is:
%
%      MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
%        const double min_tau,const double delta_tau,
%        const double smooth_threshold,short *extrema)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
*/

/* Collect nodes whose stability is at least the mean stability of their
   children; recursion stops descending below an accepted node. */
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[(*number_nodes)++]=node;
      ActiveNodes(list,number_nodes,node->sibling);
    }
  else
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
    }
}

/* Post-order release of an interval tree (siblings and children first). */
static void FreeNodes(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  node=(IntervalTree *) RelinquishMagickMemory(node);
}

static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  MagickRealType
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per tau step plus the original
    histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list: for each tau, smooth the histogram and
    record where its second derivative changes sign.
  */
  derivative=(MagickRealType *) AcquireCriticalMemory(256*sizeof(*derivative));
  second_derivative=(MagickRealType *) AcquireCriticalMemory(256*
    sizeof(*second_derivative));
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(MagickRealType) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(MagickRealType *) RelinquishMagickMemory(derivative);
  second_derivative=(MagickRealType *)
    RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes:  stability is greater (or equal) to the mean
    stability of its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.  Exact float comparison is
      intentional here: node->tau was copied verbatim from the same array.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak (or valley, when the interval's right
      edge is a positive-to-negative crossing).
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    /* Mark the whole interval with the (signed) extremum position; 0 is
       encoded as 256 so the sign still distinguishes peak from valley. */
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  average_tau*=PerceptibleReciprocal((MagickRealType) number_nodes);
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+  S c a l e S p a c e                                                        %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleSpace() performs a scale-space filter on the 1D histogram.
%
%  The format of the ScaleSpace method is:
%
%      ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
%        MagickRealType *scale_histogram)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of MagickRealTypes representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  double
    alpha,
    beta,
    *gamma,
    sum;

  ssize_t
    u,
    x;

  gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma));
  if (gamma == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  /*
    Gaussian kernel parameters: alpha normalizes, beta is the exponent
    coefficient for standard deviation tau.
  */
  alpha=1.0/(tau*sqrt(2.0*MagickPI));
  beta=(-1.0/(2.0*tau*tau));
  /* Zero-fill first; the next loop may break early, leaving the tail 0. */
  for (x=0; x <= 255; x++)
    gamma[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    gamma[x]=exp((double) beta*x*x);
    if (gamma[x] < MagickEpsilon)
      break;
  }
  /* Convolve the histogram with the (symmetric) Gaussian kernel. */
  for (x=0; x <= 255; x++)
  {
    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=(MagickRealType) (alpha*sum);
  }
  gamma=(double *) RelinquishMagickMemory(gamma);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%  S e g m e n t I m a g e                                                    %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose:  Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold:  This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        /* no trailing semicolon: the macro expands to a return statement */
        ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram in the requested colorspace and locate the optimal
    smoothing tau per channel.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace);
  InitializeHistogram(image,histogram,&image->exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,smooth_threshold == 0.0 ?
    1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,smooth_threshold == 0.0 ?
    1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,smooth_threshold == 0.0 ?
    1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  (void) TransformImageColorspace(image,previous_colorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+  Z e r o C r o s s H i s t o g r a m                                        %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(MagickRealType *second_derivative,
%        const MagickRealType smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of MagickRealTypes representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: parity tracks the sign of the last nonzero value.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
/* ==== LateralConvolution.c (Torch nn extension) ==== */
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/LateralConvolution.c"
#else

/*
 * Lateral (1x1) convolution: each output plane is a weighted sum of the
 * input planes at the same spatial location, implemented as a per-batch
 * matrix multiply  output2d = weight * input2d  on flattened H*W columns.
 *
 * Lua stack contract (all three entry points): arg 1 is the module table
 * (fields: nInputPlane, nOutputPlane, weight, bias, output, gradInput,
 * gradWeight, gradBias, ones), arg 2 the input tensor, arg 3 (backward
 * passes) gradOutput, arg 4 (accGradParameters) an optional scale.
 */

static int nnconv1d_(LateralConvolution_updateOutput)(lua_State *L)
{
   THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
   int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
   int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");

   THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
   THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_Tensor);
   THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);

   luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2,
                 "3D or 4D (batch mode) tensor expected");

   /* change to batch mode: view a single sample as a batch of one */
   int batch = 1;
   if (input->nDimension == 3) {
      batch = 0;
      THTensor_(resize4d)(input, 1, nInputPlane, input->size[1], input->size[2]);
   }

   long batchSize    = input->size[0];
   long inputHeight  = input->size[2];
   long inputWidth   = input->size[3];
   long outputHeight = inputHeight;   /* 1x1 kernel: spatial size unchanged */
   long outputWidth  = inputWidth;

   THTensor_(resize4d)(output, batchSize, nOutputPlane, outputHeight, outputWidth);

   int elt;
#pragma omp parallel for private(elt)
   for (elt = 0; elt < batchSize; elt++) {

      /* select each batch in 2D */
      THTensor *input_t  = THTensor_(newSelect)(input, 0, elt);
      THTensor *output_t = THTensor_(newSelect)(output, 0, elt);
      THTensor *input2d = THTensor_(newWithStorage2d)
         (input_t->storage, input_t->storageOffset,
          nInputPlane, -1, inputHeight*inputWidth, -1);
      THTensor *output2d = THTensor_(newWithStorage2d)
         (output_t->storage, output_t->storageOffset,
          nOutputPlane, -1, outputHeight*outputWidth, -1);

      /* fill biases.
         BUGFIX: use output_t->storageOffset (offset of THIS batch element),
         not output->storageOffset.  The old code made every thread fill
         batch 0's planes: wrong results for all other batches and a data
         race between the parallel iterations. */
      int i;
      for (i = 0; i < nOutputPlane; i++)
         THVector_(fill)(output_t->storage->data + output_t->storageOffset +
                         output_t->stride[0]*i,
                         THTensor_(get1d)(bias, i), outputHeight*outputWidth);

      /* convolve: output2d += weight * input2d (bias already written) */
      THTensor_(addmm)(output2d, 1, output2d, 1, weight, input2d);

      /* release temp tensors */
      THTensor_(free)(input2d);
      THTensor_(free)(output2d);
      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
   }

   /* revert to single batch */
   if (batch == 0) {
      THTensor_(resize3d)(input, nInputPlane, inputHeight, inputWidth);
      THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);
   }

   return 1;
}

static int nnconv1d_(LateralConvolution_updateGradInput)(lua_State *L)
{
   THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
   THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
   int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
   int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");

   THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
   THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);

   THArgCheck(nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1,
              "Number of output features is not equal to nOutputPlane" );

   /* change to batch mode */
   int batch = 1;
   if (input->nDimension == 3) {
      batch = 0;
      THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]);
      THTensor_(resize4d)(gradOutput, 1, nOutputPlane, gradOutput->size[1], gradOutput->size[2]);
   }

   long batchSize    = input->size[0];
   long inputWidth   = input->size[3];
   long inputHeight  = input->size[2];
   long outputWidth  = inputWidth;
   long outputHeight = inputHeight;

   THTensor_(resizeAs)(gradInput, input);

   /* gradInput = weight^T * gradOutput; transpose in place and restore after
      the loop (the loop itself only reads weight, so this is race-free) */
   THTensor_(transpose)(weight, weight, 0, 1);

   int elt;
#pragma omp parallel for private(elt)
   for (elt = 0; elt < batchSize; elt++) {

      /* select each batch in 2D */
      THTensor *gradInput_t  = THTensor_(newSelect)(gradInput, 0, elt);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, elt);
      THTensor *gradInput2d = THTensor_(newWithStorage2d)
         (gradInput_t->storage, gradInput_t->storageOffset,
          nInputPlane, -1, inputWidth*inputHeight, -1);
      THTensor *gradOutput2d = THTensor_(newWithStorage2d)
         (gradOutput_t->storage, gradOutput_t->storageOffset,
          nOutputPlane, -1, outputWidth*outputHeight, -1);

      /* convolve: beta = 0 overwrites any stale gradInput contents */
      THTensor_(addmm)(gradInput2d, 0, gradInput2d, 1, weight, gradOutput2d);

      /* release temp tensors */
      THTensor_(free)(gradInput2d);
      THTensor_(free)(gradOutput2d);
      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
   }

   THTensor_(transpose)(weight, weight, 0, 1);

   /* revert to single batch */
   if (batch == 0) {
      THTensor_(resize3d)(input, nInputPlane, inputHeight, inputWidth);
      THTensor_(resize3d)(gradInput, nInputPlane, inputHeight, inputWidth);
      THTensor_(resize3d)(gradOutput, nOutputPlane, outputHeight, outputWidth);
   }

   return 1;
}

static int nnconv1d_(LateralConvolution_accGradParameters)(lua_State *L)
{
   THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
   THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
   real scale = luaL_optnumber(L, 4, 1);
   int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
   int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");

   THTensor *ones = luaT_getfieldcheckudata(L, 1, "ones", torch_Tensor);
   THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor);
   THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_Tensor);

   THArgCheck(nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1,
              "Number of output features is not equal to nOutputPlane" );

   /* change to batch mode */
   int batch = 1;
   if (input->nDimension == 3) {
      batch = 0;
      THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]);
      THTensor_(resize4d)(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
   }

   long batchSize    = input->size[0];
   long inputWidth   = input->size[3];
   long inputHeight  = input->size[2];
   long outputWidth  = inputWidth;
   long outputHeight = inputHeight;

   /* vector of ones used to reduce gradOutput over spatial positions for
      the bias gradient */
   if (ones->nDimension != 1 || ones->size[0] < outputHeight*outputWidth) {
      THTensor_(resize1d)(ones, outputHeight*outputWidth);
      THTensor_(fill)(ones, 1);
   }

   /* NOTE: intentionally sequential -- every iteration accumulates into the
      shared gradWeight/gradBias tensors */
   int elt;
   for (elt = 0; elt < batchSize; elt++) {

      /* select each batch in 2D */
      THTensor *input_t      = THTensor_(newSelect)(input, 0, elt);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, elt);
      THTensor *input2d = THTensor_(newWithStorage2d)
         (input_t->storage, input_t->storageOffset,
          nInputPlane, -1, inputWidth*inputHeight, -1);
      /* BUGFIX: view THIS batch element's slice (gradOutput_t), not the
         start of the whole gradOutput storage.  The old code accumulated
         batch 0's gradients for every element of the batch. */
      THTensor *gradOutput2d = THTensor_(newWithStorage2d)
         (gradOutput_t->storage, gradOutput_t->storageOffset,
          nOutputPlane, -1, outputWidth*outputHeight, -1);

      /* convolve: gradWeight += scale * gradOutput2d * input2d^T */
      THTensor_(transpose)(input2d, input2d, 0, 1);
      THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, input2d);
      THTensor_(transpose)(input2d, input2d, 0, 1);

      /* fill biases: gradBias += scale * gradOutput2d * ones */
      THTensor_(addmv)(gradBias, 1, gradBias, scale, gradOutput2d, ones);

      THTensor_(free)(input2d);
      THTensor_(free)(gradOutput2d);
      THTensor_(free)(input_t);
      THTensor_(free)(gradOutput_t);
   }

   /* revert to single batch */
   if (batch == 0) {
      THTensor_(resize3d)(input, nInputPlane, inputHeight, inputWidth);
      THTensor_(resize3d)(gradOutput, nOutputPlane, outputHeight, outputWidth);
   }

   return 0;
}

static const struct luaL_Reg nnconv1d_(LateralConvolution__) [] = {
   {"LateralConvolution_updateOutput", nnconv1d_(LateralConvolution_updateOutput)},
   {"LateralConvolution_updateGradInput", nnconv1d_(LateralConvolution_updateGradInput)},
   {"LateralConvolution_accGradParameters", nnconv1d_(LateralConvolution_accGradParameters)},
   {NULL, NULL}
};

static void nnconv1d_(LateralConvolution_init)(lua_State *L)
{
   luaT_pushmetatable(L, torch_Tensor);
   luaT_registeratname(L, nnconv1d_(LateralConvolution__), "nn");
   lua_pop(L,1);
}

#endif
parallel_sort.h
#ifndef AQSORT_IMPL_PARALLEL_SORT_H
#define AQSORT_IMPL_PARALLEL_SORT_H

#ifdef _OPENMP
#include <omp.h>
#endif

#include <cassert>
#include <cmath>
#include <cstddef>

#include "parallel_partition.h"
#include "sequential_sort.h"

// NOTE(review): omp_get_nested / omp_set_nested / omp_get_num_threads are
// called below without an _OPENMP guard even though <omp.h> is only included
// conditionally -- this header presumably does not compile without OpenMP;
// confirm whether a serial fallback is intended.

namespace aqsort
{
    namespace impl
    {
        // Task-parallel quicksort over the abstract sequence [start, start+n).
        // Elements are accessed only through *comp (strict-weak "less") and
        // *swap.  total_n / total_P apportion threads to a chunk proportionally
        // to its size; `level` is a recursion-depth budget that forces a fall
        // back to sequential_sort when exhausted (quicksort worst-case guard).
        template<typename Comp, typename Swap>
        void parallel_quick_sort(std::size_t total_n, std::size_t total_P,
                std::size_t start, std::size_t n, Comp* const comp, Swap* const swap, std::size_t level)
        {
            // avoid unnecessary calling performance penalty
            assert (n > 1);

            // tail-recursion removal loop: the larger partition is looped on,
            // the smaller (or equal) one is spawned as an OpenMP task
            while (true) {
                // check quicksort worst case
                if (level == 0) {
                    sequential_sort(start, n, comp, swap, level);
                    return;
                }
                level--;

                // actual number of threads for this chunk
                std::size_t P = total_P * n / total_n;
                if (P < 1) P = 1;

                if (P < 2) {
                    // sequential sort
//#pragma omp task untied firstprivate(comp, swap)
//#pragma omp task firstprivate(comp, swap)
                    sequential_sort(start, n, comp, swap, level);
                    return;
                }

                // choose median-of-medians pivot and put it at the end (position n - 1)
                std::size_t pivot = select_pivot_mom(start, n, comp);
                // swap to the end
                (*swap)(pivot, start + n - 1);
                pivot = start + n - 1;

                // parallel partitioning using P threads
                std::size_t less_than = parallel_partition(start, n, pivot, comp, swap, P);
                // NOTE(review): less_than is unsigned, so the >= 0 assert is
                // vacuously true; it documents intent only.
                assert(less_than >= 0);
                assert(less_than <= n);

                // swap pivot to its final position
                (*swap)(start + less_than, pivot);

                // do not process equal-to-pivot elements: shrink the right
                // partition while its first element compares equal to the pivot
                std::size_t greater_than = n - less_than - 1;
                while ((greater_than > 0)
                        && ((*comp)(start + less_than, start + n - greater_than) == false)
                        && ((*comp)(start + n - greater_than, start + less_than) == false))
                    greater_than--;

                if (less_than > greater_than) {
//#pragma omp task untied firstprivate(comp, swap)
#pragma omp task firstprivate(comp, swap)
                    parallel_quick_sort(total_n, total_P, start + n - greater_than, greater_than, comp, swap, level);
                    n = less_than;
                }
                else {
//#pragma omp task untied firstprivate(comp, swap)
#pragma omp task firstprivate(comp, swap)
                    parallel_quick_sort(total_n, total_P, start, less_than, comp, swap, level);
                    start += n - greater_than;
                    n = greater_than;
                }
/*
                if (less_than > 1)
#pragma omp task untied firstprivate(comp, swap)
                    parallel_quick_sort(total_n, total_P, start, less_than, comp, swap);
                if (greater_than > 1)
                    parallel_quick_sort(total_n, total_P, start + n - greater_than, greater_than, comp, swap);
*/
            }
        }

        // Public entry point: sorts [0, n).  Works both when called from
        // inside an existing parallel region (master thread starts the sort,
        // others participate via tasks) and from serial code (opens its own
        // parallel region).  Requires nested parallelism; falls back to
        // sequential_sort if nesting cannot be enabled.
        // NOTE(review): omp_set_nested/omp_get_nested are deprecated since
        // OpenMP 5.0 in favour of omp_set_max_active_levels -- confirm the
        // targeted OpenMP version.
        template<typename Comp, typename Swap>
        void parallel_sort(std::size_t n, Comp* const comp, Swap* const swap)
        {
            // depth budget ~ 2*log2(n), the usual introsort-style bound
            std::size_t max_level = (std::size_t)(2.0 * floor(log2(double(n))));

            int nested_saved = omp_get_nested();
            omp_set_nested(1);
            // nested is a must for parallel algorithm
            if (omp_get_nested() == 0) {
                sequential_sort(0, n, comp, swap, max_level);
                omp_set_nested(nested_saved);
                return;
            }

            std::size_t P = omp_get_num_threads();
            if (P > 1) {
                // already in parallel region
#pragma omp master
                parallel_quick_sort(n, P, 0, n, comp, swap, max_level);
#pragma omp barrier
            }
            else {
#pragma omp parallel
                {
                    std::size_t P = omp_get_num_threads();
                    // at least 2 threads is a must for parallel sorting
                    if (P < 2)
                        sequential_sort(0, n, comp, swap, max_level);
                    else {
#pragma omp master
                        parallel_quick_sort(n, P, 0, n, comp, swap, max_level);
                    }
                }
                // implied barrier at the end of parallel construct
            }

            omp_set_nested(nested_saved);
        }
    }
}

#endif
GB_unaryop__ainv_fp64_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_fp64_int16
// op(A') function:  GB_tran__ainv_fp64_int16

// C type:   double
// A type:   int16_t
// cast:     double cij = (double) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (arithmetic negation)
#define GB_OP(z, x) \
    z = -x ;

// casting (int16_t -> double, applied before the operator)
#define GB_CASTING(z, aij) \
    double z = (double) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                          \
    /* aij = Ax [pA] */                    \
    GB_GETA (aij, Ax, pA) ;                \
    /* Cx [pC] = op (cast (aij)) */        \
    GB_CASTING (z, aij) ;                  \
    GB_OP (GB_CX (pC), z) ;                \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over anz entries; safe when Cx aliases Ax because each
// iteration reads and writes only index p.

GrB_Info GB_unop__ainv_fp64_int16
(
    double *Cx,         // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c and is specialized via the
// GB_* macros defined above.

GrB_Info GB_tran__ainv_fp64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cg.c
/* Copyright (c) 2015 The University of Edinburgh. */ /* * This software was developed as part of the * EC FP7 funded project Adept (Project ID: 610490) * www.adept-project.eu */ /* Licensed under the Apache License, Version 2.0 (the "License"); */ /* you may not use this file except in compliance with the License. */ /* You may obtain a copy of the License at */ /* http://www.apache.org/licenses/LICENSE-2.0 */ /* Unless required by applicable law or agreed to in writing, software */ /* distributed under the License is distributed on an "AS IS" BASIS, */ /* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ /* See the License for the specific language governing permissions and */ /* limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <time.h> #include <math.h> #include <omp.h> #include "utils.h" #define PCG_TOLERANCE 1e-3 #define PCG_MAX_ITER 1000 #define PCG_FLOAT_TOLERANCE 1e-2 /* Conjugate gradient benchmark */ /* struct for CSR matrix type */ typedef struct { int nrow; int ncol; int nzmax; int *colIndex; int *rowStart; double *values; } CSRmatrix; typedef struct { int nrow; int ncol; int nzmax; int *colIndex; int *rowStart; float *values; } CSRmatrixF; /* * * Sparse matrix and vector utility functions * */ static void CSR_matrix_vector_mult(CSRmatrix *A, double *x, double *b) { int i, j; #pragma omp parallel for schedule(static) private(j) for (i = 0; i < A->nrow; i++) { double sum = 0.0; for (j = A->rowStart[i]; j < A->rowStart[i+1]; j++) { sum += A->values[j] * x[A->colIndex[j]]; } b[i] = sum; } } static void CSR_matrix_vector_multF(CSRmatrixF *A, float *x, float *b) { int i, j; #pragma omp parallel for schedule(static) private(j) for (i = 0; i < A->nrow; i++) { float sum = 0.0; for (j = A->rowStart[i]; j < A->rowStart[i+1]; j++) { sum += A->values[j] * x[A->colIndex[j]]; } b[i] = sum; } } static double dotProduct(double *v1, double *v2, int size) { int i; double result = 0.0; 
#pragma omp parallel for schedule(static) reduction(+:result) for (i = 0; i < size; i++) { result += v1[i] * v2[i]; } return result; } static double dotProductF(float *v1, float *v2, int size) { int i; float result = 0.0; #pragma omp parallel for schedule(static) reduction(+:result) for (i = 0; i < size; i++) { result += v1[i] * v2[i]; } return result; } static void vecAxpy(double *x, double *y, int size, double alpha) { int i; #pragma omp parallel for schedule(static) for (i = 0; i < size; i++) { y[i] = y[i] + alpha * x[i]; } } static void vecAxpyF(float *x, float *y, int size, float alpha) { int i; #pragma omp parallel for schedule(static) for (i = 0; i < size; i++) { y[i] = y[i] + alpha * x[i]; } } static void vecAypx(double *x, double *y, int size, double alpha) { int i; #pragma omp parallel for schedule(static) for (i = 0; i < size; i++) { y[i] = alpha * y[i] + x[i]; } } static void vecAypxF(float *x, float *y, int size, float alpha) { int i; #pragma omp parallel for schedule(static) for (i = 0; i < size; i++) { y[i] = alpha * y[i] + x[i]; } } int conjugate_gradient(int s) { CSRmatrix *A; int i; double *x, *b, *r, *p, *omega; int k; double r0, r1, beta, dot, alpha; double tol = PCG_TOLERANCE * PCG_TOLERANCE; struct timespec start, end; /*====================================================================== * * generate a random matrix of size s x s * *======================================================================*/ A = malloc(sizeof(CSRmatrix)); A->nrow = s; A->ncol = s; A->nzmax = s; A->colIndex = malloc(A->nzmax * sizeof(int)); A->rowStart = malloc((A->nrow+1) * sizeof(int)); A->values = malloc(A->nzmax * sizeof(double)); /* generate structure for matrix */ #pragma omp parallel for schedule(static) for (i = 0; i < A->nrow; i++) { A->rowStart[i] = i; A->colIndex[i] = i; } A->rowStart[i] = i; /* now generate values for matrix */ srand((unsigned int)time(NULL)); #pragma omp parallel for schedule(static) for (i = 0; i < A->nzmax; i++) { A->values[i] = 
rand() / 32768.0; } /* * * Initialise vectors * */ /* allocate vectors (unknowns, RHS and temporaries) */ x = malloc(s * sizeof(double)); b = malloc(s * sizeof(double)); r = malloc(s * sizeof(double)); p = malloc(s * sizeof(double)); omega = malloc(s * sizeof(double)); /* generate a random vector of size s for the unknowns */ #pragma omp parallel for schedule(static) for (i = 0; i < s; i++) { x[i] = rand() / 32768.0; } /* multiply matrix by vector to get RHS */ CSR_matrix_vector_mult(A, x, b); /* clear initial guess and initialise temporaries */ #pragma omp parallel for schedule(static) for (i = 0; i < s; i++) { x[i] = 0.0; /* r = b - Ax; since x is 0, r = b */ r[i] = b[i]; /* p = r ( = b)*/ p[i] = b[i]; omega[i] = 0.0; } /* compute initial residual */ r1 = dotProduct(r, r, s); r0 = r1; /* * * Actual solver loop * */ k = 0; clock_gettime(CLOCK, &start); while ((r1 > tol) && (k <= PCG_MAX_ITER)) { /* omega = Ap */ CSR_matrix_vector_mult(A, p, omega); /* dot = p . omega */ dot = dotProduct(p, omega, s); alpha = r1 / dot; /* x = x + alpha.p */ vecAxpy(p, x, s, alpha); /* r = r - alpha.omega */ vecAxpy(omega, r, s, -alpha); r0 = r1; /* r1 = r . 
r */ r1 = dotProduct(r, r, s); beta = r1 / r0; /* p = r + beta.p */ vecAypx(r, p, s, beta); k++; } clock_gettime(CLOCK, &end); elapsed_time_hr(start, end, "Conjugate gradient solve."); /* * * Free memory * */ /* free the vectors */ free(omega); free(p); free(r); free(b); free(x); /* free the matrix */ free(A->colIndex); free(A->rowStart); free(A->values); free(A); return 0; } /* mixed precision version */ int conjugate_gradient_mixed(unsigned int s) { CSRmatrix *A; CSRmatrixF *AF; int i; double *x, *b, *r, *p, *omega; float *xf, *bf, *rf, *pf, *omegaf; int k; double r0, r1, beta, dot, alpha; float r0f, r1f, betaf, dotf, alphaf; double tol = PCG_FLOAT_TOLERANCE * PCG_FLOAT_TOLERANCE; struct timespec start, end; /*====================================================================== * * generate a random matrix of size s x s * *======================================================================*/ A = malloc(sizeof(CSRmatrix)); A->nrow = s; A->ncol = s; A->nzmax = s; A->colIndex = malloc(A->nzmax * sizeof(int)); A->rowStart = malloc((A->nrow+1) * sizeof(int)); A->values = malloc(A->nzmax * sizeof(double)); AF = malloc(sizeof(CSRmatrixF)); AF->nrow = s; AF->ncol = s; AF->nzmax = s; AF->colIndex = malloc(AF->nzmax * sizeof(int)); AF->rowStart = malloc((AF->nrow+1) * sizeof(int)); AF->values = malloc(AF->nzmax * sizeof(float)); /* generate structure for matrix */ #pragma omp parallel for schedule(static) for (i = 0; i < A->nrow; i++) { A->rowStart[i] = i; A->colIndex[i] = i; AF->rowStart[i] = i; AF->colIndex[i] = i; } A->rowStart[i] = i; AF->rowStart[i] = i; /* now generate values for matrix */ srand((unsigned int)time(NULL)); #pragma omp parallel for schedule(static) for (i = 0; i < A->nzmax; i++) { A->values[i] = rand() / 32768.0; AF->values[i] = (float)A->values[i]; } /*====================================================================== * * Initialise vectors * *======================================================================*/ /* allocate vectors 
(unknowns, RHS and temporaries) */ x = malloc(s * sizeof(double)); b = malloc(s * sizeof(double)); r = malloc(s * sizeof(double)); p = malloc(s * sizeof(double)); omega = malloc(s * sizeof(double)); xf = malloc(s * sizeof(float)); bf = malloc(s * sizeof(float)); rf = malloc(s * sizeof(float)); pf = malloc(s * sizeof(float)); omegaf = malloc(s * sizeof(float)); /* generate a random vector of size s for the unknowns */ #pragma omp parallel for schedule(static) for (i = 0; i < s; i++) { x[i] = rand() / 32768.0; xf[i] = (float)x[i]; } /* multiply matrix by vector to get RHS */ CSR_matrix_vector_mult(A, x, b); CSR_matrix_vector_multF(AF, xf, bf); /* clear initial guess and initialise temporaries */ #pragma omp parallel for schedule(static) for (i = 0; i < s; i++) { x[i] = 0.0; xf[i] = 0.0; /* r = b - Ax; since x is 0, r = b */ r[i] = b[i]; rf[i] = bf[i]; /* p = r ( = b)*/ p[i] = b[i]; pf[i] = bf[i]; omega[i] = 0.0; omegaf[i] = 0.0; } clock_gettime(CLOCK, &start); /* compute initial residual */ r1f = dotProductF(rf, rf, s); r0f = r1f; /*====================================================================== * * Actual solver loop (single precision) * *======================================================================*/ k = 0; while ((r1f > tol) && (k <= PCG_MAX_ITER)) { /* omega = Ap */ CSR_matrix_vector_multF(AF, pf, omegaf); /* dot = p . omega */ dotf = dotProductF(pf, omegaf, s); alphaf = r1f / dotf; /* x = x + alpha.p */ vecAxpyF(pf, xf, s, alphaf); /* r = r - alpha.omega */ vecAxpyF(omegaf, rf, s, -alphaf); r0f = r1f; /* r1 = r . 
r */ r1f = dotProductF(rf, rf, s); betaf = r1f / r0f; /* p = r + beta.p */ vecAypxF(rf, pf, s, betaf); k++; } /* convert for double precision iterations */ r1 = (double)r1f; r0 = (double)r0f; #pragma omp parallel for schedule(static) for (i = 0; i < s; i++) { r[i] = (double)rf[i]; p[i] = (double)pf[i]; x[i] = (double)xf[i]; } tol = PCG_TOLERANCE * PCG_TOLERANCE; /*====================================================================== * * Actual solver loop * *======================================================================*/ while ((r1 > tol) && (k <= PCG_MAX_ITER)) { /* omega = Ap */ CSR_matrix_vector_mult(A, p, omega); /* dot = p . omega */ dot = dotProduct(p, omega, s); alpha = r1 / dot; /* x = x + alpha.p */ vecAxpy(p, x, s, alpha); /* r = r - alpha.omega */ vecAxpy(omega, r, s, -alpha); r0 = r1; /* r1 = r . r */ r1 = dotProduct(r, r, s); beta = r1 / r0; /* p = r + beta.p */ vecAypx(r, p, s, beta); k++; } clock_gettime(CLOCK, &end); elapsed_time_hr(start, end, "Conjugate gradient solve."); /*====================================================================== * * Free memory * *======================================================================*/ /* free the vectors */ free(omega); free(p); free(r); free(b); free(x); free(omegaf); free(pf); free(rf); free(bf); free(xf); /* free the matrix */ free(A->colIndex); free(A->rowStart); free(A->values); free(A); free(AF->colIndex); free(AF->rowStart); free(AF->values); free(AF); return 0; }
data.c
#include "data.h" #include "utils.h" #include "image.h" #include "dark_cuda.h" #include "box.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #define NUMCHARS 37 pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } /* char **get_random_paths_indexes(char **paths, int n, int m, int *indexes) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = random_gen()%m; indexes[i] = index; random_paths[i] = paths[index]; if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } */ char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed) { int speed = rand_int(1, augment_speed); if (speed < 1) speed = 1; char** sequentia_paths = (char**)calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d, mini_batch = %d \n", n, mini_batch); unsigned int *start_time_indexes = (unsigned int *)calloc(mini_batch, sizeof(unsigned int)); for (i = 0; i < mini_batch; ++i) { start_time_indexes[i] = random_gen() % m; //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]); } for (i = 0; i < n; ++i) { do { int time_line_index = i % mini_batch; unsigned int index = start_time_indexes[time_line_index] % m; start_time_indexes[time_line_index] += speed; //int index = random_gen() % m; sequentia_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf(" index = %u - grp: %s \n", index, paths[index]); if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]); } while (strlen(sequentia_paths[i]) == 0); } free(start_time_indexes); pthread_mutex_unlock(&mutex); return sequentia_paths; } char **get_random_paths(char **paths, int n, int m) { 
char** random_paths = (char**)calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d \n", n); for(i = 0; i < n; ++i){ do { int index = random_gen() % m; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf("grp: %s\n", paths[index]); if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]); } while (strlen(random_paths[i]) == 0); } pthread_mutex_unlock(&mutex); return random_paths; } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char** replace_paths = (char**)calloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure) { int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ int size = w > h ? w : h; image im = load_image_color(paths[i], 0, 0); image crop = random_augment_image(im, angle, aspect, min, max, size); int flip = use_flip ? 
random_gen() % 2 : 0; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); image sized = resize_image(crop, w, h); //show_image(im, "orig"); //show_image(sized, "sized"); //show_image(sized, paths[i]); //wait_until_press_key_cv(); //printf("w = %d, h = %d \n", sized.w, sized.h); free_image(im); free_image(crop); X.vals[i] = sized.data; X.cols = sized.h*sized.w*sized.c; } return X; } extern int check_mistakes; box_label *read_boxes(char *filename, int *n) { box_label* boxes = (box_label*)calloc(1, sizeof(box_label)); FILE *file = fopen(filename, "r"); if (!file) { printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename); //file_error(filename); FILE* fw = fopen("bad.list", "a"); fwrite(filename, sizeof(char), strlen(filename), fw); char *new_line = "\n"; fwrite(new_line, sizeof(char), strlen(new_line), fw); fclose(fw); if (check_mistakes) getchar(); *n = 0; return boxes; } float x, y, h, w; int id; int count = 0; while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ boxes = (box_label*)realloc(boxes, (count + 1) * sizeof(box_label)); boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = random_gen()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 || (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1) { boxes[i].x = 
999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. - swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 30; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .001 || h < 
.001) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int net_w, int net_h) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; int i; box_label *boxes = read_boxes(labelpath, &count); int min_w_h = 0; float lowest_w = 1.F / net_w; float lowest_h = 1.F / net_h; randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); if (count > num_boxes) count = num_boxes; float x, y, w, h; int id; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; // not detect small objects //if ((w < 0.001F || h < 0.001F)) continue; // if truth (box for object) is smaller than 1x1 pix char buff[256]; if (id >= classes) { printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d] \n", id, (classes-1)); sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. 
But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1));
            /* NOTE(review): labelpath is interpolated into a shell command
             * passed to system(); a hostile file name could inject commands
             * -- confirm label paths are trusted. */
            system(buff);
            getchar();
            ++sub;
            continue;
        }
        if ((w < lowest_w || h < lowest_h)) {
            //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath);
            //system(buff);
            ++sub;
            continue;
        }
        /* 999999 is the sentinel correct_boxes() assigns to invalid boxes */
        if (x == 999999 || y == 999999) {
            printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1 \n");
            sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (x <= 0 || x > 1 || y <= 0 || y > 1) {
            printf("\n Wrong annotation: x = %f, y = %f \n", x, y);
            sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        /* oversize boxes are clamped (not rejected) */
        if (w > 1) {
            printf("\n Wrong annotation: w = %f \n", w);
            sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w);
            system(buff);
            w = 1;
            if (check_mistakes) getchar();
        }
        if (h > 1) {
            printf("\n Wrong annotation: h = %f \n", h);
            sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h);
            system(buff);
            h = 1;
            if (check_mistakes) getchar();
        }
        /* nudge boxes sitting exactly on the origin off the zero value
         * (zero x terminates the truth list downstream) */
        if (x == 0) x += lowest_w;
        if (y == 0) y += lowest_h;

        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;

        /* track the smallest object extent in network pixels */
        if (min_w_h == 0) min_w_h = w*net_w;
        if (min_w_h > w*net_w) min_w_h = w*net_w;
        if (min_w_h > h*net_h) min_w_h = h*net_h;
    }
    free(boxes);
    return min_w_h;
}

/* Print an n-character captcha prediction as text. */
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}

/* Build one-hot captcha truth from the characters of the file name
 * (text after the last '/'), one NUMCHARS-wide slot per character. */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    int i;
    for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]); 
        truth[i*NUMCHARS+index] = 1;
    }
    /* pad remaining slots with the "blank" character */
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}

/* Load n captcha images plus one-hot labels derived from their file names. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);
    return d;
}

/* Autoencoder-style captcha data: the target aliases the input image. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}

/* One-hot classification truth: set truth[i]=1 for every label whose name
 * appears as a substring of `path`; warn when the match count is not
 * exactly one. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    memset(truth, 0, k*sizeof(float));
    int count = 0;
    for(i = 0; i < k; ++i){
        if(strstr(path, labels[i])){
            truth[i] = 1;
            ++count;
        }
    }
    if (count != 1) {
        printf("Too many or too few labels: %d, %s\n", count, path);
        count = 0;
        for (i = 0; i < k; ++i) {
            if (strstr(path, labels[i])) {
                printf("\t label %d: %s \n", count, labels[i]);
                count++;
            }
        }
    }
}

/* Propagate truth up the label hierarchy (mark every ancestor of a set
 * label), then mark groups with no set member as SECRET_NUM ("don't care"). */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            /* no label set in this group: mark the whole group */
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}

/* Build the label matrix for n paths; optionally expand it through a
 * class hierarchy. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    int i;
    for(i = 0; i < n && labels; ++i){
        fill_truth(paths[i], labels, k, y.vals[i]);
        if(hierarchy){
            fill_hierarchy(y.vals[i], k, hierarchy);
        }
    }
    return y;
}

/* Multi-label tag truth read from per-image .txt files. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    int 
 count = 0;
    for(i = 0; i < n; ++i){
        char label[4096];
        /* map imgs/xxx_iconl.jpeg -> labels/xxx.txt (fall back to labels2/) */
        find_replace(paths[i], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file){
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
            if(!file) continue;
        }
        ++count;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    printf("%d/%d\n", count, n);
    return y;
}

/* Read label names from `filename`, one per line; optionally report how
 * many were read through `size`.  Caller owns the returned array. */
char **get_labels_custom(char *filename, int *size)
{
    list *plist = get_paths(filename);
    if(size) *size = plist->size;
    char **labels = (char **)list_to_array(plist);
    free_list(plist);
    return labels;
}

char **get_labels(char *filename)
{
    return get_labels_custom(filename, NULL);
}

/* Release a data struct; shallow copies only free the row-pointer arrays
 * (the rows themselves are owned elsewhere). */
void free_data(data d)
{
    if(!d.shallow){
        free_matrix(d.X);
        free_matrix(d.y);
    }else{
        free(d.X.vals);
        free(d.y.vals);
    }
}

/* Load n images with random crop/flip/HSV-distortion augmentation and
 * build a region-layer truth grid for each. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;

        /* jitter each border independently, then crop */
        int dw = (ow*jitter);
        int dh = (oh*jitter);

        int pleft = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop = rand_uniform(-dh, dh);
        int pbot = rand_uniform(-dh, dh);

        int swidth = ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        float sx = (float)swidth / ow;
        float sy = (float)sheight / oh;

        int flip = random_gen()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        fill_truth_region(random_paths[i], 
d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = (float*)calloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char **paths, int n, int classes, float jitter) { int index = random_gen()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); 
 d.X.cols = h*w*3;

    int k = (4+classes)*30;
    d.y = make_matrix(1, k);

    int dw = w*jitter;
    int dh = h*jitter;

    int pleft = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop = rand_uniform(-dh, dh);
    int pbot = rand_uniform(-dh, dh);

    int swidth = w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth / w;
    float sy = (float)sheight / h;

    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
    free_image(orig);
    free_image(cropped);
    return d;
}

/* Merge the truth boxes of a mixup partner into `new_truth`: append
 * `old_truth` boxes after the last non-empty slot of `new_truth`
 * (a zero x terminates each list), up to `boxes` entries total. */
void blend_truth(float *new_truth, int boxes, float *old_truth)
{
    const int t_size = 4 + 1;
    int count_new_truth = 0;
    int t;
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*(4 + 1)];
        if (!x) break;
        count_new_truth++;
    }
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + t*t_size;
        float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
        float x = old_truth_ptr[0];
        if (!x) break;
        new_truth_ptr[0] = old_truth_ptr[0];
        new_truth_ptr[1] = old_truth_ptr[1];
        new_truth_ptr[2] = old_truth_ptr[2];
        new_truth_ptr[3] = old_truth_ptr[3];
        new_truth_ptr[4] = old_truth_ptr[4];
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}

#ifdef OPENCV

#include "http_stream.h"

/* OpenCV-backed detection-data loader: random crop/flip/HSV-distort/blur
 * augmentation, optional mixup of two samples, optional letterboxing, and
 * per-sample truth built by fill_truth_detection(). */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? 
 c : 3;

    char **random_paths;
    char **mixup_random_paths = NULL;
    if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);

    /* mixup blends two augmented samples; decided at random per call */
    int mixup = use_mixup ? random_gen() % 2 : 0;
    //printf("\n mixup = %d \n", mixup);
    if (mixup) {
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }

    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;

    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0;
    int augmentation_calculated = 0;

    d.y = make_matrix(n, 5*boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;   // recalculate augmentation for the 2nd sequence if(track==1)

        for (i = 0; i < n; ++i) {
            float *truth = (float*)calloc(5 * boxes, sizeof(float));
            const char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];

            int flag = (c >= 3);
            mat_cv *src;
            src = load_image_mat_cv(filename, flag);
            if (src == NULL) {
                if (check_mistakes) getchar();
                continue;
            }

            int oh = get_height_mat(src);
            int ow = get_width_mat(src);

            int dw = (ow*jitter);
            int dh = (oh*jitter);

            /* when tracking, the same augmentation parameters are reused
             * across the whole mini-batch sequence */
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();

                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);

                flip = use_flip ? random_gen() % 2 : 0;

                //blur = rand_int(0, 1) ? (use_blur) : 0;
                int tmp_blur = rand_int(0, 2);  // 0 - disable, 1 - blur background, 2 - blur the whole image
                if (tmp_blur == 2) blur = use_blur;
                else blur = tmp_blur;
            }

            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh);

            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently

            /* letterbox: grow the crop so its aspect ratio matches the net */
            if (letter_box)
            {
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh)/2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow)/2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }

            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
            if (min_w_h / 8 < blur && blur > 1) blur = min_w_h / 8;   // disable blur if one of the objects is too small

            image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, blur, boxes, d.y.vals[i]);

            if (i_mixup) {
                /* blend this sample 50/50 with the previously loaded one */
                image old_img = ai;
                old_img.data = d.X.vals[i];
                //show_image(ai, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images_cv(ai, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);
            }

            d.X.vals[i] = ai.data;
            memcpy(d.y.vals[i], truth, 5*boxes * sizeof(float));

            if (show_imgs)// && i_mixup)   // delete i_mixup
            {
                image tmp_ai = copy_image(ai);
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*ai.w;
                    int right = (b.x + b.w / 2.)*ai.w;
                    int top = (b.y - b.h / 2.)*ai.h;
                    int bot = (b.y + b.h / 2.)*ai.h;
                    draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(tmp_ai, buff);
                if (show_imgs == 1) {
                    //char buff_src[1000];
                    //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                    //show_image_mat(src, buff_src);
                    show_image(tmp_ai, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Click on window and press ESC button \n");
                free_image(tmp_ai);
            }

            release_mat(&src);
            free(truth);
        }
    }
    free(random_paths);
    if(mixup_random_paths) free(mixup_random_paths);
    return d;
}
#else    // OPENCV

/* Pixel-wise alpha blend of two equal-sized images: new = alpha*new + beta*old. */
void blend_images(image new_img, float alpha, image old_img, float beta)
{
    int i;
    int data_size = new_img.w * new_img.h * new_img.c;
    #pragma omp parallel for
    for (i = 0; i < data_size; ++i)
        new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta;
}

/* Fallback detection-data loader without OpenCV: same augmentation
 * pipeline (crop/flip/distort/letterbox/mixup) built on the native image
 * routines; blur is not supported here. */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;
    char **random_paths;
    char **mixup_random_paths = NULL;
    if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);

    int mixup = use_mixup ? random_gen() % 2 : 0;
    //printf("\n mixup = %d \n", mixup);
    if (mixup) {
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }

    int i;
    data d = { 0 };
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;

    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0;
    int augmentation_calculated = 0;

    d.y = make_matrix(n, 5 * boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;
        for (i = 0; i < n; ++i) {
            float *truth = (float*)calloc(5 * boxes, sizeof(float));
            char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];

            image orig = load_image(filename, 0, 0, c);

            int oh = orig.h;
            int ow = orig.w;

            int dw = (ow*jitter);
            int dh = (oh*jitter);

            /* when tracking, reuse one set of augmentation parameters
             * across the sequence */
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();

                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);

                flip = use_flip ? random_gen() % 2 : 0;
            }

            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);

            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently

            if (letter_box)
            {
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh) / 2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow) / 2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }

            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            image sized = resize_image(cropped, w, h);
            if (flip) flip_image(sized);
            distort_image(sized, dhue, dsat, dexp);
            //random_distort_image(sized, hue, saturation, exposure);

            fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);

            if (i_mixup) {
                /* blend this sample 50/50 with the previously loaded one */
                image old_img = sized;
                old_img.data = d.X.vals[i];
                //show_image(sized, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images(sized, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);
            }

            d.X.vals[i] = sized.data;
            memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));

            if (show_imgs)// && i_mixup)
            {
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen());

                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*sized.w;
                    int right = (b.x + b.w / 2.)*sized.w;
                    int top = (b.y - b.h / 2.)*sized.h;
                    int bot = (b.y + b.h / 2.)*sized.h;
                    draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(sized, buff);
                if (show_imgs == 1) {
                    show_image(sized, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Press Enter: \n");
                //getchar();
            }

            free_image(orig);
            free_image(cropped);
            free(truth);
        }
    }
    free(random_paths);
    if (mixup_random_paths) free(mixup_random_paths);
    return d;
}
#endif    // OPENCV

/* pthread worker: dispatch one load_args request to the loader selected
 * by a.type, writing results through the pointers inside `a`, then free
 * the heap-allocated argument struct. */
void *load_thread(void *ptr)
{
    //srand(time(0));
    //printf("Loading data: %d\n", random_gen());
    load_args a = *(struct load_args*)ptr;
    /* zero means "unset": fall back to neutral augmentation factors */
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;

    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == REGION_DATA){
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == DETECTION_DATA){
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.blur, a.mixup, a.jitter, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs);
    } else if (a.type == SWAG_DATA){
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    } else if (a.type == COMPARE_DATA){
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
    } else if (a.type == IMAGE_DATA){
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    }else if (a.type == LETTERBOX_DATA) {
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    } else if (a.type == TAG_DATA){
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr); 
 return 0;
}

/* Spawn one background thread running load_thread() over a heap copy of
 * `args`; caller joins the returned thread. */
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)calloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
    return thread;
}

/* Fan the requested args.n samples out over args.threads worker threads,
 * then concatenate the per-thread results into *args.d. */
void *load_threads(void *ptr)
{
    //srand(time(0));
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data* buffers = (data*)calloc(args.threads, sizeof(data));
    pthread_t* threads = (pthread_t*)calloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        /* split `total` as evenly as integer division allows */
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;   /* row data is now owned by *out */
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}

/* Asynchronous entry point: returns immediately with a joinable thread
 * that fills *args.d when done. */
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)calloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}

/* Image-to-image data: X from *.png, y from the matching *-label.png. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}

/* Plain classification data without augmentation. */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0);
    if(m) free(paths);
    return d;
}

/*
data load_data_study(char **paths, 
int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    data d = {0};
    d.indexes = calloc(n, sizeof(int));
    if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure);
    d.y = load_labels_paths(paths, n, labels, k);
    if(m) free(paths);
    return d;
}
*/

/* Super-resolution pairs: X is a downscaled crop, y is the original crop. */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;

    int i;
    d.X.rows = n;
    d.X.vals = (float**)calloc(n, sizeof(float*));
    d.X.cols = w*h*3;

    d.y.rows = n;
    d.y.vals = (float**)calloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = random_gen()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }

    if(m) free(paths);
    return d;
}

/* Classification data with random augmentation; labels may be expanded
 * through a class hierarchy. */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy);
    if(m) free(paths);
    return d;
}

/* Multi-label tag data with random augmentation. */
data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.w = w;
    d.h = h;
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure);
    d.y = load_tags_paths(paths, n, k); 
 if(m) free(paths);
    return d;
}

/* Stack m2's rows after m1's (shares the row pointers, copies nothing). */
matrix concat_matrix(matrix m1, matrix m2)
{
    int i, count = 0;
    matrix m;
    m.cols = m1.cols;
    m.rows = m1.rows+m2.rows;
    m.vals = (float**)calloc(m1.rows + m2.rows, sizeof(float*));
    for(i = 0; i < m1.rows; ++i){
        m.vals[count++] = m1.vals[i];
    }
    for(i = 0; i < m2.rows; ++i){
        m.vals[count++] = m2.vals[i];
    }
    return m;
}

/* Shallow concatenation of two data sets (rows are shared, not copied). */
data concat_data(data d1, data d2)
{
    data d = {0};
    d.shallow = 1;
    d.X = concat_matrix(d1.X, d2.X);
    d.y = concat_matrix(d1.y, d2.y);
    return d;
}

/* Fold n data sets into one, freeing the intermediate shallow wrappers. */
data concat_datas(data *d, int n)
{
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data newdata = concat_data(d[i], out);
        free_data(out);
        out = newdata;
    }
    return out;
}

/* CSV classification data: column `target` becomes a k-wide one-hot y. */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);
    float **truth = one_hot_encode(truth_1d, X.rows, k);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = truth;
    d.X = X;
    d.y = y;
    free(truth_1d);
    return d;
}

/* Read one CIFAR-10 binary batch file (10000 records of 1+3072 bytes). */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;

    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        /* NOTE(review): fread return value is unchecked -- a truncated
         * file silently yields garbage rows; consider verifying == 3073 */
        fread(bytes, 1, 3073, fp);
        int class_id = bytes[0];
        y.vals[i][class_id] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    //translate_data_rows(d, -128);
    scale_data_rows(d, 1./255);
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}

/* Copy n uniformly random rows of d into the X/y batch buffers. */
void get_random_batch(data d, int n, float *X, float *y)
{
    int j;
    for(j = 0; j < n; ++j){
        int index = random_gen()%d.X.rows;
        memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
        memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
    }
}

/* Copy n consecutive rows starting at `offset` into the batch buffers. */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int j;
    for(j = 0; j < n; ++j){
        int index = offset + j;
        memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
        memcpy(y+j*d.y.cols, 
 d.y.vals[index], d.y.cols*sizeof(float));
    }
}

/* Label smoothing: pull one-hot targets toward the uniform distribution. */
void smooth_data(data d)
{
    int i, j;
    float scale = 1. / d.y.cols;
    float eps = .1;
    for(i = 0; i < d.y.rows; ++i){
        for(j = 0; j < d.y.cols; ++j){
            d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j];
        }
    }
}

/* Load all five CIFAR-10 training batches, scale to [0,1] and smooth. */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;

    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            /* NOTE(review): fread result unchecked (see load_cifar10_data) */
            fread(bytes, 1, 3073, fp);
            int class_id = bytes[0];
            y.vals[i+b*10000][class_id] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    //translate_data_rows(d, -128);
    scale_data_rows(d, 1./255);
    smooth_data(d);
    return d;
}

/* Load Go training positions: a "row col" line followed by a 361-char
 * board string ('1' = own stone -> 1, '2' = opponent -> -1, else 0);
 * y is the one-hot played move. */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    if(!fp) file_error(filename);
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            /* grow both matrices geometrically if the initial guess was low */
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        sscanf(label, "%d %d", &row, &col);
        /* NOTE(review): fgetl may return NULL on a truncated file; `board`
         * is dereferenced unchecked below -- confirm inputs are well-formed */
        char *board = fgetl(fp);
        int index = row*19 + col;
        y.vals[count][index] = 1;
        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);

    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;

    fclose(fp);
    return d;
}

/* Fisher-Yates-style shuffle of rows, keeping X and y aligned. */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        int index = random_gen()%i;

        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;

        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}

/* Multiply every input row by scalar s in place. */
void scale_data_rows(data d, float s)
{
    int i;
    for(i = 0; i < d.X.rows; 
 ++i){
        scale_array(d.X.vals[i], d.X.cols, s);
    }
}

/* Add scalar s to every input row in place. */
void translate_data_rows(data d, float s)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        translate_array(d.X.vals[i], d.X.cols, s);
    }
}

/* Normalize each input row in place. */
void normalize_data_rows(data d)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        normalize_array(d.X.vals[i], d.X.cols);
    }
}

/* Shallow view of partition `part` of `total` equal slices (no copies;
 * must not be deep-freed). */
data get_data_part(data d, int part, int total)
{
    data p = {0};
    p.shallow = 1;
    p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
    p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + d.X.rows * part / total;
    p.y.vals = d.y.vals + d.y.rows * part / total;
    return p;
}

/* Shallow sample of `num` rows drawn with replacement. */
data get_random_data(data d, int num)
{
    data r = {0};
    r.shallow = 1;

    r.X.rows = num;
    r.y.rows = num;

    r.X.cols = d.X.cols;
    r.y.cols = d.y.cols;

    r.X.vals = (float**)calloc(num, sizeof(float*));
    r.y.vals = (float**)calloc(num, sizeof(float*));

    int i;
    for(i = 0; i < num; ++i){
        int index = random_gen()%d.X.rows;
        r.X.vals[i] = d.X.vals[index];
        r.y.vals[i] = d.y.vals[index];
    }
    return r;
}

/* Split rows into train/test shallow views: rows [start,end) become the
 * test fold, everything else the train fold.  Caller frees the returned
 * 2-element array (train at [0], test at [1]). */
data *split_data(data d, int part, int total)
{
    data* split = (data*)calloc(2, sizeof(data));
    int i;
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    data train;
    data test;
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows - (end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;

    train.X.vals = (float**)calloc(train.X.rows, sizeof(float*));
    test.X.vals = (float**)calloc(test.X.rows, sizeof(float*));
    train.y.vals = (float**)calloc(train.y.rows, sizeof(float*));
    test.y.vals = (float**)calloc(test.y.rows, sizeof(float*));

    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
/* ===== next source file in this bundle: inference_helper.h ===== */
/* Copyright 2021 iwatake2222

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef INFERENCE_HELPER_
#define INFERENCE_HELPER_

/* for general */
#include <cstdint>
#include <cmath>
#include <string>
#include <vector>
#include <array>
#include <memory>

/* Describes one model tensor: identity, element type, dimensions and
 * layout (NCHW vs NHWC).  Dimension accessors return -1 when the stored
 * rank is too small for the requested axis. */
class TensorInfo {
public:
    enum {
        kTensorTypeNone,
        kTensorTypeUint8,
        kTensorTypeInt8,
        kTensorTypeFp32,
        kTensorTypeInt32,
        kTensorTypeInt64,
    };

public:
    TensorInfo()
        : name("")
        , id(-1)
        , tensor_type(kTensorTypeNone)
        , is_nchw(true)
    {}
    ~TensorInfo() {}

    /* Product of all dimensions (total element count). */
    int32_t GetElementNum() const
    {
        int32_t element_num = 1;
        for (const auto& dim : tensor_dims) {
            element_num *= dim;
        }
        return element_num;
    }

    int32_t GetBatch() const
    {
        if (tensor_dims.size() <= 0) return -1;
        return tensor_dims[0];
    }

    /* Channel axis depends on layout: NCHW -> dim 1, NHWC -> dim 3. */
    int32_t GetChannel() const
    {
        if (is_nchw) {
            if (tensor_dims.size() <= 1) return -1;
            return tensor_dims[1];
        } else {
            if (tensor_dims.size() <= 3) return -1;
            return tensor_dims[3];
        }
    }

    int32_t GetHeight() const
    {
        if (is_nchw) {
            if (tensor_dims.size() <= 2) return -1;
            return tensor_dims[2];
        } else {
            if (tensor_dims.size() <= 1) return -1;
            return tensor_dims[1];
        }
    }

    int32_t GetWidth() const
    {
        if (is_nchw) {
            if (tensor_dims.size() <= 3) return -1;
            return tensor_dims[3];
        } else {
            if (tensor_dims.size() <= 2) return -1;
            return tensor_dims[2];
        }
    }

public:
    std::string name;       // [In] Set the name_ of tensor
    int32_t id;             // [Out] Do not modify (Used in InferenceHelper)
    int32_t tensor_type;    // [In] The type of tensor 
(e.g. kTensorTypeFp32) std::vector<int32_t> tensor_dims; // InputTensorInfo: [In] The dimentions of tensor. (If empty at initialize, the size is updated from model info.) // OutputTensorInfo: [Out] The dimentions of tensor is set from model information bool is_nchw; // [IN] NCHW or NHWC }; class InputTensorInfo : public TensorInfo { public: enum { kDataTypeImage, kDataTypeBlobNhwc, // data_ which already finished preprocess(color conversion, resize, normalize_, etc.) kDataTypeBlobNchw, }; public: InputTensorInfo() : data(nullptr) , data_type(kDataTypeImage) , image_info({ -1, -1, -1, -1, -1, -1, -1, true, false }) , normalize({ 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f }) {} InputTensorInfo(std::string name_, int32_t tensor_type_, bool is_nchw_ = true) : InputTensorInfo() { name = name_; tensor_type = tensor_type_; is_nchw = is_nchw_; } ~InputTensorInfo() {} public: void* data; // [In] Set the pointer to image/blob int32_t data_type; // [In] Set the type of data_ (e.g. kDataTypeImage) struct { int32_t width; int32_t height; int32_t channel; int32_t crop_x; int32_t crop_y; int32_t crop_width; int32_t crop_height; bool is_bgr; // used when channel == 3 (true: BGR, false: RGB) bool swap_color; } image_info; // [In] used when data_type_ == kDataTypeImage struct { float mean[3]; float norm[3]; } normalize; // [In] used when data_type_ == kDataTypeImage }; class OutputTensorInfo : public TensorInfo { public: OutputTensorInfo() : data(nullptr) , quant({ 1.0f, 0 }) , data_fp32_(nullptr) {} OutputTensorInfo(std::string name_, int32_t tensor_type_, bool is_nchw_ = true) : OutputTensorInfo() { name = name_; tensor_type = tensor_type_; is_nchw = is_nchw; } ~OutputTensorInfo() { if (data_fp32_ != nullptr) { delete[] data_fp32_; } } float* GetDataAsFloat() { /* Returned pointer should be with const, but returning pointer without const is convenient to create cv::Mat */ if (tensor_type == kTensorTypeUint8 || tensor_type == kTensorTypeInt8) { if (data_fp32_ == nullptr) { data_fp32_ = new 
float[GetElementNum()]; } if (tensor_type == kTensorTypeUint8) { #pragma omp parallel for (int32_t i = 0; i < GetElementNum(); i++) { const uint8_t* val_uint8 = static_cast<const uint8_t*>(data); float val_float = (val_uint8[i] - quant.zero_point) * quant.scale; data_fp32_[i] = val_float; } } else { #pragma omp parallel for (int32_t i = 0; i < GetElementNum(); i++) { const int8_t* val_int8 = static_cast<const int8_t*>(data); float val_float = (val_int8[i] - quant.zero_point) * quant.scale; data_fp32_[i] = val_float; } } return data_fp32_; } else if (tensor_type == kTensorTypeFp32) { return static_cast<float*>(data); } else { return nullptr; } } public: void* data; // [Out] Pointer to the output data_ struct { float scale; int32_t zero_point; } quant; // [Out] Parameters for dequantization (convert uint8 to float) private: float* data_fp32_; }; namespace cv { class Mat; }; class InferenceHelper { public: enum { kRetOk = 0, kRetErr = -1, }; typedef enum { kOpencv, kOpencvGpu, kTensorflowLite, kTensorflowLiteXnnpack, kTensorflowLiteGpu, kTensorflowLiteEdgetpu, kTensorflowLiteNnapi, kTensorrt, kNcnn, kMnn, kSnpe, kArmnn, kNnabla, kNnablaCuda, } HelperType; public: static InferenceHelper* Create(const HelperType helper_type); static void PreProcessByOpenCV(const InputTensorInfo& input_tensor_info, bool is_nchw, cv::Mat& img_blob); // use this if the selected inference engine doesn't support pre-process public: virtual ~InferenceHelper() {} virtual int32_t SetNumThreads(const int32_t num_threads) = 0; virtual int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) = 0; virtual int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) = 0; virtual int32_t Finalize(void) = 0; virtual int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) = 0; virtual int32_t Process(std::vector<OutputTensorInfo>& 
output_tensor_info_list) = 0; protected: void ConvertNormalizeParameters(InputTensorInfo& tensor_info); void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, float* dst); void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, uint8_t* dst); void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, int8_t* dst); template<typename T> void PreProcessBlob(int32_t num_thread, const InputTensorInfo& input_tensor_info, T *dst); protected: HelperType helper_type_; }; #endif
GB_unop__ainv_uint8_uint8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__ainv_uint8_uint8)
// op(A') function: GB (_unop_tran__ainv_uint8_uint8)

// C type:   uint8_t
// A type:   uint8_t
// cast:     uint8_t cij = aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (AINV: additive inverse)
// NOTE: for uint8_t, negation wraps modulo 256 (unsigned C arithmetic).
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CAST(z, aij) \
    uint8_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = aij ; \
    Cx [pC] = -z ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__ainv_uint8_uint8)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse/full case: apply the op to every entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = -z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries whose bitmap flag is set are computed.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__ainv_uint8_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body is supplied by the shared transpose template,
    // specialized via the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dotproduct_parallel.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> /* Define length of dot product vectors and number of OpenMP threads */ #define VECLEN 100 #define NUMTHREADS 8 int main (int argc, char* argv[]) { int i, tid, len=VECLEN, threads=NUMTHREADS; double *a, *b; double sum, psum; printf("Starting omp_dotprod_openmp. Using %d threads\n",threads); /* Assign storage for dot product vectors */ a = (double*) malloc (len*threads*sizeof(double)); b = (double*) malloc (len*threads*sizeof(double)); /* Initialize dot product vectors */ for (i=0; i<len*threads; i++) { a[i]=1.0; b[i]=a[i]; } /* Initialize global sum */ sum = 0.0; /* Perform the dot product in an OpenMP parallel region for loop with a sum reduction For illustration purposes: - Explicitly sets number of threads - Each thread keeps track of its partial sum */ #pragma omp parallel private(i,tid,psum) num_threads(threads) { psum = 0.0; tid = omp_get_thread_num(); #pragma omp for reduction(+:sum) for (i=0; i<len*threads; i++) { sum += (a[i] * b[i]); psum = sum; } printf("Thread %d partial sum = %f\n",tid, psum); } printf ("Done. OpenMP version: sum = %f \n", sum); free (a); free (b); }
GB_binop__lxor_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lxor_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__lxor_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__lxor_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__lxor_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lxor_fp64)
// A*D function (colscale):         GB (_AxD__lxor_fp64)
// D*A function (rowscale):         GB (_DxB__lxor_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__lxor_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__lxor_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lxor_fp64)
// C=scalar+B                       GB (_bind1st__lxor_fp64)
// C=scalar+B'                      GB (_bind1st_tran__lxor_fp64)
// C=A+scalar                       GB (_bind2nd__lxor_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__lxor_fp64)

// C type:     double
// A type:     double
// A pattern?  0
// B type:     double
// B pattern?  0

// BinaryOp:   cij = ((aij != 0) != (bij != 0))
// LXOR on fp64: any nonzero value is treated as logical "true".

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) != (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_FP64 || GxB_NO_LXOR_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LXOR is not in that list, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the template block above always returns (generated code)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion uses alpha/beta in place of entries missing from A/B
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lxor_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lxor_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its value for the rest of this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = ((aij != 0) != (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
bfsdfs.h
namespace TSnap { ///////////////////////////////////////////////// // BFS and DFS /// Returns a directed Breadth-First-Search tree rooted at StartNId. ##GetBfsTree1 template <class PGraph> PNGraph GetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn); /// Returns the BFS tree size (number of nodes) and depth (number of levels) by following in-links (parameter FollowIn = true) and/or out-links (parameter FollowOut = true) of node StartNId. template <class PGraph> int GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, int& TreeSzX, int& TreeDepthX); /// Finds IDs of all nodes that are at distance Hop from node StartNId. ##GetSubTreeSz template <class PGraph> int GetNodesAtHop(const PGraph& Graph, const int& StartNId, const int& Hop, TIntV& NIdV, const bool& IsDir=false); /// Returns the number of nodes at each hop distance from the starting node StartNId. ##GetNodesAtHops template <class PGraph> int GetNodesAtHops(const PGraph& Graph, const int& StartNId, TIntPrV& HopCntV, const bool& IsDir=false); ///////////////////////////////////////////////// // Shortest paths /// Returns the length of the shortest path from node SrcNId to node DstNId. ##GetShortPath1 template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir=false); /// Returns the length of the shortest path from node SrcNId to all other nodes in the network. ##GetShortPath2 template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir=false, const int& MaxDist=TInt::Mx); ///////////////////////////////////////////////// // Diameter /// Returns the (approximation of the) Diameter (maximum shortest path length) of a graph (by performing BFS from NTestNodes random starting nodes). 
##GetBfsFullDiam template <class PGraph> int GetBfsFullDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); /// Returns the (approximation of the) Effective Diameter (90-th percentile of the distribution of shortest path lengths) of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam1 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); /// Returns the (approximation of the) Effective Diameter and the Diameter of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam2 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX); /// Returns the (approximation of the) Effective Diameter, the Diameter and the Average Shortest Path length in a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam3 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX, double& AvgSPLX); /// Returns the (approximation of the) Effective Diameter, the Diameter and the Average Shortest Path length in a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiamAll template <class PGraph> double GetBfsEffDiamAll(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX, double& AvgSPLX); /// Use the whole graph (all edges) to measure the shortest path lengths but only report the path lengths between nodes in the SubGraphNIdV. 
##GetBfsEffDiam4 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const TIntV& SubGraphNIdV, const bool& IsDir, double& EffDiamX, int& FullDiamX); // TODO: Implement in the future //template <class PGraph> int GetRangeDist(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir=false); //template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir=false, const int& MaxDist=1000); //template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const TIntSet& TargetSet, const bool& IsDir, TIntV& PathNIdV); //template <class PGraph> int GetShortPath(TIntH& NIdPrnH, TCcQueue<int>& NIdQ, const PGraph& Graph, const int& SrcNId, const TIntSet& TargetSet, const bool& IsDir, TIntV& PathNIdV); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir=false); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, int& MxDistNId); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, int& MxDistNId, TCcQueue<int>& NIdQ, TCcQueue<int>& DistQ, TIntSet& VisitedH); //template <class PGraph> int GetMxGreedyDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir=false); //template <class PGraph> int GetMxGreedyDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, TCcQueue<int>& NIdQ, TCcQueue<int>& DistQ, TIntSet& VisitedH); //template <class PGraph> PNGraph GetShortPathsSubGraph(const PGraph& Graph, const TIntV& SubGraphNIdV); //template <class PGraph> PGraph GetWccPathsSubGraph(const PGraph& Graph, const TIntV& NIdV); //template <class PGraph> void GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOutEdges, int& TreeSz, int& TreeDepth); } // namespace TSnap //#////////////////////////////////////////////// /// Breath-First-Search class. 
/// The class is meant for executing many BFSs over a fixed graph. This means that the class can keep the hash tables and queues initialized between different calls of the DoBfs() function.
template<class PGraph>
class TBreathFS {
public:
  PGraph Graph;            // graph being searched (shared across calls)
  TSnapQueue<int> Queue;   // BFS frontier queue, reused between DoBfs() calls
  TInt StartNId;           // source node of the most recent BFS
  TIntH NIdDistH;          // node id -> hop distance from StartNId
public:
  TBreathFS(const PGraph& GraphPt, const bool& InitBigQ=true) :
    Graph(GraphPt), Queue(InitBigQ?Graph->GetNodes():1024), NIdDistH(InitBigQ?Graph->GetNodes():1024) { }
  /// Sets the graph to be used by the BFS to GraphPt and resets the data structures.
  void SetGraph(const PGraph& GraphPt);
  /// Performs BFS from node id StartNode for at most MxDist steps by only following in-links (parameter FollowIn = true) and/or out-links (parameter FollowOut = true).
  int DoBfs(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId=-1, const int& MxDist=TInt::Mx);
  /// Same functionality as DoBfs with better performance.
  int DoBfsHybrid(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId=-1, const int& MxDist=TInt::Mx);
  /// Returns the number of nodes visited/reached by the BFS.
  int GetNVisited() const { return NIdDistH.Len(); }
  /// Returns the IDs of the nodes visited/reached by the BFS.
  void GetVisitedNIdV(TIntV& NIdV) const { NIdDistH.GetKeyV(NIdV); }
  /// Returns the shortest path distance between SrcNId and DstNId.
  /// Note you have to first call DoBfs(). SrcNId must be equal to StartNode, otherwise return value is -1.
  int GetHops(const int& SrcNId, const int& DstNId) const;
  /// Returns a random shortest path from SrcNId to DstNId.
  /// Note you have to first call DoBfs(). SrcNId must be equal to StartNode, otherwise return value is -1.
  int GetRndPath(const int& SrcNId, const int& DstNId, TIntV& PathNIdV) const;
  /* Private variables and functions for DoBfsHybrid */
private:
  int Stage; // 0, 2: top down, 1: bottom up
  // heuristic thresholds for switching between top-down and bottom-up steps
  static const unsigned int alpha = 100;
  static const unsigned int beta = 20;
  /* Private functions */
  bool TopDownStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn);
  bool BottomUpStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn);
};

// Replace the graph and grow the reusable queue/hash table if needed.
template<class PGraph>
void TBreathFS<PGraph>::SetGraph(const PGraph& GraphPt) {
  Graph=GraphPt;
  const int N=GraphPt->GetNodes();
  if (Queue.Reserved() < N) { Queue.Gen(N); }
  if (NIdDistH.GetReservedKeyIds() < N) { NIdDistH.Gen(N); }
}

// Classic queue-based BFS; returns the maximum distance reached.
// Stops early when TargetNId is discovered or MxDist is hit.
template<class PGraph>
int TBreathFS<PGraph>::DoBfs(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId, const int& MxDist) {
  StartNId = StartNode;
  IAssert(Graph->IsNode(StartNId));
//  const typename PGraph::TObj::TNodeI StartNodeI = Graph->GetNI(StartNode);
//  IAssertR(StartNodeI.GetOutDeg() > 0, TStr::Fmt("No neighbors from start node %d.", StartNode));
  NIdDistH.Clr(false);  NIdDistH.AddDat(StartNId, 0);
  Queue.Clr(false);  Queue.Push(StartNId);
  int v, MaxDist = 0;
  while (! Queue.Empty()) {
    const int NId = Queue.Top();  Queue.Pop();
    const int Dist = NIdDistH.GetDat(NId);
    if (Dist == MxDist) { break; } // max distance limit reached
    const typename PGraph::TObj::TNodeI NodeI = Graph->GetNI(NId);
    if (FollowOut) { // out-links
      for (v = 0; v < NodeI.GetOutDeg(); v++) {  // out-links
        const int DstNId = NodeI.GetOutNId(v);
        if (! NIdDistH.IsKey(DstNId)) {          // first visit: record distance
          NIdDistH.AddDat(DstNId, Dist+1);
          MaxDist = TMath::Mx(MaxDist, Dist+1);
          if (DstNId == TargetNId) { return MaxDist; }
          Queue.Push(DstNId);
        }
      }
    }
    if (FollowIn) { // in-links
      for (v = 0; v < NodeI.GetInDeg(); v++) {
        const int DstNId = NodeI.GetInNId(v);
        if (! NIdDistH.IsKey(DstNId)) {
          NIdDistH.AddDat(DstNId, Dist+1);
          MaxDist = TMath::Mx(MaxDist, Dist+1);
          if (DstNId == TargetNId) { return MaxDist; }
          Queue.Push(DstNId);
        }
      }
    }
  }
  return MaxDist;
}

// Direction-optimizing BFS: alternates between top-down and bottom-up
// frontier expansion based on the alpha/beta heuristics; distances are
// kept in a flat vector and copied into NIdDistH at the end.
template<class PGraph>
int TBreathFS<PGraph>::DoBfsHybrid(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId, const int& MxDist) {
  StartNId = StartNode;
  IAssert(Graph->IsNode(StartNId));
  if (TargetNId == StartNode) return 0;
  const typename PGraph::TObj::TNodeI StartNodeI = Graph->GetNI(StartNode);

  // Initialize vector (-1 marks "not yet visited")
  TIntV NIdDistV(Graph->GetMxNId() + 1);
  for (int i = 0; i < NIdDistV.Len(); i++) {
    NIdDistV.SetVal(i, -1);
  }
  TIntV *Frontier = new TIntV(Graph->GetNodes(), 0);
  TIntV *NextFrontier = new TIntV(Graph->GetNodes(), 0);

  NIdDistV.SetVal(StartNId, 0);
  Frontier->Add(StartNId);
  Stage = 0;
  int MaxDist = -1;
  const unsigned int TotalNodes = Graph->GetNodes();
  unsigned int UnvisitedNodes = Graph->GetNodes();
  while (! Frontier->Empty()) {
    MaxDist += 1;
    NextFrontier->Clr(false);
    if (MaxDist == MxDist) { break; } // max distance limit reached

    UnvisitedNodes -= Frontier->Len();
    if (Stage == 0 && UnvisitedNodes / Frontier->Len() < alpha) {
      Stage = 1;  // frontier large relative to remainder: go bottom-up
    } else if (Stage == 1 && TotalNodes / Frontier->Len() > beta) {
      Stage = 2;  // frontier shrank again: back to top-down
    }

    // Top down or bottom up depending on stage
    bool targetFound = false;
    if (Stage == 0 || Stage == 2) {
      targetFound = TopDownStep(NIdDistV, Frontier, NextFrontier, MaxDist, TargetNId, FollowOut, FollowIn);
    } else {
      targetFound = BottomUpStep(NIdDistV, Frontier, NextFrontier, MaxDist, TargetNId, FollowOut, FollowIn);
    }
    if (targetFound) {
      MaxDist = NIdDistV[TargetNId];
      break;
    }

    // swap Frontier and NextFrontier
    TIntV *temp = Frontier;
    Frontier = NextFrontier;
    NextFrontier = temp;
  }

  delete Frontier;
  delete NextFrontier;
  // Transform vector to hash table
  NIdDistH.Clr(false);
  for (int NId = 0; NId < NIdDistV.Len(); NId++) {
    if (NIdDistV[NId] != -1) {
      NIdDistH.AddDat(NId, NIdDistV[NId]);
    }
  }
  return MaxDist;
}

// Top-down step: expand every node in the current frontier to its
// unvisited neighbors.  Returns true as soon as TargetNId is reached.
template<class PGraph>
bool TBreathFS<PGraph>::TopDownStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn) {
  for (TIntV::TIter it = Frontier->BegI(); it != Frontier->EndI(); ++it) { // loop over frontier
    const int NId = *it;
    const int Dist = NIdDistV[NId];
    IAssert(Dist == MaxDist); // Must equal to MaxDist
    const typename PGraph::TObj::TNodeI NodeI = Graph->GetNI(NId);
    if (FollowOut) {
      for (int v = 0; v < NodeI.GetOutDeg(); v++) {
        const int NeighborNId = NodeI.GetOutNId(v);
        if (NIdDistV[NeighborNId] == -1) {
          NIdDistV.SetVal(NeighborNId, Dist+1);
          if (NeighborNId == TargetNId) return true;
          NextFrontier->Add(NeighborNId);
        }
      }
    }
    if (FollowIn) {
      for (int v = 0; v < NodeI.GetInDeg(); v++) {
        const int NeighborNId = NodeI.GetInNId(v);
        if (NIdDistV[NeighborNId] == -1) {
          NIdDistV.SetVal(NeighborNId, Dist+1);
          if (NeighborNId == TargetNId) return true;
          NextFrontier->Add(NeighborNId);
        }
      }
    }
  }
  return false;
}

// Bottom-up step: every unvisited node checks whether any of its parents
// (edges traversed in reverse direction) is in the current frontier.
// Returns true as soon as TargetNId is reached.
template<class PGraph>
bool TBreathFS<PGraph>::BottomUpStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn) {
  for (typename PGraph::TObj::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) {
    const int NId = NodeI.GetId();
    if (NIdDistV[NId] == -1) {
      if (FollowOut) {
        // forward BFS: look for a parent via this node's in-links
        for (int v = 0; v < NodeI.GetInDeg(); v++) {
          const int ParentNId = NodeI.GetInNId(v);
          if (NIdDistV[ParentNId] == MaxDist) {
            NIdDistV[NId] = MaxDist + 1;
            if (NId == TargetNId) return true;
            NextFrontier->Add(NId);
            break;
          }
        }
      }
      if (FollowIn && NIdDistV[NId] == -1) {
        // reverse BFS: look for a parent via this node's out-links
        for (int v = 0; v < NodeI.GetOutDeg(); v++) {
          const int ParentNId = NodeI.GetOutNId(v);
          if (NIdDistV[ParentNId] == MaxDist) {
            NIdDistV[NId] = MaxDist + 1;
            if (NId == TargetNId) return true;
            NextFrontier->Add(NId);
            break;
          }
        }
      }
    }
  }
  return false;
}

// NOTE(review): definition continues beyond this chunk of the file.
template<class PGraph>
int TBreathFS<PGraph>::GetHops(const int& SrcNId, const int& DstNId) const {
  TInt Dist;
  if (SrcNId!=StartNId) { return -1; }
if (! NIdDistH.IsKeyGetDat(DstNId, Dist)) { return -1; } return Dist.Val; } template<class PGraph> int TBreathFS<PGraph>::GetRndPath(const int& SrcNId, const int& DstNId, TIntV& PathNIdV) const { PathNIdV.Clr(false); if (SrcNId!=StartNId || ! NIdDistH.IsKey(DstNId)) { return -1; } PathNIdV.Add(DstNId); TIntV CloserNIdV; int CurNId = DstNId; TInt CurDist, NextDist; while (CurNId != SrcNId) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(CurNId); IAssert(NIdDistH.IsKeyGetDat(CurNId, CurDist)); CloserNIdV.Clr(false); for (int e = 0; e < NI.GetDeg(); e++) { const int Next = NI.GetNbrNId(e); if (NIdDistH.IsKeyGetDat(Next, NextDist)) { if (NextDist == CurDist-1) { CloserNIdV.Add(Next); } } } IAssert(! CloserNIdV.Empty()); CurNId = CloserNIdV[TInt::Rnd.GetUniDevInt(CloserNIdV.Len())]; PathNIdV.Add(CurNId); } PathNIdV.Reverse(); return PathNIdV.Len()-1; } ///////////////////////////////////////////////// // Implementation namespace TSnap { template <class PGraph> PNGraph GetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx); PNGraph Tree = TNGraph::New(); BFS.NIdDistH.SortByDat(); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { const int NId = BFS.NIdDistH.GetKey(i); const int Dist = BFS.NIdDistH[i]; typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); if (!Tree->IsNode(NId)) { Tree->AddNode(NId); } if (FollowOut) { for (int e = 0; e < NI.GetInDeg(); e++) { const int Prev = NI.GetInNId(e); if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev)==Dist-1) { Tree->AddEdge(Prev, NId); } } } if (FollowIn) { for (int e = 0; e < NI.GetOutDeg(); e++) { const int Prev = NI.GetOutNId(e); if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev)==Dist-1) { Tree->AddEdge(Prev, NId); } } } } return Tree; } template <class PGraph> int GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, int& TreeSz, int& TreeDepth) { 
TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx); TreeSz = BFS.NIdDistH.Len(); TreeDepth = 0; for (int i = 0; i < BFS.NIdDistH.Len(); i++) { TreeDepth = TMath::Mx(TreeDepth, BFS.NIdDistH[i].Val); } return TreeSz; } template <class PGraph> int GetNodesAtHop(const PGraph& Graph, const int& StartNId, const int& Hop, TIntV& NIdV, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, true, !IsDir, -1, Hop); NIdV.Clr(false); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { if (BFS.NIdDistH[i] == Hop) { NIdV.Add(BFS.NIdDistH.GetKey(i)); } } return NIdV.Len(); } template <class PGraph> int GetNodesAtHops(const PGraph& Graph, const int& StartNId, TIntPrV& HopCntV, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, true, !IsDir, -1, TInt::Mx); TIntH HopCntH; for (int i = 0; i < BFS.NIdDistH.Len(); i++) { HopCntH.AddDat(BFS.NIdDistH[i]) += 1; } HopCntH.GetKeyDatPrV(HopCntV); HopCntV.Sort(); return HopCntV.Len(); } template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir, const int& MaxDist) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(SrcNId, true, ! IsDir, -1, MaxDist); NIdToDistH.Clr(); NIdToDistH.Swap(BFS.NIdDistH); return NIdToDistH[NIdToDistH.Len()-1]; } template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(SrcNId, true, ! 
IsDir, DstNId, TInt::Mx); return BFS.GetHops(SrcNId, DstNId); } template <class PGraph> int GetBfsFullDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) { int FullDiam; double EffDiam; GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam); return FullDiam; } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) { int FullDiam; double EffDiam; GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam); return EffDiam; } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam) { double AvgDiam; EffDiam = -1; FullDiam = -1; return GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam, AvgDiam); } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam, double& AvgSPL) { EffDiam = -1; FullDiam = -1; AvgSPL = -1; TIntFltH DistToCntH; TBreathFS<PGraph> BFS(Graph); // shotest paths TIntV NodeIdV; Graph->GetNIdV(NodeIdV); NodeIdV.Shuffle(TInt::Rnd); for (int tries = 0; tries < TMath::Mn(NTestNodes, Graph->GetNodes()); tries++) { const int NId = NodeIdV[tries]; BFS.DoBfs(NId, true, ! 
IsDir, -1, TInt::Mx); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { DistToCntH.AddDat(BFS.NIdDistH[i]) += 1; } } TIntFltKdV DistNbrsPdfV; double SumPathL=0, PathCnt=0; for (int i = 0; i < DistToCntH.Len(); i++) { DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i])); SumPathL += DistToCntH.GetKey(i) * DistToCntH[i]; PathCnt += DistToCntH[i]; } DistNbrsPdfV.Sort(); EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile) FullDiam = DistNbrsPdfV.Last().Key; // approximate full diameter (max shortest path length over the sampled nodes) AvgSPL = SumPathL/PathCnt; // average shortest path length return EffDiam; } template <class PGraph> double GetBfsEffDiamAll(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam, double& AvgSPL) { return GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam, AvgSPL); } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const TIntV& SubGraphNIdV, const bool& IsDir, double& EffDiam, int& FullDiam) { EffDiam = -1; FullDiam = -1; TIntFltH DistToCntH; TBreathFS<PGraph> BFS(Graph); // shotest paths TIntV NodeIdV(SubGraphNIdV); NodeIdV.Shuffle(TInt::Rnd); TInt Dist; for (int tries = 0; tries < TMath::Mn(NTestNodes, SubGraphNIdV.Len()); tries++) { const int NId = NodeIdV[tries]; BFS.DoBfs(NId, true, ! 
IsDir, -1, TInt::Mx); for (int i = 0; i < SubGraphNIdV.Len(); i++) { if (BFS.NIdDistH.IsKeyGetDat(SubGraphNIdV[i], Dist)) { DistToCntH.AddDat(Dist) += 1; } } } TIntFltKdV DistNbrsPdfV; for (int i = 0; i < DistToCntH.Len(); i++) { DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i])); } DistNbrsPdfV.Sort(); EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile) FullDiam = DistNbrsPdfV.Last().Key; // approximate full diameter (max shortest path length over the sampled nodes) return EffDiam; // average shortest path length } template <class PGraph> int GetShortestDistances(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, TIntV& ShortestDists) { PSOut StdOut = TStdOut::New(); int MxNId = Graph->GetMxNId(); int NonNodeDepth = 2147483647; // INT_MAX int InfDepth = 2147483646; // INT_MAX - 1 ShortestDists.Gen(MxNId); for (int NId = 0; NId < MxNId; NId++) { if (Graph->IsNode(NId)) { ShortestDists[NId] = InfDepth; } else { ShortestDists[NId] = NonNodeDepth; } } TIntV Vec1(MxNId, 0); // ensure enough capacity TIntV Vec2(MxNId, 0); // ensure enough capacity ShortestDists[StartNId] = 0; TIntV* PCurV = &Vec1; PCurV->Add(StartNId); TIntV* PNextV = &Vec2; int Depth = 0; // current depth while (!PCurV->Empty()) { Depth++; // increase depth for (int i = 0; i < PCurV->Len(); i++) { int NId = PCurV->GetVal(i); typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); for (int e = 0; e < NI.GetOutDeg(); e++) { const int OutNId = NI.GetOutNId(e); if (ShortestDists[OutNId].Val == InfDepth) { ShortestDists[OutNId] = Depth; PNextV->Add(OutNId); } } } // swap pointer, no copying TIntV* Tmp = PCurV; PCurV = PNextV; PNextV = Tmp; // clear next PNextV->Reduce(0); // reduce length, does not initialize new array } return Depth-1; } #ifdef USE_OPENMP template <class PGraph> int GetShortestDistancesMP2(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, TIntV& 
ShortestDists) { int MxNId = Graph->GetMxNId(); int NonNodeDepth = 2147483647; // INT_MAX int InfDepth = 2147483646; // INT_MAX - 1 ShortestDists.Gen(MxNId); #pragma omp parallel for schedule(dynamic,10000) for (int NId = 0; NId < MxNId; NId++) { if (Graph->IsNode(NId)) { ShortestDists[NId] = InfDepth; } else { ShortestDists[NId] = NonNodeDepth; } } TIntV Vec1(MxNId, 0); // ensure enough capacity TIntV Vec2(MxNId, 0); // ensure enough capacity ShortestDists[StartNId] = 0; TIntV* PCurV = &Vec1; PCurV->Add(StartNId); TIntV* PNextV = &Vec2; int Depth = 0; // current depth while (!PCurV->Empty()) { Depth++; // increase depth #pragma omp parallel for schedule(dynamic,10000) for (int i = 0; i < PCurV->Len(); i++) { int NId = PCurV->GetVal(i); typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); for (int e = 0; e < NI.GetOutDeg(); e++) { const int OutNId = NI.GetOutNId(e); if (__sync_bool_compare_and_swap(&(ShortestDists[OutNId].Val), InfDepth, Depth)) { PNextV->AddMP(OutNId); } } } // #pragma omp parallel for schedule(dynamic,10000) // for (int NId = 0; NId < MxNId; NId++) { // if (ShortestDists[NId] == InfDepth) { // typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); // for (int e = 0; e < NI.GetInDeg(); e++) { // const int InNId = NI.GetInNId(e); // if (ShortestDists[InNId] < Depth) { // ShortestDists[NId] = Depth; // PNextV->AddMP(NId); // break; // } // } // } // } // swap pointer, no copying TIntV* Tmp = PCurV; PCurV = PNextV; PNextV = Tmp; // clear next PNextV->Reduce(0); // reduce length, does not initialize new array } return Depth-1; } #endif // USE_OPENMP } // namespace TSnap
GB_binop__bshift_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint32) // C=scalar+B GB (_bind1st__bshift_uint32) // C=scalar+B' GB (_bind1st_tran__bshift_uint32) // C=A+scalar GB (_bind2nd__bshift_uint32) // C=A'+scalar GB (_bind2nd_tran__bshift_uint32) // C type: uint32_t // A type: uint32_t // B,b type: int8_t // BinaryOp: cij = GB_bitshift_uint32 (aij, bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_bitshift_uint32 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSHIFT || GxB_NO_UINT32 || GxB_NO_BSHIFT_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bshift_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bshift_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bshift_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bshift_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bshift_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = GB_bitshift_uint32 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bshift_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = GB_bitshift_uint32 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_uint32 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__bshift_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_uint32 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
matrix_bits.h
#ifndef MATRIX_BITS_H_ #define MATRIX_BITS_H_ namespace acspo { template <typename T> matrix<T> & operator&=(matrix<T> &mat1, const matrix<T> &mat2) { if (mat1.size() != mat2.size()) { throw std::runtime_error("dimension mismatch"); } unsigned int elem = mat1.elem(); T *m1ptr = mat1.ptr(); const T *m2ptr = mat2.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { m1ptr[i] &= m2ptr[i]; } return mat1; } template <typename T> matrix<T> & operator|=(matrix<T> &mat1, const matrix<T> &mat2) { if (mat1.size() != mat2.size()) { throw std::runtime_error("dimension mismatch"); } unsigned int elem = mat1.elem(); T *m1ptr = mat1.ptr(); const T *m2ptr = mat2.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { m1ptr[i] |= m2ptr[i]; } return mat1; } template <typename T> matrix<T> & operator^=(matrix<T> &mat1, const matrix<T> &mat2) { if (mat1.size() != mat2.size()) { throw std::runtime_error("dimension mismatch"); } unsigned int elem = mat1.elem(); T *m1ptr = mat1.ptr(); const T *m2ptr = mat2.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { m1ptr[i] ^= m2ptr[i]; } return mat1; } template <typename T, typename S> matrix<T> & operator&=(matrix<T> &mat, const S &val) { unsigned int elem = mat.elem(); T *mptr = mat.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { mptr[i] &= val; } return mat; } template <typename T, typename S> matrix<T> & operator|=(matrix<T> &mat, const S &val) { unsigned int elem = mat.elem(); T *mptr = mat.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { mptr[i] |= val; } return mat; } template <typename T, typename S> matrix<T> & operator^=(matrix<T> &mat, const S &val) { unsigned int elem = mat.elem(); T *mptr = mat.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { mptr[i] *= val; } return mat; } template <typename T> matrix<T> operator&(const matrix<T> &mat1, const matrix<T> &mat2) { if 
(mat1.size() != mat2.size()) { throw std::runtime_error("dimension mismatch"); } unsigned int elem = mat1.elem(); matrix<T> ret(mat1.size()); T *rptr = ret.ptr(); const T *m1ptr = mat1.ptr(); const T *m2ptr = mat2.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { rptr[i] = m1ptr[i] & m2ptr[i]; } return ret; } template <typename T> matrix<T> operator|(const matrix<T> &mat1, const matrix<T> &mat2) { if (mat1.size() != mat2.size()) { throw std::runtime_error("dimension mismatch"); } unsigned int elem = mat1.elem(); matrix<T> ret(mat1.size()); T *rptr = ret.ptr(); const T *m1ptr = mat1.ptr(); const T *m2ptr = mat2.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { rptr[i] = m1ptr[i] | m2ptr[i]; } return ret; } template <typename T> matrix<T> operator^(const matrix<T> &mat1, const matrix<T> &mat2) { if (mat1.size() != mat2.size()) { throw std::runtime_error("dimension mismatch"); } unsigned int elem = mat1.elem(); matrix<T> ret(mat1.size()); T *rptr = ret.ptr(); const T *m1ptr = mat1.ptr(); const T *m2ptr = mat2.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { rptr[i] = m1ptr[i] ^ m2ptr[i]; } return ret; } template <typename T, typename S> matrix<T> operator&(const matrix<T> &mat, const S &val) { unsigned int elem = mat.elem(); matrix<T> ret(mat.size()); T *rptr = ret.ptr(); const T *mptr = mat.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { rptr[i] = mptr[i] & val; } return ret; } template <typename T, typename S> matrix<T> operator|(const matrix<T> &mat, const S &val) { unsigned int elem = mat.elem(); matrix<T> ret(mat.size()); T *rptr = ret.ptr(); const T *mptr = mat.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { rptr[i] = mptr[i] | val; } return ret; } template <typename T, typename S> matrix<T> operator^(const matrix<T> &mat, const S &val) { unsigned int elem = mat.elem(); matrix<T> ret(mat.size()); T *rptr = 
ret.ptr(); const T *mptr = mat.ptr(); #pragma omp parallel for simd for (unsigned int i = 0; i < elem; i++) { rptr[i] = mptr[i] ^ val; } return ret; } template <typename T, typename S> matrix<T> operator&(const S &val, const matrix<T> &mat) { return mat & val; } template <typename T, typename S> matrix<T> operator|(const S &val, const matrix<T> &mat) { return mat | val; } template <typename T, typename S> matrix<T> operator^(const S &val, const matrix<T> &mat) { return mat ^ val; } } #endif
convolution_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// Precompute the Winograd F(2,3) kernel transform U = G * g * G^T for every
// (outch, inch) 3x3 int8 kernel, then repack the transformed 4x4 tiles into
// the layout the dot-product stage consumes (output channels interleaved in
// groups of 8, then 4, then singly).
//
// kernel:     flat signed-char weights, outch * inch * 9
// kernel_tm2: receives 4 Mats (one per tile row r); each holds the r-th
//             4-short slice of every transformed kernel, repacked
static void conv3x3s1_winograd23_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch)
{
    // 16 shorts (a 4x4 tile) per (outch, inch) kernel, 2-byte elements
    Mat kernel_tm(4*4, inch, outch, 2ul);

    // G: F(2,3) kernel-transform matrix kept integral.
    // NOTE(review): looks like 2x the canonical F(2,3) G; the matching
    // down-shift is expected in the output transform — confirm there.
    const short ktm[4][3] = {
        { 2, 0, 0},
        { 1, 1, 1},
        { 1, -1, 1},
        { 0, 0, 2}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // the three rows of the 3x3 kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h: tmp = G * g (4x3 intermediate; the (short) cast widens
            // the first product so the sum is computed in short/int range)
            short tmp[4][3];
            for (int i=0; i<4; i++)
            {
                tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = tmp * G^T, stored row-major as 16 shorts
            for (int j=0; j<4; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i=0; i<4; i++)
                {
                    kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Repack: for each tile row r, gather the r-th 4 shorts of every kernel.
    // Channel layout of kernel_tm_test: outch/8 blocks of 8 interleaved
    // channels, then (outch%8)/4 blocks of 4, then outch%4 single channels.
    // NOTE(review): the flat (const short*)kernel_tm + p*inch*16 indexing
    // assumes kernel_tm's channels are densely packed — verify against Mat.
    for (int r=0; r<4; r++)
    {
        Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u);

        int p = 0;
        for (; p+7<outch; p+=8)
        {
            const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16;
            const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16;
            const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16;
            const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16;
            const short* kernel4 = (const short*)kernel_tm + (p+4)*inch*16;
            const short* kernel5 = (const short*)kernel_tm + (p+5)*inch*16;
            const short* kernel6 = (const short*)kernel_tm + (p+6)*inch*16;
            const short* kernel7 = (const short*)kernel_tm + (p+7)*inch*16;

            short* ktmp = kernel_tm_test.channel(p/8);

            for (int q=0; q<inch; q++)
            {
                // 8 output channels x 4 shorts, interleaved per input channel
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];
                ktmp[4] = kernel1[r*4+0];
                ktmp[5] = kernel1[r*4+1];
                ktmp[6] = kernel1[r*4+2];
                ktmp[7] = kernel1[r*4+3];
                ktmp[8] = kernel2[r*4+0];
                ktmp[9] = kernel2[r*4+1];
                ktmp[10] = kernel2[r*4+2];
                ktmp[11] = kernel2[r*4+3];
                ktmp[12] = kernel3[r*4+0];
                ktmp[13] = kernel3[r*4+1];
                ktmp[14] = kernel3[r*4+2];
                ktmp[15] = kernel3[r*4+3];
                ktmp[16] = kernel4[r*4+0];
                ktmp[17] = kernel4[r*4+1];
                ktmp[18] = kernel4[r*4+2];
                ktmp[19] = kernel4[r*4+3];
                ktmp[20] = kernel5[r*4+0];
                ktmp[21] = kernel5[r*4+1];
                ktmp[22] = kernel5[r*4+2];
                ktmp[23] = kernel5[r*4+3];
                ktmp[24] = kernel6[r*4+0];
                ktmp[25] = kernel6[r*4+1];
                ktmp[26] = kernel6[r*4+2];
                ktmp[27] = kernel6[r*4+3];
                ktmp[28] = kernel7[r*4+0];
                ktmp[29] = kernel7[r*4+1];
                ktmp[30] = kernel7[r*4+2];
                ktmp[31] = kernel7[r*4+3];

                ktmp += 32;
                // advance each kernel pointer to the next input channel's tile
                kernel0 += 16;
                kernel1 += 16;
                kernel2 += 16;
                kernel3 += 16;
                kernel4 += 16;
                kernel5 += 16;
                kernel6 += 16;
                kernel7 += 16;
            }
        }
        for (; p+3<outch; p+=4)
        {
            const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16;
            const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16;
            const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16;
            const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16;

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4);

            for (int q=0; q<inch; q++)
            {
                // 4 output channels x 4 shorts
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];
                ktmp[4] = kernel1[r*4+0];
                ktmp[5] = kernel1[r*4+1];
                ktmp[6] = kernel1[r*4+2];
                ktmp[7] = kernel1[r*4+3];
                ktmp[8] = kernel2[r*4+0];
                ktmp[9] = kernel2[r*4+1];
                ktmp[10] = kernel2[r*4+2];
                ktmp[11] = kernel2[r*4+3];
                ktmp[12] = kernel3[r*4+0];
                ktmp[13] = kernel3[r*4+1];
                ktmp[14] = kernel3[r*4+2];
                ktmp[15] = kernel3[r*4+3];

                ktmp += 16;
                kernel0 += 16;
                kernel1 += 16;
                kernel2 += 16;
                kernel3 += 16;
            }
        }
        for (; p<outch; p++)
        {
            const short* kernel0 = (const short*)kernel_tm + p*inch*16;

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4);

            for (int q=0; q<inch; q++)
            {
                // single output channel, 4 shorts per input channel
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];

                ktmp += 4;
                kernel0 += 16;
            }
        }
        kernel_tm2.push_back(kernel_tm_test);
    }
}

// Winograd F(2,3) int8 3x3s1 convolution: pad, transform 4x4 input tiles,
// multiply against the repacked transformed kernels, transform back, crop.
// Only the prologue (padding + start of the input transform) lies in this
// span; the NEON tile transforms continue below.
static void conv3x3s1_winograd23_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2, winograd F(2,3): output rounded up to even, input +2 border
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in FeatherCNN
        int nRowBlocks = w_tm/4;

        const int tiles = nColBlocks * nRowBlocks;

        // 4 shorts per (tile, inch); the 4 transform rows occupy 4 banks of
        // `tiles` channels each (indexed as tiles*row + tile below)
        bottom_blob_tm.create(4, inch, tiles*4, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {1.0f, 0.0f, -1.0f, 0.0f},
        //     {0.0f, 1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 0.00f, 1.0f}
        // };

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q=0; q<inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);

            for
(int j=0; j<nColBlocks; j++) { const signed char* r0 = img + w * j * 2; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; for (int i = 0; i<nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON #if __aarch64__ asm volatile( // load "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v1.8b}, [%1] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.8b}, [%2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v3.8b}, [%3] \n" // w = B_t * d, trans int8 to int16 "ssubl v4.8h, v0.8b, v2.8b \n" // d4 "saddl v5.8h, v1.8b, v2.8b \n" // d6 "ssubl v6.8h, v2.8b, v1.8b \n" // d8 "ssubl v7.8h, v3.8b, v1.8b \n" // d10 // transpose w to w_t "trn1 v8.4h, v4.4h, v5.4h \n" "trn2 v9.4h, v4.4h, v5.4h \n" "trn1 v10.4h, v6.4h, v7.4h \n" "trn2 v11.4h, v6.4h, v7.4h \n" "trn1 v0.2s, v8.2s, v10.2s \n" "trn2 v2.2s, v8.2s, v10.2s \n" "trn1 v1.2s, v9.2s, v11.2s \n" "trn2 v3.2s, v9.2s, v11.2s \n" // U = B_t * d_t "sub v4.4h, v0.4h, v2.4h \n" "add v5.4h, v1.4h, v2.4h \n" "sub v6.4h, v2.4h, v1.4h \n" "sub v7.4h, v3.4h, v1.4h \n" // save "st1 {v4.4h}, [%4] \n" "st1 {v5.4h}, [%5] \n" "st1 {v6.4h}, [%6] \n" "st1 {v7.4h}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else asm volatile( // load "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "pld [%1, #64] \n" "vld1.s8 {d1}, [%1] \n" "pld [%2, #64] \n" "vld1.s8 {d2}, [%2] \n" "pld [%3, #64] \n" "vld1.s8 
{d3}, [%3] \n" // w = B_t * d, trans int8 to int16 "vsubl.s8 q2, d0, d2 \n" // d4 "vaddl.s8 q3, d1, d2 \n" // d6 "vsubl.s8 q4, d2, d1 \n" // d8 "vsubl.s8 q5, d3, d1 \n" // d10 // transpose w to w_t "vtrn.s16 d4, d6 \n" "vtrn.s16 d8, d10 \n" "vtrn.s32 d4, d8 \n" "vtrn.s32 d6, d10 \n" // U = B_t * d_t "vsub.s16 d11, d4, d8 \n" "vadd.s16 d12, d6, d8 \n" "vsub.s16 d13, d8, d6 \n" "vsub.s16 d14, d10, d6 \n" // save "vst1.s32 {d11}, [%4] \n" "vst1.s32 {d12}, [%5] \n" "vst1.s32 {d13}, [%6] \n" "vst1.s32 {d14}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #endif // __aarch64__ #else short d0[4],d1[4],d2[4],d3[4]; short w0[4],w1[4],w2[4],w3[4]; short t0[4],t1[4],t2[4],t3[4]; // load for (int n = 0; n < 4; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; } // w = B_t * d for (int n = 0; n < 4; n++) { w0[n] = d0[n] - d2[n]; w1[n] = d1[n] + d2[n]; w2[n] = d2[n] - d1[n]; w3[n] = d3[n] - d1[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; } // U = B_t * d_t for (int n = 0; n < 4; n++) { d0[n] = t0[n] - t2[n]; d1[n] = t1[n] + t2[n]; d2[n] = t2[n] - t1[n]; d3[n] = t3[n] - t1[n]; } // save to out_tm for (int n = 0; n < 4; n++) { out_tm0[n] = d0[n]; out_tm1[n] = d1[n]; out_tm2[n] = d2[n]; out_tm3[n] = d3[n]; } #endif r0 += 2; r1 += 2; r2 += 2; r3 += 2; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; 
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<4; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) 
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; output4_tm += 16; output5_tm += 16; output6_tm += 16; output7_tm += 16; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) //"prfm pldl1keep, [%2, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%1] \n" "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif output0_tm += 16; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform 
output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[2][4] = { // {1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 1.0f} // }; int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; #if __ARM_NEON int32x2_t _shift = vdup_n_s32(-2); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); int* outRow0 = top_blob_bordered.channel(p); int* outRow1 = outRow0 + outw; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "add v0.4s, v0.4s, v1.4s \n" // s0 = s0 + s1 + s2; "sub v1.4s, v1.4s, v2.4s \n" "add v0.4s, v0.4s, v2.4s \n" // s1 = s1 - s2 + s3; "add v1.4s, v1.4s, v3.4s \n" "trn1 v4.4s, v0.4s, v1.4s \n" "trn2 v5.4s, v0.4s, v1.4s \n" "dup v6.2d, v4.d[1] \n" "dup v7.2d, v5.d[1] \n" "add v0.2s, v4.2s, v5.2s \n" // o0 = d0 + d1 + d2; "sub v1.2s, v5.2s, v6.2s \n" "add v0.2s, v0.2s, v6.2s \n" // o1 = d1 - d2 + d3; "add v1.2s, v1.2s, v7.2s \n" "sshl v0.2s, v0.2s, %6.2s \n" // o0 = o0 >> 2 "sshl v1.2s, v1.2s, %6.2s \n" // o1 = o1 >> 2 "st1 {v0.2s}, [%1], #8 \n" "st1 {v1.2s}, [%2], #8 \n" : "=r"(out_tile), // %0 "=r"(outRow0), // %1 "=r"(outRow1) // %2 : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "vaddq.s32 q0, q0, q1 \n" // s0 = s0 + s1 + s2; "vsubq.s32 q1, q1, q2 \n" "vaddq.s32 q0, q0, q2 \n" // s1 = s1 - s2 + s3; "vaddq.s32 q1, q1, q3 \n" "vtrn.s32 q0, q1 \n" "vadd.s32 d8, d0, d2 \n" // o0 = d0 + d1 + d2; "vsub.s32 d9, d2, d1 \n" "vadd.s32 d8, d8, d1 \n" // o1 = d1 - d2 + d3; "vadd.s32 d9, d9, d3 \n" "vshl.s32 d8, d8, %P6 \n" // o0 = o0 >> 2 "vshl.s32 d9, 
d9, %P6 \n" // o1 = o1 >> 2 "vst1.s32 {d8}, [%1]! \n" "vst1.s32 {d9}, [%2]! \n" : "=r"(out_tile), // %0 "=r"(outRow0), // %1 "=r"(outRow1) // %2 : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q4" ); #endif // __aarch64__ #else int s0[4],s1[4],s2[4],s3[4]; int w0[4],w1[4]; int d0[2],d1[2],d2[2],d3[2]; int o0[2],o1[2]; // load for (int n = 0; n < 4; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 4]; s2[n] = out_tile[n+ 8]; s3[n] = out_tile[n+12]; } // w = A_T * W for (int n = 0; n < 4; n++) { w0[n] = s0[n] + s1[n] + s2[n]; w1[n] = s1[n] - s2[n] + s3[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d1[0] = w0[1]; d1[1] = w1[1]; d2[0] = w0[2]; d2[1] = w1[2]; d3[0] = w0[3]; d3[1] = w1[3]; } // Y = A_T * w_t for (int n = 0; n < 2; n++) { o0[n] = d0[n] + d1[n] + d2[n]; o1[n] = d1[n] - d2[n] + d3[n]; } // save to top blob tm,why right 2,because the G' = G*2 outRow0[0] = o0[0] >> 2; outRow0[1] = o0[1] >> 2; outRow1[0] = o1[0] >> 2; outRow1[1] = o1[1] >> 2; out_tile += 16; outRow0 += 2; outRow1 += 2; #endif // __ARM_NEON } outRow0 += outw; outRow1 += outw; } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads); } static void conv3x3s1_winograd43_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch) { Mat kernel_tm(6*6, inch, outch, 2ul); // G // const float ktm[6][3] = { // { 1.0f/4, 0.0f, 0.0f}, // { -1.0f/6, -1.0f/6, -1.0f/6}, // { -1.0f/6, 1.0f/6, -1.0f/6}, // { 1.0f/24, 1.0f/12, 1.0f/6}, // { 1.0f/24, -1.0f/12, 1.0f/6}, // { 0.0f, 0.0f, 1.0f} // }; const short ktm[6][3] = { { 6, 0, 0}, { -4, -4, -4}, { -4, 4, -4}, { 1, 2, 4}, { 1, -2, 4}, { 0, 0, 24} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 
9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[6][3]; for (int i=0; i<6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j=0; j<6; j++) { short* tmpp = &tmp[j][0]; for (int i=0; i<6; i++) { kernel_tm0[j*6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } for (int r=0; r<9; r++) { Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u); int p = 0; for (; p+7<outch; p+=8) { const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*36; const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*36; const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*36; const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*36; const short* kernel4 = (const short*)kernel_tm + (p+4)*inch*36; const short* kernel5 = (const short*)kernel_tm + (p+5)*inch*36; const short* kernel6 = (const short*)kernel_tm + (p+6)*inch*36; const short* kernel7 = (const short*)kernel_tm + (p+7)*inch*36; short* ktmp = kernel_tm_test.channel(p/8); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp[16] = kernel4[r*4+0]; ktmp[17] = kernel4[r*4+1]; ktmp[18] = kernel4[r*4+2]; ktmp[19] = kernel4[r*4+3]; ktmp[20] = kernel5[r*4+0]; ktmp[21] = kernel5[r*4+1]; ktmp[22] = kernel5[r*4+2]; ktmp[23] = kernel5[r*4+3]; ktmp[24] = 
kernel6[r*4+0]; ktmp[25] = kernel6[r*4+1]; ktmp[26] = kernel6[r*4+2]; ktmp[27] = kernel6[r*4+3]; ktmp[28] = kernel7[r*4+0]; ktmp[29] = kernel7[r*4+1]; ktmp[30] = kernel7[r*4+2]; ktmp[31] = kernel7[r*4+3]; ktmp += 32; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; kernel4 += 36; kernel5 += 36; kernel6 += 36; kernel7 += 36; } } for (; p+3<outch; p+=4) { const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*36; const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*36; const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*36; const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*36; short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp += 16; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; } } for (; p<outch; p++) { const short* kernel0 = (const short*)kernel_tm + p*inch*36; short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp += 4; kernel0 += 36; } } kernel_tm2.push_back(kernel_tm_test); } } static void conv3x3s1_winograd43_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2, winograd F(4,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; 
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q); 
short* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { _t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5]; _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5]; _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5]; _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5]; _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5]; _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5]; } 
// d = B_t * d_t _n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, _4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); _n2 = vmlaq_s16(_n2, _t3, _1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3]; out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1]; out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5]; out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3]; out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1]; out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5]; out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3]; out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1]; out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5]; #else short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6]; short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6]; short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4*d0[n] - 5*d2[n] + d4[n]; w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n]; w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n]; w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n]; w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n]; w5[n] = 4*d1[n] - 5*d3[n] + d5[n]; } // transpose d to d_t { t0[0]=w0[0]; 
t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5]; t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5]; t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4*t0[n] - 5*t2[n] + t4[n]; d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n]; d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n]; d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n]; d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n]; d5[n] = 4*t1[n] - 5*t3[n] + t5[n]; } // save to out_tm { out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3]; out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1]; out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5]; out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3]; out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1]; out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5]; out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3]; out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1]; out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; 
pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, 
v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = 
top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); int* outRow0 = top_blob_bordered.channel(p); int* outRow1 = outRow0 + outw; int* outRow2 = outRow0 + outw * 2; int* outRow3 = outRow0 + outw * 3; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w1, _w2, _w3; int32x2_t _w0n, _w1n, _w2n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o1, _o2, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile+4); _s1 = vld1q_s32(out_tile+6); _s1n = vld1_s32(out_tile+10); _s2 = vld1q_s32(out_tile+12); _s2n = vld1_s32(out_tile+16); _s3 = vld1q_s32(out_tile+18); _s3n = vld1_s32(out_tile+22); _s4 = vld1q_s32(out_tile+24); _s4n = 
vld1_s32(out_tile+28); _s5 = vld1q_s32(out_tile+30); _s5n = vld1_s32(out_tile+34); // w = A_T * W int32x2_t _tp0 = {-1, 2}; int32x2_t _tp1 = {-2, 4}; int32x2_t _tp2 = {8, -8}; _w0 = vaddq_s32(_s0, _s1); _w0n = vadd_s32(_s0n, _s1n); _w0 = vaddq_s32(_w0, _s2); _w0n = vadd_s32(_w0n, _s2n); _w0 = vaddq_s32(_w0, _s3); _w0n = vadd_s32(_w0n, _s3n); _w0 = vaddq_s32(_w0, _s4); _w0n = vadd_s32(_w0n, _s4n); _w1 = vsubq_s32(_s1, _s2); _w1n = vsub_s32(_s1n, _s2n); _w1 = vmlaq_lane_s32(_w1, _s3, _tp0, 1); _w1n = vmla_lane_s32(_w1n, _s3n, _tp0, 1); _w1 = vmlaq_lane_s32(_w1, _s4, _tp1, 0); _w1n = vmla_lane_s32(_w1n, _s4n, _tp1, 0); _w2 = vaddq_s32(_s1, _s2); _w2n = vadd_s32(_s1n, _s2n); _w2 = vmlaq_lane_s32(_w2, _s3, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s3n, _tp1, 1); _w2 = vmlaq_lane_s32(_w2, _s4, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s4n, _tp1, 1); _w3 = vsubq_s32(_s1, _s2); _w3n = vsub_s32(_s1n, _s2n); _w3 = vmlaq_lane_s32(_w3, _s3, _tp2, 0); _w3n = vmla_lane_s32(_w3n, _s3n, _tp2, 0); _w3 = vmlaq_lane_s32(_w3, _s4, _tp2, 1); _w3n = vmla_lane_s32(_w3n, _s4n, _tp2, 1); _w3 = vaddq_s32(_w3, _s5); _w3n = vadd_s32(_w3n, _s5n); // transpose w to w_t { _d0[0] = _w0[0]; _d0[1] = _w1[0]; _d0[2] = _w2[0]; _d0[3] = _w3[0]; _d1[0] = _w0[1]; _d1[1] = _w1[1]; _d1[2] = _w2[1]; _d1[3] = _w3[1]; _d2[0] = _w0[2]; _d2[1] = _w1[2]; _d2[2] = _w2[2]; _d2[3] = _w3[2]; _d3[0] = _w0[3]; _d3[1] = _w1[3]; _d3[2] = _w2[3]; _d3[3] = _w3[3]; _d4[0] = _w0n[0]; _d4[1] = _w1n[0]; _d4[2] = _w2n[0]; _d4[3] = _w3n[0]; _d5[0] = _w0n[1]; _d5[1] = _w1n[1]; _d5[2] = _w2n[1]; _d5[3] = _w3n[1]; } // Y = A_T * w_t _o0 = vaddq_s32(_d0, _d1); _o0 = vaddq_s32(_o0, _d2); _o0 = vaddq_s32(_o0, _d3); _o0 = vaddq_s32(_o0, _d4); _o1 = vsubq_s32(_d1, _d2); _o1 = vmlaq_lane_s32(_o1, _d3, _tp0, 1); _o1 = vmlaq_lane_s32(_o1, _d4, _tp1, 0); _o2 = vaddq_s32(_d1, _d2); _o2 = vmlaq_lane_s32(_o2, _d3, _tp1, 1); _o2 = vmlaq_lane_s32(_o2, _d4, _tp1, 1); _o3 = vsubq_s32(_d1, _d2); _o3 = vmlaq_lane_s32(_o3, _d3, _tp2, 0); _o3 = 
vmlaq_lane_s32(_o3, _d4, _tp2, 1); _o3 = vaddq_s32(_o3, _d5); // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = _o0[n] / 576; outRow1[n] = _o1[n] / 576; outRow2[n] = _o2[n] / 576; outRow3[n] = _o3[n] / 576; } #else int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6]; int w0[6],w1[6],w2[6],w3[6]; int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4]; int o0[4],o1[4],o2[4],o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 6]; s2[n] = out_tile[n+12]; s3[n] = out_tile[n+18]; s4[n] = out_tile[n+24]; s5[n] = out_tile[n+30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n]; w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n]; w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n]; o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n]; o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] / 576; outRow1[n] = o1[n] / 576; outRow2[n] = o2[n] / 576; outRow3[n] = o3[n] / 576; } #endif // __ARM_NEON out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw * 3; outRow1 += outw * 3; outRow2 += outw * 3; outRow3 += outw * 3; } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads); } static void 
conv3x3s1_winograd43_dequant_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // pad to 4n+2, winograd F(4,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = 
bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q); short* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { 
_t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5]; _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5]; _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5]; _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5]; _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5]; _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5]; } // d = B_t * d_t _n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, _4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); _n2 = vmlaq_s16(_n2, _t3, _1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3]; out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1]; out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5]; out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3]; out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1]; out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5]; out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3]; out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1]; out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5]; #else short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6]; 
short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6]; short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4*d0[n] - 5*d2[n] + d4[n]; w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n]; w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n]; w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n]; w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n]; w5[n] = 4*d1[n] - 5*d3[n] + d5[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5]; t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5]; t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4*t0[n] - 5*t2[n] + t4[n]; d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n]; d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n]; d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n]; d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n]; d5[n] = 4*t1[n] - 5*t3[n] + t5[n]; } // save to out_tm { out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3]; out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1]; out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5]; out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3]; out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1]; out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5]; out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3]; out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1]; out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 
+= 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop // "prfm pldl1keep, [%8, #128] \n" // "prfm pldl1keep, [%9, #128] \n" "ld1 {v8.4h}, [%8], #8 \n" // _r0 = vld1_s16(r0); "prfm pldl1keep, [%9, #384] \n" "ld1 {v9.4h, v10.4h, v11.4h, v12.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "prfm pldl1keep, [%9, #384] \n" "ld1 {v13.4h, v14.4h, v15.4h, v16.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int 
q=0; q<inch; q++) // "add %9, %9, #16 \n" // "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); // "add %9, %9, #16 \n" // "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); // "add %9, %9, #16 \n" // "ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); // "add %8, %8, #8 \n" // "add %9, %9, #16 \n" "subs w4, w4, #2 \n" // "prfm pldl1keep, [%8, #128] \n" // "prfm pldl1keep, [%9, #128] \n" "ld1 {v17.4h}, [%8], #8 \n" // _r0 = vld1_s16(r0); "prfm pldl1keep, [%9, #384] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "prfm pldl1keep, [%9, #384] \n" "ld1 {v22.4h, v23.4h, v24.4h, v25.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "ld1 {v8.4h}, [%8], #8 \n" // _r0 = vld1_s16(r0); "prfm pldl1keep, [%9, #384] \n" "ld1 {v9.4h, v10.4h, v11.4h, v12.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "prfm pldl1keep, [%9, #384] \n" "ld1 {v13.4h, v14.4h, v15.4h, v16.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "smlal v0.4s, v17.4h, v18.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v17.4h, v19.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v17.4h, v20.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v17.4h, v21.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v17.4h, v22.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v17.4h, v23.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v17.4h, v24.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v17.4h, v25.4h \n" // sum7 += 
(a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = 
top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); float* outRow0 = top_blob_bordered.channel(p); float* outRow1 = outRow0 + outw; float* outRow2 = outRow0 + outw * 2; float* outRow3 = outRow0 + outw * 3; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_dequant0 = scales_dequant[p]; const float scale0 = scale_dequant0 / 576.0; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w1, _w2, _w3; int32x2_t _w0n, _w1n, _w2n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o1, _o2, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile+4); _s1 = vld1q_s32(out_tile+6); _s1n = vld1_s32(out_tile+10); _s2 = vld1q_s32(out_tile+12); _s2n = vld1_s32(out_tile+16); _s3 = vld1q_s32(out_tile+18); _s3n = vld1_s32(out_tile+22); _s4 = vld1q_s32(out_tile+24); _s4n = vld1_s32(out_tile+28); _s5 = vld1q_s32(out_tile+30); _s5n = vld1_s32(out_tile+34); // w = A_T * W int32x2_t _tp0 = {-1, 2}; int32x2_t _tp1 = {-2, 4}; int32x2_t _tp2 = {8, -8}; _w0 = vaddq_s32(_s0, _s1); _w0n = vadd_s32(_s0n, _s1n); _w0 = vaddq_s32(_w0, _s2); _w0n = vadd_s32(_w0n, _s2n); _w0 = vaddq_s32(_w0, _s3); _w0n = vadd_s32(_w0n, _s3n); _w0 = vaddq_s32(_w0, _s4); _w0n = vadd_s32(_w0n, _s4n); _w1 = vsubq_s32(_s1, _s2); _w1n = vsub_s32(_s1n, _s2n); _w1 = vmlaq_lane_s32(_w1, _s3, _tp0, 1); _w1n = vmla_lane_s32(_w1n, _s3n, _tp0, 1); _w1 = vmlaq_lane_s32(_w1, _s4, _tp1, 0); _w1n = vmla_lane_s32(_w1n, _s4n, _tp1, 0); _w2 = vaddq_s32(_s1, _s2); _w2n = vadd_s32(_s1n, _s2n); _w2 = vmlaq_lane_s32(_w2, _s3, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s3n, _tp1, 1); _w2 = vmlaq_lane_s32(_w2, _s4, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s4n, _tp1, 1); _w3 = vsubq_s32(_s1, _s2); _w3n = vsub_s32(_s1n, _s2n); _w3 = vmlaq_lane_s32(_w3, _s3, _tp2, 0); _w3n = vmla_lane_s32(_w3n, _s3n, _tp2, 0); _w3 = vmlaq_lane_s32(_w3, _s4, _tp2, 1); _w3n = vmla_lane_s32(_w3n, _s4n, _tp2, 1); _w3 = vaddq_s32(_w3, _s5); _w3n = vadd_s32(_w3n, _s5n); // transpose w to w_t { _d0[0] = _w0[0]; _d0[1] = _w1[0]; _d0[2] = _w2[0]; _d0[3] = _w3[0]; _d1[0] = _w0[1]; _d1[1] = _w1[1]; _d1[2] = _w2[1]; _d1[3] = _w3[1]; _d2[0] = _w0[2]; 
_d2[1] = _w1[2]; _d2[2] = _w2[2]; _d2[3] = _w3[2]; _d3[0] = _w0[3]; _d3[1] = _w1[3]; _d3[2] = _w2[3]; _d3[3] = _w3[3]; _d4[0] = _w0n[0]; _d4[1] = _w1n[0]; _d4[2] = _w2n[0]; _d4[3] = _w3n[0]; _d5[0] = _w0n[1]; _d5[1] = _w1n[1]; _d5[2] = _w2n[1]; _d5[3] = _w3n[1]; } // Y = A_T * w_t _o0 = vaddq_s32(_d0, _d1); _o0 = vaddq_s32(_o0, _d2); _o0 = vaddq_s32(_o0, _d3); _o0 = vaddq_s32(_o0, _d4); _o1 = vsubq_s32(_d1, _d2); _o1 = vmlaq_lane_s32(_o1, _d3, _tp0, 1); _o1 = vmlaq_lane_s32(_o1, _d4, _tp1, 0); _o2 = vaddq_s32(_d1, _d2); _o2 = vmlaq_lane_s32(_o2, _d3, _tp1, 1); _o2 = vmlaq_lane_s32(_o2, _d4, _tp1, 1); _o3 = vsubq_s32(_d1, _d2); _o3 = vmlaq_lane_s32(_o3, _d3, _tp2, 0); _o3 = vmlaq_lane_s32(_o3, _d4, _tp2, 1); _o3 = vaddq_s32(_o3, _d5); // save to top blob tm float32x4_t _scale0 = vdupq_n_f32(scale0); float32x4_t _out0_f32 = vdupq_n_f32(bias0); float32x4_t _out1_f32 = vdupq_n_f32(bias0); float32x4_t _out2_f32 = vdupq_n_f32(bias0); float32x4_t _out3_f32 = vdupq_n_f32(bias0); _out0_f32 = vmlaq_f32(_out0_f32, vcvtq_f32_s32(_o0), _scale0); _out1_f32 = vmlaq_f32(_out1_f32, vcvtq_f32_s32(_o1), _scale0); _out2_f32 = vmlaq_f32(_out2_f32, vcvtq_f32_s32(_o2), _scale0); _out3_f32 = vmlaq_f32(_out3_f32, vcvtq_f32_s32(_o3), _scale0); vst1q_f32(outRow0, _out0_f32); vst1q_f32(outRow1, _out1_f32); vst1q_f32(outRow2, _out2_f32); vst1q_f32(outRow3, _out3_f32); #else int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6]; int w0[6],w1[6],w2[6],w3[6]; int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4]; int o0[4],o1[4],o2[4],o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 6]; s2[n] = out_tile[n+12]; s3[n] = out_tile[n+18]; s4[n] = out_tile[n+24]; s5[n] = out_tile[n+30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n]; w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n]; w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = 
w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n]; o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n]; o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = (float)o0[n] * scale0 + bias0; outRow1[n] = (float)o1[n] * scale0 + bias0; outRow2[n] = (float)o2[n] * scale0 + bias0; outRow3[n] = (float)o3[n] * scale0 + bias0; } #endif // __ARM_NEON out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw * 3; outRow1 += outw * 3; outRow2 += outw * 3; outRow3 += outw * 3; } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads); } static void conv3x3s1_winograd43_requant_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Mat &_bias, std::vector<float> scales_requant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // pad to 4n+2, winograd F(4,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int 
nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q); short* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = 
vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { _t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5]; _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5]; _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5]; _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5]; _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5]; _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5]; } // d = B_t * d_t _n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, _4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); 
_n2 = vmlaq_s16(_n2, _t3, _1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3]; out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1]; out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5]; out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3]; out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1]; out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5]; out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3]; out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1]; out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5]; #else short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6]; short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6]; short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4*d0[n] - 5*d2[n] + d4[n]; w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n]; w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n]; w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n]; w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n]; w5[n] = 4*d1[n] - 5*d3[n] + d5[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5]; 
t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5]; t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4*t0[n] - 5*t2[n] + t4[n]; d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n]; d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n]; d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n]; d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n]; d5[n] = 4*t1[n] - 5*t3[n] + t5[n]; } // save to out_tm { out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3]; out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1]; out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5]; out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3]; out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1]; out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5]; out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3]; out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1]; out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* 
output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop // "prfm pldl1keep, [%8, #128] \n" // "prfm pldl1keep, [%9, #128] \n" "ld1 {v8.4h}, [%8], #8 \n" // _r0 = vld1_s16(r0); "prfm pldl1keep, [%9, #384] \n" "ld1 {v9.4h, v10.4h, v11.4h, v12.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "prfm pldl1keep, [%9, #384] \n" "ld1 {v13.4h, v14.4h, v15.4h, v16.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) // "add %9, %9, #16 \n" // "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); // "add %9, %9, #16 \n" // "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); // "add %9, %9, #16 \n" // "ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); // "add %8, %8, #8 \n" // "add %9, %9, #16 \n" "subs w4, w4, #2 \n" // "prfm pldl1keep, [%8, #128] \n" // "prfm pldl1keep, [%9, #128] \n" "ld1 {v17.4h}, [%8], #8 \n" // _r0 = vld1_s16(r0); "prfm pldl1keep, [%9, #384] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "prfm pldl1keep, [%9, #384] \n" "ld1 {v22.4h, v23.4h, v24.4h, v25.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal 
v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "ld1 {v8.4h}, [%8], #8 \n" // _r0 = vld1_s16(r0); "prfm pldl1keep, [%9, #384] \n" "ld1 {v9.4h, v10.4h, v11.4h, v12.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "prfm pldl1keep, [%9, #384] \n" "ld1 {v13.4h, v14.4h, v15.4h, v16.4h}, [%9], #32 \n" // _k01 = vld1q_s16(kptr); "smlal v0.4s, v17.4h, v18.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v17.4h, v19.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v17.4h, v20.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v17.4h, v21.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v17.4h, v22.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v17.4h, v23.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v17.4h, v24.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v17.4h, v25.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", 
"v20", "v21", "v22", "v23", "v24", "v25" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else 
int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, 
w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; 
for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // 
__aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 1u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); signed char* outRow0 = top_blob_bordered.channel(p); signed char* outRow1 = outRow0 + outw; signed char* outRow2 = outRow0 + outw * 2; signed char* outRow3 = outRow0 + outw * 3; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_int = scales_requant[2*p] / 576.0; const float scale_out = scales_requant[2*p+1]; float32x4_t _scale_int = vdupq_n_f32(scale_int); float32x4_t _scale_out = vdupq_n_f32(scale_out); float32x4_t _bias0 = vdupq_n_f32(bias0); for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w1, _w2, _w3; int32x2_t _w0n, _w1n, _w2n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o1, _o2, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile+4); _s1 = vld1q_s32(out_tile+6); _s1n = vld1_s32(out_tile+10); _s2 = vld1q_s32(out_tile+12); _s2n = vld1_s32(out_tile+16); _s3 = vld1q_s32(out_tile+18); _s3n = vld1_s32(out_tile+22); _s4 = vld1q_s32(out_tile+24); _s4n = vld1_s32(out_tile+28); _s5 = vld1q_s32(out_tile+30); _s5n = vld1_s32(out_tile+34); // w = A_T * W int32x2_t _tp0 = {-1, 2}; int32x2_t _tp1 = {-2, 4}; int32x2_t _tp2 = {8, -8}; _w0 = vaddq_s32(_s0, _s1); _w0n = vadd_s32(_s0n, _s1n); _w0 = vaddq_s32(_w0, _s2); _w0n = vadd_s32(_w0n, _s2n); _w0 = vaddq_s32(_w0, _s3); _w0n = vadd_s32(_w0n, _s3n); _w0 = vaddq_s32(_w0, _s4); _w0n = vadd_s32(_w0n, _s4n); _w1 = vsubq_s32(_s1, _s2); _w1n = vsub_s32(_s1n, _s2n); _w1 = vmlaq_lane_s32(_w1, _s3, _tp0, 1); _w1n = vmla_lane_s32(_w1n, _s3n, _tp0, 1); _w1 = vmlaq_lane_s32(_w1, _s4, _tp1, 0); _w1n = vmla_lane_s32(_w1n, _s4n, _tp1, 0); _w2 = vaddq_s32(_s1, _s2); _w2n = vadd_s32(_s1n, _s2n); _w2 = vmlaq_lane_s32(_w2, _s3, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s3n, _tp1, 1); _w2 = vmlaq_lane_s32(_w2, _s4, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s4n, _tp1, 1); _w3 = vsubq_s32(_s1, _s2); _w3n = vsub_s32(_s1n, _s2n); _w3 = vmlaq_lane_s32(_w3, _s3, _tp2, 0); _w3n = vmla_lane_s32(_w3n, _s3n, _tp2, 0); _w3 = vmlaq_lane_s32(_w3, _s4, _tp2, 1); _w3n = vmla_lane_s32(_w3n, _s4n, _tp2, 1); _w3 = vaddq_s32(_w3, _s5); _w3n = vadd_s32(_w3n, _s5n); // transpose w to w_t { _d0[0] = 
_w0[0]; _d0[1] = _w1[0]; _d0[2] = _w2[0]; _d0[3] = _w3[0]; _d1[0] = _w0[1]; _d1[1] = _w1[1]; _d1[2] = _w2[1]; _d1[3] = _w3[1]; _d2[0] = _w0[2]; _d2[1] = _w1[2]; _d2[2] = _w2[2]; _d2[3] = _w3[2]; _d3[0] = _w0[3]; _d3[1] = _w1[3]; _d3[2] = _w2[3]; _d3[3] = _w3[3]; _d4[0] = _w0n[0]; _d4[1] = _w1n[0]; _d4[2] = _w2n[0]; _d4[3] = _w3n[0]; _d5[0] = _w0n[1]; _d5[1] = _w1n[1]; _d5[2] = _w2n[1]; _d5[3] = _w3n[1]; } // Y = A_T * w_t _o0 = vaddq_s32(_d0, _d1); _o0 = vaddq_s32(_o0, _d2); _o0 = vaddq_s32(_o0, _d3); _o0 = vaddq_s32(_o0, _d4); _o1 = vsubq_s32(_d1, _d2); _o1 = vmlaq_lane_s32(_o1, _d3, _tp0, 1); _o1 = vmlaq_lane_s32(_o1, _d4, _tp1, 0); _o2 = vaddq_s32(_d1, _d2); _o2 = vmlaq_lane_s32(_o2, _d3, _tp1, 1); _o2 = vmlaq_lane_s32(_o2, _d4, _tp1, 1); _o3 = vsubq_s32(_d1, _d2); _o3 = vmlaq_lane_s32(_o3, _d3, _tp2, 0); _o3 = vmlaq_lane_s32(_o3, _d4, _tp2, 1); _o3 = vaddq_s32(_o3, _d5); // save to top blob tm float32x4_t _out0_f32 = _bias0; float32x4_t _out1_f32 = _bias0; float32x4_t _out2_f32 = _bias0; float32x4_t _out3_f32 = _bias0; _out0_f32 = vmlaq_f32(_out0_f32, vcvtq_f32_s32(_o0), _scale_int); _out1_f32 = vmlaq_f32(_out1_f32, vcvtq_f32_s32(_o1), _scale_int); _out2_f32 = vmlaq_f32(_out2_f32, vcvtq_f32_s32(_o2), _scale_int); _out3_f32 = vmlaq_f32(_out3_f32, vcvtq_f32_s32(_o3), _scale_int); _out0_f32 = vmulq_f32(_out0_f32, _scale_out); _out1_f32 = vmulq_f32(_out1_f32, _scale_out); _out2_f32 = vmulq_f32(_out2_f32, _scale_out); _out3_f32 = vmulq_f32(_out3_f32, _scale_out); #if __aarch64__ int16x4_t _out0_s16 = vqmovn_s32(vcvtaq_s32_f32(_out0_f32)); int16x4_t _out1_s16 = vqmovn_s32(vcvtaq_s32_f32(_out1_f32)); int16x4_t _out2_s16 = vqmovn_s32(vcvtaq_s32_f32(_out2_f32)); int16x4_t _out3_s16 = vqmovn_s32(vcvtaq_s32_f32(_out3_f32)); #else int16x4_t _out0_s16 = vqmovn_s32(vcvtq_s32_f32(_out0_f32)); int16x4_t _out1_s16 = vqmovn_s32(vcvtq_s32_f32(_out1_f32)); int16x4_t _out2_s16 = vqmovn_s32(vcvtq_s32_f32(_out2_f32)); int16x4_t _out3_s16 = vqmovn_s32(vcvtq_s32_f32(_out3_f32)); #endif 
int8x8_t _out01_s8 = vqmovn_s16(vcombine_s16(_out0_s16, _out1_s16)); int8x8_t _out23_s8 = vqmovn_s16(vcombine_s16(_out2_s16, _out3_s16)); outRow0[0] = _out01_s8[0]; outRow0[1] = _out01_s8[1]; outRow0[2] = _out01_s8[2]; outRow0[3] = _out01_s8[3]; outRow1[0] = _out01_s8[4]; outRow1[1] = _out01_s8[5]; outRow1[2] = _out01_s8[6]; outRow1[3] = _out01_s8[7]; outRow2[0] = _out23_s8[0]; outRow2[1] = _out23_s8[1]; outRow2[2] = _out23_s8[2]; outRow2[3] = _out23_s8[3]; outRow3[0] = _out23_s8[4]; outRow3[1] = _out23_s8[5]; outRow3[2] = _out23_s8[6]; outRow3[3] = _out23_s8[7]; #else int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6]; int w0[6],w1[6],w2[6],w3[6]; int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4]; int o0[4],o1[4],o2[4],o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 6]; s2[n] = out_tile[n+12]; s3[n] = out_tile[n+18]; s4[n] = out_tile[n+24]; s5[n] = out_tile[n+30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n]; w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n]; w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n]; o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n]; o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = float2int8(((float)o0[n] * scale_int + bias0) * scale_out); outRow1[n] = float2int8(((float)o1[n] * scale_int + bias0) * scale_out); outRow2[n] = float2int8(((float)o2[n] * scale_int + 
bias0) * scale_out);
                        outRow3[n] = float2int8(((float)o3[n] * scale_int + bias0) * scale_out);
                    }
#endif // __ARM_NEON
                    out_tile += 36;

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }

                // advance past the three rows already written by this block row
                outRow0 += outw * 3;
                outRow1 += outw * 3;
                outRow2 += outw * 3;
                outRow3 += outw * 3;
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

// Repack int8 3x3 kernels for the packed stride-2 NEON path
// (conv3x3s2_packed_int8_neon).
//
// _kernel   : flat array of outch*inch 3x3 signed-char filters
//             (9 weights per input channel, inch*9 per output channel).
// kernel_tm : destination, created here as 8*9 x inch x (outch/8 + outch%8).
//             For each group of 8 output channels the 8 weights that share
//             the same spatial tap k are stored consecutively
//             (k0[k], k1[k], ..., k7[k]), so the compute loop can load one
//             8-lane vector per tap.  Leftover channels (outch % 8) are
//             appended after the packed groups, one channel per Mat channel,
//             in plain row order.
// inch/outch: input / output channel counts.
static void conv3x3s2_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(8*9, inch, outch/8 + outch%8, (size_t)1u);

    const signed char* kernel = _kernel;

    int p=0;
    // packed groups of 8 output channels: interleave the 8 filters tap by tap
    for (; p+7<outch; p+=8)
    {
        const signed char* k0 = kernel + (p+0)*inch*9;
        const signed char* k1 = kernel + (p+1)*inch*9;
        const signed char* k2 = kernel + (p+2)*inch*9;
        const signed char* k3 = kernel + (p+3)*inch*9;
        const signed char* k4 = kernel + (p+4)*inch*9;
        const signed char* k5 = kernel + (p+5)*inch*9;
        const signed char* k6 = kernel + (p+6)*inch*9;
        const signed char* k7 = kernel + (p+7)*inch*9;

        signed char* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
            {
                // one spatial tap from each of the 8 filters, side by side
                ktmp[0] = k0[k];
                ktmp[1] = k1[k];
                ktmp[2] = k2[k];
                ktmp[3] = k3[k];
                ktmp[4] = k4[k];
                ktmp[5] = k5[k];
                ktmp[6] = k6[k];
                ktmp[7] = k7[k];
                ktmp += 8;
            }

            // next input channel of each filter
            k0 += 9;
            k1 += 9;
            k2 += 9;
            k3 += 9;
            k4 += 9;
            k5 += 9;
            k6 += 9;
            k7 += 9;
        }
    }
    // remaining output channels: copied unpacked, one per Mat channel
    // after the outch/8 packed groups (channel index p/8 + p%8)
    for (; p<outch; p++)
    {
        const signed char* k0 = kernel + (p+0)*inch*9;

        signed char* ktmp = kernel_tm.channel(p/8 + p%8);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
            {
                ktmp[k] = k0[k];
            }
            ktmp += 9;
            k0 += 9;
        }
    }
}

static void conv3x3s2_packed_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // stride 2: skip one input row plus the horizontal remainder per output row
    const int tailstep = w - 2*outw + w;

    int nn_outch = outch >> 3;
    int remain_outch_start = nn_outch << 3;

    #pragma omp \
parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; Mat out0 = top_blob.channel(p+0); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); Mat out4 = top_blob.channel(p+4); Mat out5 = top_blob.channel(p+5); Mat out6 = top_blob.channel(p+6); Mat out7 = top_blob.channel(p+7); out0.fill(0); out1.fill(0); out2.fill(0); out3.fill(0); out4.fill(0); out5.fill(0); out6.fill(0); out7.fill(0); const signed char* ktmp = _kernel.channel(p/8); for (int q=0; q<inch; q++) { int* outptr0 = out0; int* outptr1 = out1; int* outptr2 = out2; int* outptr3 = out3; int* outptr4 = out4; int* outptr5 = out5; int* outptr6 = out6; int* outptr7 = out7; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%9], #16 \n"//r0-r2 "ld2 {v5.8b, v6.8b}, [%9] \n" "ld1 {v8.4s, v9.4s}, [%1] \n"//out0 "ld1 {v10.4s, v11.4s}, [%2] \n"//out1 "ld1 {v12.4s, v13.4s}, [%3] \n"//out2 "ld1 {v14.4s, v15.4s}, [%4] \n"//out3 "ld1 {v16.4s, v17.4s}, [%5] \n"//out4 "ld1 {v18.4s, v19.4s}, [%6] \n"//out5 "ld1 {v20.4s, v21.4s}, [%7] \n"//out6 "ld1 {v22.4s, v23.4s}, [%8] \n"//out7 "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k00-k70) "sshll v1.8h, v1.8b, #0 \n"//(k01-k71) "sshll v2.8h, v2.8b, #0 \n"//(k02-k72) "sshll v3.8h, v3.8b, #0 \n"// r0 "sshll v4.8h, v4.8b, #0 \n"// r1 "sshll v7.8h, v7.8b, #0 \n"// r2 // r0 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r00-r07)*k00 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r00-r07)*k10 
"smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r00-r07)*k20 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r00-r07)*k30 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r00-r07)*k40 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r00-r07)*k50 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r00-r07)*k60 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r00-r07)*k70 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r1 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r10-r17)*k01 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r10-r17)*k11 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r10-r17)*k21 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r10-r17)*k31 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r10-r17)*k41 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r10-r17)*k51 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r10-r17)*k61 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r10-r17)*k71 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r2 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r20-r27)*k02 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r20-r27)*k12 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r20-r27)*k22 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r20-r27)*k32 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r20-r27)*k42 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r20-r27)*k52 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r20-r27)*k62 "smlal2 v21.4s, v7.8h, v2.h[6] \n" 
"smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r20-r27)*k72 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%10], #16 \n"//r3-r5 "ld2 {v5.8b, v6.8b}, [%10] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k03-k73) "sshll v1.8h, v1.8b, #0 \n"//(k04-k74) "sshll v2.8h, v2.8b, #0 \n"//(k05-k75) "sshll v3.8h, v3.8b, #0 \n"// r3 "sshll v4.8h, v4.8b, #0 \n"// r4 "sshll v7.8h, v7.8b, #0 \n"// r5 // r3 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r30-r37)*k03 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r30-r37)*k13 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r30-r37)*k23 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r30-r37)*k33 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r30-r37)*k43 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r30-r37)*k53 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r30-r37)*k63 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r30-r37)*k73 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r4 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r40-r47)*k04 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r40-r47)*k14 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r40-r47)*k24 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r40-r47)*k34 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r40-r47)*k44 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r40-r47)*k54 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r40-r47)*k64 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r40-r47)*k74 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r5 "smlal v8.4s, v7.4h, v2.h[0] 
\n"// out0 += (r50-r57)*k05 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r50-r57)*k15 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r50-r57)*k25 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r50-r57)*k35 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r50-r57)*k45 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r50-r57)*k55 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r50-r57)*k65 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r50-r57)*k75 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%11], #16 \n"//r6-r8 "ld2 {v5.8b, v6.8b}, [%11] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k06-k76) "sshll v1.8h, v1.8b, #0 \n"//(k07-k77) "sshll v2.8h, v2.8b, #0 \n"//(k08-k78) "sshll v3.8h, v3.8b, #0 \n"// r6 "sshll v4.8h, v4.8b, #0 \n"// r7 "sshll v7.8h, v7.8b, #0 \n"// r8 // r6 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r60-r67)*k06 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r60-r67)*k16 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r60-r67)*k26 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r60-r67)*k36 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r60-r67)*k46 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r60-r67)*k56 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r60-r67)*k66 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r60-r67)*k76 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r7 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r70-r77)*k07 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r70-r77)*k17 "smlal2 
v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r70-r77)*k27 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r70-r77)*k37 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r70-r77)*k47 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r70-r77)*k57 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r70-r77)*k67 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r70-r77)*k77 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r8 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r80-r87)*k08 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r80-r87)*k18 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r80-r87)*k28 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r80-r87)*k38 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r80-r87)*k48 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r80-r87)*k58 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r80-r87)*k68 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r80-r87)*k78 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "st1 {v16.4s, v17.4s}, [%5], #32 \n" "st1 {v18.4s, v19.4s}, [%6], #32 \n" "st1 {v20.4s, v21.4s}, [%7], #32 \n" "st1 {v22.4s, v23.4s}, [%8], #32 \n" "subs %w0, %w0, #1 \n" "sub %12, %12, #72 \n"// reset ktmp "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), 
"3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: \n" "pld [%1, #128] \n" "vld1.s32 {d16-d17}, [%1] \n"// out0 "pld [%2, #128] \n" "vld1.s32 {d18-d19}, [%2] \n"// out1 "pld [%3, #128] \n" "vld1.s32 {d20-d21}, [%3] \n"// out2 "pld [%4, #128] \n" "vld1.s32 {d22-d23}, [%4] \n"// out3 // r0 "pld [%9, #64] \n" "vld2.s8 {d8-d9}, [%9] \n"// d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015) "add %9, #8 \n" "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k00-k70) d1(k01-k71) d2(k02-k72) "pld [%5, #128] \n" "vld1.s32 {d24-d25}, [%5] \n"// out4 "pld [%6, #128] \n" "vld1.s32 {d26-d27}, [%6] \n"// out5 "vmovl.s8 q2, d2 \n"// q2(k02-k72) "vmovl.s8 q1, d1 \n"// q1(k01-k71) "vmovl.s8 q0, d0 \n"// q0(k00-k70) "vext.s8 d12, d8, d8, #1 \n"// d12(a02 a04 a06 a08 x x x x) "pld [%7, #128] \n" "vld1.s32 {d28-d29}, [%7] \n"// out6 "vmovl.s8 q5, d9 \n"// q5(a01 a03 a05 a07 a09 a011 a013 a015) d11 "vmovl.s8 q4, d8 \n"// q4(a00 a02 a04 a06 a08 a010 a012 a014) d9 "vmovl.s8 q6, d12 \n"// q6(a02 a04 a06 a08 a010 a012 a014 a016) d13 "pld [%8, #128] \n" "vld1.s32 {d30-d31}, [%8] \n"// out7 "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a00 a02 a04 a06) * k00 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a00 a02 a04 a06) * k10 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a00 a02 a04 a06) * k20 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a00 a02 a04 a06) * k30 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a00 a02 a04 a06) * k40 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a00 a02 a04 a06) * k50 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a00 a02 a04 a06) * k60 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a00 a02 a04 a06) * k70 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a01-a07) * k01 "vmlal.s16 q9, 
d10, d2[1] \n"// sum1 += (a01-a07) * k11 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a01-a07) * k21 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a01-a07) * k31 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a01-a07) * k41 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a01-a07) * k51 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a01-a07) * k61 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a01-a07) * k71 "pld [%10, #64] \n" "vld2.s8 {d8-d9}, [%10] \n"// d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115) "add %10, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a02-a08) * k02 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a02-a08) * k12 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a02-a08) * k22 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a02-a08) * k32 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k03-k73) d1(k04-k74) d2(k05-k75) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a02-a08) * k42 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a02-a08) * k52 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a02-a08) * k62 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a02-a08) * k72 // r1 "vext.s8 d12, d8, d8, #1 \n"// d12(a12 a14 a16 a18 x x x x) "vmovl.s8 q2, d2 \n"// q2(k05-k75) "vmovl.s8 q1, d1 \n"// q1(k04-k74) "vmovl.s8 q0, d0 \n"// q0(k03-k73) "vmovl.s8 q5, d9 \n"// q5(a11-a115) "vmovl.s8 q4, d8 \n"// q4(a10-a114) "vmovl.s8 q6, d12 \n"// q6(a12-a116) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a10-a16) * k03 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a10-a16) * k13 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a10-a16) * k23 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a10-a16) * k33 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a10-a16) * k43 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a10-a16) * k53 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a10-a16) * k63 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a10-a16) * k73 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a11-a17) * k04 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a11-a17) * k14 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a11-a17) * k24 "vmlal.s16 q11, d10, d2[3] \n"// 
sum3 += (a11-a17) * k34 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a11-a17) * k44 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a11-a17) * k54 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a11-a17) * k64 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a11-a17) * k74 "pld [%11, #64] \n" "vld2.s8 {d8-d9}, [%11] \n"// d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 a29 a211 a213 a215) "add %11, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a12-a18) * k05 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a12-a18) * k15 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a12-a18) * k25 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a12-a18) * k35 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k06-k76) d1(k07-k77) d2(k08-k78) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a12-a18) * k45 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a12-a18) * k55 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a12-a18) * k65 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a12-a18) * k75 // r2 "vext.s8 d12, d8, d8, #1 \n"// d12(a22 a24 a26 a28 x x x x) "vmovl.s8 q2, d2 \n"// q2(k08-k78) "vmovl.s8 q1, d1 \n"// q1(k07-k77) "vmovl.s8 q0, d0 \n"// q0(k06-k76) "vmovl.s8 q5, d9 \n"// q5(a21-a215) "vmovl.s8 q4, d8 \n"// q4(a20-a214) "vmovl.s8 q6, d12 \n"// q6(a22-a216) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a20-a26) * k06 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a20-a26) * k16 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a20-a26) * k26 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a20-a26) * k36 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a20-a26) * k46 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a20-a26) * k56 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a20-a26) * k66 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a20-a26) * k76 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a21-a27) * k07 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a21-a27) * k17 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a21-a27) * k27 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a21-a27) * k37 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a21-a27) * k47 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a21-a27) 
* k57 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a21-a27) * k67 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a21-a27) * k77 "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a22-a28) * k08 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a22-a28) * k18 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a22-a28) * k28 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a22-a28) * k38 "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a22-a28) * k48 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a22-a28) * k58 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a22-a28) * k68 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a22-a28) * k78 // save s32 to memory "sub %12, %12, #72 \n" "vst1.s32 {d16-d17}, [%1]! \n"// out0 "vst1.s32 {d18-d19}, [%2]! \n"// out1 "vst1.s32 {d20-d21}, [%3]! \n"// out2 "vst1.s32 {d22-d23}, [%4]! \n"// out3 "subs %0, #1 \n" "vst1.s32 {d24-d25}, [%5]! \n"// out4 "vst1.s32 {d26-d27}, [%6]! \n"// out5 "vst1.s32 {d28-d29}, [%7]! \n"// out6 "vst1.s32 {d30-d31}, [%8]! \n"// out7 "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ int8x8_t _r0_s8 = vld1_s8(r0);// (a00 a01 a02 ....) int8x8_t _r1_s8 = vld1_s8(r1);// (a10 a11 a12 ....) int8x8_t _r2_s8 = vld1_s8(r2);// (a20 a21 a22 ....) 
int16x8_t _r0 = vmovl_s8(_r0_s8); int16x8_t _r1 = vmovl_s8(_r1_s8); int16x8_t _r2 = vmovl_s8(_r2_s8); int32x4_t _sum03, _sum47; _sum03 = vld1q_lane_s32(outptr0, _sum03, 0);// out0 _sum03 = vld1q_lane_s32(outptr1, _sum03, 1);// out1 _sum03 = vld1q_lane_s32(outptr2, _sum03, 2);// out2 _sum03 = vld1q_lane_s32(outptr3, _sum03, 3);// out3 _sum47 = vld1q_lane_s32(outptr4, _sum47, 0);// out4 _sum47 = vld1q_lane_s32(outptr5, _sum47, 1);// out5 _sum47 = vld1q_lane_s32(outptr6, _sum47, 2);// out6 _sum47 = vld1q_lane_s32(outptr7, _sum47, 3);// out7 // k0 - k2 int8x8_t _k0_8 = vld1_s8(ktmp); //(k00-k70) int8x8_t _k1_8 = vld1_s8(ktmp+8); //(k01-k71) int8x8_t _k2_8 = vld1_s8(ktmp+16); //(k02-k72) int16x8_t _k0 = vmovl_s8(_k0_8); int16x8_t _k1 = vmovl_s8(_k1_8); int16x8_t _k2 = vmovl_s8(_k2_8); int32x4_t _sum0 = vmull_laneq_s16(vget_low_s16(_k0), _r0, 0); int32x4_t _sum0n = vmull_laneq_s16(vget_high_s16(_k0), _r0, 0); int32x4_t _sum1 = vmull_laneq_s16(vget_low_s16(_k1), _r0, 1); int32x4_t _sum1n = vmull_laneq_s16(vget_high_s16(_k1), _r0, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r0, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r0, 2); // k3 - k5 _k0_8 = vld1_s8(ktmp+24); //(k03-k73) _k1_8 = vld1_s8(ktmp+32); //(k04-k74) _k2_8 = vld1_s8(ktmp+40); //(k05-k75) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r1, 0); _sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r1, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r1, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r1, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r1, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r1, 2); // k6 - k8 _k0_8 = vld1_s8(ktmp+48); //(k06-k76) _k1_8 = vld1_s8(ktmp+56); //(k07-k77) _k2_8 = vld1_s8(ktmp+64); //(k08-k78) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r2, 0); _sum0n = 
vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r2, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r2, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r2, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r2, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r2, 2); _sum0 = vaddq_s32(_sum0, _sum1); _sum0n = vaddq_s32(_sum0n, _sum1n); _sum03 = vaddq_s32(_sum03, _sum0); _sum47 = vaddq_s32(_sum47, _sum0n); vst1q_lane_s32(outptr0, _sum03, 0); vst1q_lane_s32(outptr1, _sum03, 1); vst1q_lane_s32(outptr2, _sum03, 2); vst1q_lane_s32(outptr3, _sum03, 3); vst1q_lane_s32(outptr4, _sum47, 0); vst1q_lane_s32(outptr5, _sum47, 1); vst1q_lane_s32(outptr6, _sum47, 2); vst1q_lane_s32(outptr7, _sum47, 3); outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #else // __aarch64__ asm volatile( "pld [%8, #64] \n" "vld1.s8 {d0}, [%8] \n"// d0(a00 a01 a02 ....) "pld [%9, #64] \n" "vld1.s8 {d2}, [%9] \n"// d2(a10 a11 a12 ....) "pld [%10, #64] \n" "vld1.s8 {d4}, [%10] \n"// d4(a20 a21 a22 ....) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n"// d6(k00-k70) d7(k01-k71) d8(k02-k72) "vmovl.s8 q0, d0 \n"// d0(a00 a01 a02 x) "vmovl.s8 q1, d2 \n"// d2(a10 a11 a12 x) "vmovl.s8 q2, d4 \n"// d4(a20 a21 a22 x) "vmovl.s8 q5, d8 \n"// d10(k02-k32) d11(k42-k72) "vmovl.s8 q4, d7 \n"// d8(k01-k31) d9(k41-k71) "vmovl.s8 q3, d6 \n"// d6(k00-k30) d7(k40-k70) "vld1.s32 {d20[0]}, [%0] \n"// out0 q10 "vld1.s32 {d20[1]}, [%1] \n"// out1 "vld1.s32 {d21[0]}, [%2] \n"// out2 "vld1.s32 {d21[1]}, [%3] \n"// out3 "pld [%11, #64] \n" "vld1.s8 {d24-d26}, [%11]! 
\n" "vmovl.s8 q14, d26 \n"// d28(k05-k35) d29(k45-k75) "vmovl.s8 q13, d25 \n"// d26(k04-k34) d27(k44-k74) "vmovl.s8 q12, d24 \n"// d24(k03-k33) d25(k43-k73) "vld1.s32 {d22[0]}, [%4] \n"// out4 q11 "vld1.s32 {d22[1]}, [%5] \n"// out5 "vld1.s32 {d23[0]}, [%6] \n"// out6 "vld1.s32 {d23[1]}, [%7] \n"// out7 "vmull.s16 q6, d6, d0[0] \n"// a00 x (k00-k30) "vmull.s16 q7, d7, d0[0] \n"// a00 x (k40-k70) "vmull.s16 q8, d8, d0[1] \n"// a01 x (k01-k31) "vmull.s16 q9, d9, d0[1] \n"// a01 x (k41-k71) "vmlal.s16 q10, d10, d0[2] \n"// a02 x (k02-k32) "vmlal.s16 q11, d11, d0[2] \n"// a02 x (k42-k72) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n" "vmovl.s8 q5, d8 \n"// d10(k08-k38) d11(k48-k78) "vmovl.s8 q4, d7 \n"// d8(k07-k37) d9(k47-k77) "vmovl.s8 q3, d6 \n"// d6(k06-k36) d7(k46-k76) "vmlal.s16 q6, d24, d2[0] \n"// a10 x (k03-k33) "vmlal.s16 q7, d25, d2[0] \n"// a10 x (k43-k73) "vmlal.s16 q8, d26, d2[1] \n"// a11 x (k04-k34) "vmlal.s16 q9, d27, d2[1] \n"// a11 x (k44-k74) "vmlal.s16 q10, d28, d2[2] \n"// a12 x (k05-k35) "vmlal.s16 q11, d29, d2[2] \n"// a12 x (k45-k75) "vmlal.s16 q6, d6, d4[0] \n"// a20 x (k06-k36) "vmlal.s16 q7, d7, d4[0] \n"// a20 x (k46-k76) "vmlal.s16 q8, d8, d4[1] \n"// a21 x (k07-k37) "vmlal.s16 q9, d9, d4[1] \n"// a21 x (k47-k77) "vmlal.s16 q10, d10, d4[2] \n"// a22 x (k08-k38) "vmlal.s16 q11, d11, d4[2] \n"// a22 x (k48-k78) "vadd.s32 q8, q8, q6 \n" "vadd.s32 q9, q9, q7 \n" "sub %11, %11, #72 \n" "vadd.s32 q10, q10, q8 \n" "vadd.s32 q11, q11, q9 \n" "vst1.s32 {d20[0]}, [%0]! \n"// out0 "vst1.s32 {d20[1]}, [%1]! \n"// out1 "vst1.s32 {d21[0]}, [%2]! \n"// out2 "vst1.s32 {d21[1]}, [%3]! \n"// out3 "vst1.s32 {d22[0]}, [%4]! \n"// out4 "vst1.s32 {d22[1]}, [%5]! \n"// out5 "vst1.s32 {d23[0]}, [%6]! \n"// out6 "vst1.s32 {d23[1]}, [%7]! 
\n"// out7 : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else // __ARM_NEON int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; sum0 += (int)r0[0] * ktmp[0]; sum1 += (int)r0[0] * ktmp[1]; sum2 += (int)r0[0] * ktmp[2]; sum3 += (int)r0[0] * ktmp[3]; sum4 += (int)r0[0] * ktmp[4]; sum5 += (int)r0[0] * ktmp[5]; sum6 += (int)r0[0] * ktmp[6]; sum7 += (int)r0[0] * ktmp[7]; ktmp += 8; sum0 += (int)r0[1] * ktmp[0]; sum1 += (int)r0[1] * ktmp[1]; sum2 += (int)r0[1] * ktmp[2]; sum3 += (int)r0[1] * ktmp[3]; sum4 += (int)r0[1] * ktmp[4]; sum5 += (int)r0[1] * ktmp[5]; sum6 += (int)r0[1] * ktmp[6]; sum7 += (int)r0[1] * ktmp[7]; ktmp += 8; sum0 += (int)r0[2] * ktmp[0]; sum1 += (int)r0[2] * ktmp[1]; sum2 += (int)r0[2] * ktmp[2]; sum3 += (int)r0[2] * ktmp[3]; sum4 += (int)r0[2] * ktmp[4]; sum5 += (int)r0[2] * ktmp[5]; sum6 += (int)r0[2] * ktmp[6]; sum7 += (int)r0[2] * ktmp[7]; ktmp += 8; sum0 += (int)r1[0] * ktmp[0]; sum1 += (int)r1[0] * ktmp[1]; sum2 += (int)r1[0] * ktmp[2]; sum3 += (int)r1[0] * ktmp[3]; sum4 += (int)r1[0] * ktmp[4]; sum5 += (int)r1[0] * ktmp[5]; sum6 += (int)r1[0] * ktmp[6]; sum7 += (int)r1[0] * ktmp[7]; ktmp += 8; sum0 += (int)r1[1] * ktmp[0]; sum1 += (int)r1[1] * ktmp[1]; sum2 += (int)r1[1] * ktmp[2]; sum3 += (int)r1[1] * ktmp[3]; sum4 += (int)r1[1] * ktmp[4]; sum5 += (int)r1[1] * ktmp[5]; sum6 += (int)r1[1] * ktmp[6]; sum7 += (int)r1[1] * ktmp[7]; ktmp += 8; sum0 += (int)r1[2] * ktmp[0]; sum1 += (int)r1[2] * ktmp[1]; sum2 += 
(int)r1[2] * ktmp[2]; sum3 += (int)r1[2] * ktmp[3]; sum4 += (int)r1[2] * ktmp[4]; sum5 += (int)r1[2] * ktmp[5]; sum6 += (int)r1[2] * ktmp[6]; sum7 += (int)r1[2] * ktmp[7]; ktmp += 8; sum0 += (int)r2[0] * ktmp[0]; sum1 += (int)r2[0] * ktmp[1]; sum2 += (int)r2[0] * ktmp[2]; sum3 += (int)r2[0] * ktmp[3]; sum4 += (int)r2[0] * ktmp[4]; sum5 += (int)r2[0] * ktmp[5]; sum6 += (int)r2[0] * ktmp[6]; sum7 += (int)r2[0] * ktmp[7]; ktmp += 8; sum0 += (int)r2[1] * ktmp[0]; sum1 += (int)r2[1] * ktmp[1]; sum2 += (int)r2[1] * ktmp[2]; sum3 += (int)r2[1] * ktmp[3]; sum4 += (int)r2[1] * ktmp[4]; sum5 += (int)r2[1] * ktmp[5]; sum6 += (int)r2[1] * ktmp[6]; sum7 += (int)r2[1] * ktmp[7]; ktmp += 8; sum0 += (int)r2[2] * ktmp[0]; sum1 += (int)r2[2] * ktmp[1]; sum2 += (int)r2[2] * ktmp[2]; sum3 += (int)r2[2] * ktmp[3]; sum4 += (int)r2[2] * ktmp[4]; sum5 += (int)r2[2] * ktmp[5]; sum6 += (int)r2[2] * ktmp[6]; sum7 += (int)r2[2] * ktmp[7]; ktmp += 8; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; ktmp -= 8*9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8*9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char* ktmp = _kernel.channel(p/8 + p%8); for (int q=0; q<inch; q++) { int* outptr = out; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b}, [%5] \n"//ktmp "ld2 {v2.8b, v3.8b}, [%2], #16 \n"//r0-r2 "ld2 {v4.8b, v5.8b}, 
[%2] \n" "ld2 {v6.8b, v7.8b}, [%3], #16 \n"//r3-r5 "ld2 {v8.8b, v9.8b}, [%3] \n" "ld2 {v10.8b, v11.8b}, [%4], #16 \n"//r6-r8 "ld2 {v12.8b, v13.8b}, [%4] \n" "ld1 {v14.4s, v15.4s}, [%1] \n"//out0 "ext v4.8b, v2.8b, v4.8b, #1 \n" "ext v8.8b, v6.8b, v8.8b, #1 \n" "ext v12.8b, v10.8b, v12.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k0-k7) "sshll v1.8h, v1.8b, #0 \n"//(k8) "sshll v2.8h, v2.8b, #0 \n"// r0 "sshll v3.8h, v3.8b, #0 \n"// r1 "sshll v4.8h, v4.8b, #0 \n"// r2 "sshll v6.8h, v6.8b, #0 \n"// r3 "sshll v7.8h, v7.8b, #0 \n"// r4 "sshll v8.8h, v8.8b, #0 \n"// r5 "sshll v10.8h, v10.8b, #0 \n"// r6 "sshll v11.8h, v11.8b, #0 \n"// r7 "sshll v12.8h, v12.8b, #0 \n"// r8 // r0 "smull v16.4s, v2.4h, v0.h[0] \n"// out = r0*k0 "smull2 v17.4s, v2.8h, v0.h[0] \n" "smull v18.4s, v3.4h, v0.h[1] \n"// outn = r1*k1 "smull2 v19.4s, v3.8h, v0.h[1] \n" "smlal v16.4s, v4.4h, v0.h[2] \n"// out = r2*k2 "smlal2 v17.4s, v4.8h, v0.h[2] \n" "smlal v18.4s, v6.4h, v0.h[3] \n"// outn = r3*k3 "smlal2 v19.4s, v6.8h, v0.h[3] \n" "smlal v16.4s, v7.4h, v0.h[4] \n"// out = r4*k4 "smlal2 v17.4s, v7.8h, v0.h[4] \n" "smlal v18.4s, v8.4h, v0.h[5] \n"// outn = r5*k5 "smlal2 v19.4s, v8.8h, v0.h[5] \n" "smlal v16.4s, v10.4h, v0.h[6] \n"// out = r6*k6 "smlal2 v17.4s, v10.8h, v0.h[6] \n" "smlal v18.4s, v11.4h, v0.h[7] \n"// outn = r7*k7 "smlal2 v19.4s, v11.8h, v0.h[7] \n" "smlal v16.4s, v12.4h, v1.h[0] \n"// out = r8*k8 "smlal2 v17.4s, v12.8h, v1.h[0] \n" "add v8.4s, v16.4s, v18.4s \n" "add v9.4s, v17.4s, v19.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); } #else if (nn > 0) { asm volatile( "vld1.s8 {d0-d1}, [%5] \n"// d0(k0 - k7) d1(k8 ...) 
"vmovl.s8 q1, d1 \n"// d2(k8 ...) "vmovl.s8 q0, d0 \n"// d0(k0 - k3) d1(k4 - k7) "0: \n" "pld [%2, #192] \n" "vld2.s8 {d4-d5}, [%2]! \n"// r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015) "vld2.s8 {d8-d9}, [%2] \n"// d8(a016 ....) "vld2.s8 {d10-d11}, [%3]! \n"// r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115) "vld2.s8 {d14-d15}, [%3] \n"// d14(a116 ....) "vld2.s8 {d16-d17}, [%4]! \n"// r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215) "vld2.s8 {d20-d21}, [%4] \n"// d20(a216 ....) "vld1.s32 {d22-d25}, [%1] \n"// q11(out0 - out3) q12(out4 - out7) "vext.s8 d8, d4, d8, #1 \n"// d8(a02 a04 ... a016) "vext.s8 d14, d10, d14, #1 \n"// d14(a12 a14 ... a116) "vext.s8 d20, d16, d20, #1 \n"// d20(a22 a24 ... a216) "vmovl.s8 q3, d5 \n"// q3(a01 a03 ... a015) "vmovl.s8 q2, d4 \n"// q2(a00 a02 ... a014) "vmovl.s8 q4, d8 \n"// q4(a02 a04 ... a016) "vmovl.s8 q6, d11 \n"// q6(a11 a13 ... a115) "vmovl.s8 q5, d10 \n"// q5(a10 a12 ... a114) "vmovl.s8 q7, d14 \n"// q7(a12 a14 ... a116) "vmovl.s8 q9, d17 \n"// q9(a21 a23 ... a215) "vmovl.s8 q8, d16 \n"// q8(a20 a22 ... a214) "vmovl.s8 q10, d20 \n"// q10(a22 a24 ... a216) "vmlal.s16 q11, d4, d0[0] \n"// k0 "vmlal.s16 q12, d5, d0[0] \n" "vmull.s16 q13, d6, d0[1] \n"// k1 "vmull.s16 q14, d7, d0[1] \n" "vmlal.s16 q11, d8, d0[2] \n"// k2 "vmlal.s16 q12, d9, d0[2] \n" "vmlal.s16 q13, d12, d1[0] \n"// k4 "vmlal.s16 q14, d13, d1[0] \n" "vmlal.s16 q11, d10, d0[3] \n"// k3 "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q13, d14, d1[1] \n"// k5 "vmlal.s16 q14, d15, d1[1] \n" "vmlal.s16 q11, d16, d1[2] \n"// k6 "vmlal.s16 q12, d17, d1[2] \n" "vmlal.s16 q13, d18, d1[3] \n"// k7 "vmlal.s16 q14, d19, d1[3] \n" "vmlal.s16 q11, d20, d2[0] \n"// k8 "vmlal.s16 q12, d21, d2[0] \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vst1.32 {d22-d25}, [%1]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON if (remain > 0) { #if __ARM_NEON int8x8_t _k01234567s8 = vld1_s8(ktmp); int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp+8); int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3); int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6); int16x8_t _k0123_s16 = vmovl_s8(_k01234567s8); int16x8_t _k3456_s16 = vmovl_s8(_k34567xxxs8); int16x8_t _k678x_s16 = vmovl_s8(_k678xxxxxs8); #endif for (; remain>0; remain--) { #if __ARM_NEON int8x8_t _r00s8 = vld1_s8(r0); int8x8_t _r10s8 = vld1_s8(r1); int8x8_t _r20s8 = vld1_s8(r2); int16x8_t _r00s16 = vmovl_s8(_r00s8); int16x8_t _r10s16 = vmovl_s8(_r10s8); int16x8_t _r20s16 = vmovl_s8(_r20s8); int32x4_t _sum = vmull_s16(vget_low_s16(_r00s16), vget_low_s16(_k0123_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r10s16), vget_low_s16(_k3456_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r20s16), vget_low_s16(_k678x_s16)); _sum = vsetq_lane_s32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_s32(_sum); #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); *outptr = vget_lane_s32(_ss, 0); #endif // __aarch64__ #else int sum = 0; sum += (int)r0[0] * ktmp[0]; sum += (int)r0[1] * ktmp[1]; sum += (int)r0[2] * ktmp[2]; sum += (int)r1[0] * ktmp[3]; sum += (int)r1[1] * ktmp[4]; sum += (int)r1[2] * ktmp[5]; sum += (int)r2[0] * ktmp[6]; sum += (int)r2[1] * ktmp[7]; sum += (int)r2[2] * ktmp[8]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 9; } } } static void conv3x3s2_packed_int8_e2e_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const 
Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; int nn_outch = outch >> 3; int remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; Mat out0 = top_blob.channel(p+0); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); Mat out4 = top_blob.channel(p+4); Mat out5 = top_blob.channel(p+5); Mat out6 = top_blob.channel(p+6); Mat out7 = top_blob.channel(p+7); out0.fill(0); out1.fill(0); out2.fill(0); out3.fill(0); out4.fill(0); out5.fill(0); out6.fill(0); out7.fill(0); const signed char* ktmp = _kernel.channel(p/8); for (int q=0; q<inch; q++) { short* outptr0 = out0; short* outptr1 = out1; short* outptr2 = out2; short* outptr3 = out3; short* outptr4 = out4; short* outptr5 = out5; short* outptr6 = out6; short* outptr7 = out7; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; int16x8_t _int1 = vdupq_n_s16(1); #else int nn = outw >> 3; int remain = outw & 7; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%12, #384] \n" "prfm pldl1keep, [%9, #384] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%9], #16 \n"//r0 r1 "ld2 {v5.8b, v6.8b}, [%9] \n" "ld1 {v24.8h}, [%1] \n"//out0 "ld1 {v25.8h}, [%2] \n"//out1 "ld1 {v26.8h}, [%3] \n"//out2 "ld1 {v27.8h}, [%4] \n"//out3 "ld1 {v28.8h}, [%5] \n"//out4 "ld1 {v29.8h}, [%6] \n"//out5 "ld1 {v30.8h}, [%7] \n"//out6 "ld1 {v31.8h}, [%8] \n"//out7 "ext v6.8b, v3.8b, v5.8b, #1 \n"//r2 "dup v9.8b, v0.b[0] \n" "dup v11.8b, v0.b[1] \n" "dup v13.8b, v0.b[2] 
\n" "dup v15.8b, v0.b[3] \n" "dup v17.8b, v0.b[4] \n" "dup v19.8b, v0.b[5] \n" "dup v21.8b, v0.b[6] \n" "dup v23.8b, v0.b[7] \n" // r0 "smull v8.8h, v3.8b, v9.8b \n"// out0 += (r00-r07)*k00 "smull v10.8h, v3.8b, v11.8b \n"// out1 += (r00-r07)*k10 "smull v12.8h, v3.8b, v13.8b \n"// out2 += (r00-r07)*k20 "smull v14.8h, v3.8b, v15.8b \n"// out3 += (r00-r07)*k30 "smull v16.8h, v3.8b, v17.8b \n"// out4 += (r00-r07)*k40 "smull v18.8h, v3.8b, v19.8b \n"// out5 += (r00-r07)*k50 "smull v20.8h, v3.8b, v21.8b \n"// out6 += (r00-r07)*k60 "smull v22.8h, v3.8b, v23.8b \n"// out7 += (r00-r07)*k70 "dup v9.8b, v1.b[0] \n" "dup v11.8b, v1.b[1] \n" "dup v13.8b, v1.b[2] \n" "dup v15.8b, v1.b[3] \n" "dup v17.8b, v1.b[4] \n" "dup v19.8b, v1.b[5] \n" "dup v21.8b, v1.b[6] \n" "dup v23.8b, v1.b[7] \n" // r1 "smlal v8.8h, v4.8b, v9.8b \n"// out0 += (r10-r17)*k01 "smlal v10.8h, v4.8b, v11.8b \n"// out1 += (r10-r17)*k11 "smlal v12.8h, v4.8b, v13.8b \n"// out2 += (r10-r17)*k21 "smlal v14.8h, v4.8b, v15.8b \n"// out3 += (r10-r17)*k31 "smlal v16.8h, v4.8b, v17.8b \n"// out4 += (r10-r17)*k41 "smlal v18.8h, v4.8b, v19.8b \n"// out5 += (r10-r17)*k51 "smlal v20.8h, v4.8b, v21.8b \n"// out6 += (r10-r17)*k61 "smlal v22.8h, v4.8b, v23.8b \n"// out7 += (r10-r17)*k71 "dup v9.8b, v2.b[0] \n" "dup v11.8b, v2.b[1] \n" "dup v13.8b, v2.b[2] \n" "dup v15.8b, v2.b[3] \n" "dup v17.8b, v2.b[4] \n" "dup v19.8b, v2.b[5] \n" "dup v21.8b, v2.b[6] \n" "dup v23.8b, v2.b[7] \n" // r2 "smlal v8.8h, v6.8b, v9.8b \n"// out0 += (r20-r27)*k02 "smlal v10.8h, v6.8b, v11.8b \n"// out1 += (r20-r27)*k12 "smlal v12.8h, v6.8b, v13.8b \n"// out2 += (r20-r27)*k22 "smlal v14.8h, v6.8b, v15.8b \n"// out3 += (r20-r27)*k32 "smlal v16.8h, v6.8b, v17.8b \n"// out4 += (r20-r27)*k42 "smlal v18.8h, v6.8b, v19.8b \n"// out5 += (r20-r27)*k52 "smlal v20.8h, v6.8b, v21.8b \n"// out6 += (r20-r27)*k62 "smlal v22.8h, v6.8b, v23.8b \n"// out7 += (r20-r27)*k72 "prfm pldl1keep, [%12, #384] \n" "prfm pldl1keep, [%10, #384] \n" "ld1 {v0.8b, v1.8b, 
v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%10], #16 \n"//r3-r5 "ld2 {v5.8b, v6.8b}, [%10] \n" "dup v9.8b, v0.b[0] \n" "dup v11.8b, v0.b[1] \n" "dup v13.8b, v0.b[2] \n" "dup v15.8b, v0.b[3] \n" "dup v17.8b, v0.b[4] \n" "dup v19.8b, v0.b[5] \n" "dup v21.8b, v0.b[6] \n" "dup v23.8b, v0.b[7] \n" "ext v6.8b, v3.8b, v5.8b, #1 \n" // r3 "smlal v8.8h, v3.8b, v9.8b \n"// out0 += (r30-r37)*k03 "smlal v10.8h, v3.8b, v11.8b \n"// out1 += (r30-r37)*k13 "smlal v12.8h, v3.8b, v13.8b \n"// out2 += (r30-r37)*k23 "smlal v14.8h, v3.8b, v15.8b \n"// out3 += (r30-r37)*k33 "smlal v16.8h, v3.8b, v17.8b \n"// out4 += (r30-r37)*k43 "smlal v18.8h, v3.8b, v19.8b \n"// out5 += (r30-r37)*k53 "smlal v20.8h, v3.8b, v21.8b \n"// out6 += (r30-r37)*k63 "smlal v22.8h, v3.8b, v23.8b \n"// out7 += (r30-r37)*k73 "dup v9.8b, v1.b[0] \n" "dup v11.8b, v1.b[1] \n" "dup v13.8b, v1.b[2] \n" "dup v15.8b, v1.b[3] \n" "dup v17.8b, v1.b[4] \n" "dup v19.8b, v1.b[5] \n" "dup v21.8b, v1.b[6] \n" "dup v23.8b, v1.b[7] \n" // r4 "smlal v8.8h, v4.8b, v9.8b \n"// out0 += (r40-r47)*k04 "smlal v10.8h, v4.8b, v11.8b \n"// out1 += (r40-r47)*k14 "smlal v12.8h, v4.8b, v13.8b \n"// out2 += (r40-r47)*k24 "smlal v14.8h, v4.8b, v15.8b \n"// out3 += (r40-r47)*k34 "smlal v16.8h, v4.8b, v17.8b \n"// out4 += (r40-r47)*k44 "smlal v18.8h, v4.8b, v19.8b \n"// out5 += (r40-r47)*k54 "smlal v20.8h, v4.8b, v21.8b \n"// out6 += (r40-r47)*k64 "smlal v22.8h, v4.8b, v23.8b \n"// out7 += (r40-r47)*k74 "dup v9.8b, v2.b[0] \n" "dup v11.8b, v2.b[1] \n" "dup v13.8b, v2.b[2] \n" "dup v15.8b, v2.b[3] \n" "dup v17.8b, v2.b[4] \n" "dup v19.8b, v2.b[5] \n" "dup v21.8b, v2.b[6] \n" "dup v23.8b, v2.b[7] \n" // r5 "smlal v8.8h, v6.8b, v9.8b \n"// out0 += (r50-r57)*k05 "smlal v10.8h, v6.8b, v11.8b \n"// out1 += (r50-r57)*k15 "smlal v12.8h, v6.8b, v13.8b \n"// out2 += (r50-r57)*k25 "smlal v14.8h, v6.8b, v15.8b \n"// out3 += (r50-r57)*k35 "smlal v16.8h, v6.8b, v17.8b \n"// out4 += (r50-r57)*k45 "smlal v18.8h, v6.8b, v19.8b \n"// out5 += (r50-r57)*k55 
"smlal v20.8h, v6.8b, v21.8b \n"// out6 += (r50-r57)*k65 "smlal v22.8h, v6.8b, v23.8b \n"// out7 += (r50-r57)*k75 "prfm pldl1keep, [%12, #384] \n" "prfm pldl1keep, [%11, #384] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%11], #16 \n"//r6-r8 "ld2 {v5.8b, v6.8b}, [%11] \n" "dup v9.8b, v0.b[0] \n" "dup v11.8b, v0.b[1] \n" "dup v13.8b, v0.b[2] \n" "dup v15.8b, v0.b[3] \n" "dup v17.8b, v0.b[4] \n" "dup v19.8b, v0.b[5] \n" "dup v21.8b, v0.b[6] \n" "dup v23.8b, v0.b[7] \n" "ext v6.8b, v3.8b, v5.8b, #1 \n" // r6 "smlal v8.8h, v3.8b, v9.8b \n"// out0 += (r60-r67)*k06 "smlal v10.8h, v3.8b, v11.8b \n"// out1 += (r60-r67)*k16 "smlal v12.8h, v3.8b, v13.8b \n"// out2 += (r60-r67)*k26 "smlal v14.8h, v3.8b, v15.8b \n"// out3 += (r60-r67)*k36 "smlal v16.8h, v3.8b, v17.8b \n"// out4 += (r60-r67)*k46 "smlal v18.8h, v3.8b, v19.8b \n"// out5 += (r60-r67)*k56 "smlal v20.8h, v3.8b, v21.8b \n"// out6 += (r60-r67)*k66 "smlal v22.8h, v3.8b, v23.8b \n"// out7 += (r60-r67)*k76 "dup v9.8b, v1.b[0] \n" "dup v11.8b, v1.b[1] \n" "dup v13.8b, v1.b[2] \n" "dup v15.8b, v1.b[3] \n" "dup v17.8b, v1.b[4] \n" "dup v19.8b, v1.b[5] \n" "dup v21.8b, v1.b[6] \n" "dup v23.8b, v1.b[7] \n" // r7 "smlal v8.8h, v4.8b, v9.8b \n"// out0 += (r70-r77)*k07 "smlal v10.8h, v4.8b, v11.8b \n"// out1 += (r70-r77)*k17 "smlal v12.8h, v4.8b, v13.8b \n"// out2 += (r70-r77)*k27 "smlal v14.8h, v4.8b, v15.8b \n"// out3 += (r70-r77)*k37 "smlal v16.8h, v4.8b, v17.8b \n"// out4 += (r70-r77)*k47 "smlal v18.8h, v4.8b, v19.8b \n"// out5 += (r70-r77)*k57 "smlal v20.8h, v4.8b, v21.8b \n"// out6 += (r70-r77)*k67 "smlal v22.8h, v4.8b, v23.8b \n"// out7 += (r70-r77)*k77 "dup v9.8b, v2.b[0] \n" "dup v11.8b, v2.b[1] \n" "dup v13.8b, v2.b[2] \n" "dup v15.8b, v2.b[3] \n" "dup v17.8b, v2.b[4] \n" "dup v19.8b, v2.b[5] \n" "dup v21.8b, v2.b[6] \n" "dup v23.8b, v2.b[7] \n" // r8 "smlal v8.8h, v6.8b, v9.8b \n"// out0 += (r80-r87)*k08 "smlal v10.8h, v6.8b, v11.8b \n"// out1 += (r80-r87)*k18 "smlal v12.8h, v6.8b, v13.8b 
\n"// out2 += (r80-r87)*k28 "smlal v14.8h, v6.8b, v15.8b \n"// out3 += (r80-r87)*k38 "smlal v16.8h, v6.8b, v17.8b \n"// out4 += (r80-r87)*k48 "smlal v18.8h, v6.8b, v19.8b \n"// out5 += (r80-r87)*k58 "smlal v20.8h, v6.8b, v21.8b \n"// out6 += (r80-r87)*k68 "smlal v22.8h, v6.8b, v23.8b \n"// out7 += (r80-r87)*k78 // add 1 and shift right 1 // "add v8.8h, v8.8h, %26.8h \n" // "add v10.8h, v10.8h, %26.8h \n" // "add v12.8h, v12.8h, %26.8h \n" // "add v14.8h, v14.8h, %26.8h \n" // "add v16.8h, v16.8h, %26.8h \n" // "add v18.8h, v18.8h, %26.8h \n" // "add v20.8h, v20.8h, %26.8h \n" // "add v22.8h, v22.8h, %26.8h \n" // "sshr v8.8h, v8.8h, #1 \n" // "sshr v10.8h, v10.8h, #1 \n" // "sshr v12.8h, v12.8h, #1 \n" // "sshr v14.8h, v14.8h, #1 \n" // "sshr v16.8h, v16.8h, #1 \n" // "sshr v18.8h, v18.8h, #1 \n" // "sshr v20.8h, v20.8h, #1 \n" // "sshr v22.8h, v22.8h, #1 \n" // add saturate to s16 "sqadd v24.8h, v24.8h, v8.8h \n" "sqadd v25.8h, v25.8h, v10.8h \n" "sqadd v26.8h, v26.8h, v12.8h \n" "sqadd v27.8h, v27.8h, v14.8h \n" "sqadd v28.8h, v28.8h, v16.8h \n" "sqadd v29.8h, v29.8h, v18.8h \n" "sqadd v30.8h, v30.8h, v20.8h \n" "sqadd v31.8h, v31.8h, v22.8h \n" "st1 {v24.8h}, [%1], #16 \n" "st1 {v25.8h}, [%2], #16 \n" "st1 {v26.8h}, [%3], #16 \n" "st1 {v27.8h}, [%4], #16 \n" "st1 {v28.8h}, [%5], #16 \n" "st1 {v29.8h}, [%6], #16 \n" "st1 {v30.8h}, [%7], #16 \n" "st1 {v31.8h}, [%8], #16 \n" "subs %w0, %w0, #1 \n" "sub %12, %12, #72 \n"// reset ktmp "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp), "w"(_int1) // %26 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v8", "v9", "v10", "v11", "v12", 
"v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: \n" "pld [%1, #128] \n" "vld1.s32 {d16-d17}, [%1] \n"// out0 "pld [%2, #128] \n" "vld1.s32 {d18-d19}, [%2] \n"// out1 "pld [%3, #128] \n" "vld1.s32 {d20-d21}, [%3] \n"// out2 "pld [%4, #128] \n" "vld1.s32 {d22-d23}, [%4] \n"// out3 // r0 "pld [%9, #64] \n" "vld2.s8 {d8-d9}, [%9] \n"// d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015) "add %9, #8 \n" "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k00-k70) d1(k01-k71) d2(k02-k72) "pld [%5, #128] \n" "vld1.s32 {d24-d25}, [%5] \n"// out4 "pld [%6, #128] \n" "vld1.s32 {d26-d27}, [%6] \n"// out5 "vmovl.s8 q2, d2 \n"// q2(k02-k72) "vmovl.s8 q1, d1 \n"// q1(k01-k71) "vmovl.s8 q0, d0 \n"// q0(k00-k70) "vext.s8 d12, d8, d8, #1 \n"// d12(a02 a04 a06 a08 x x x x) "pld [%7, #128] \n" "vld1.s32 {d28-d29}, [%7] \n"// out6 "vmovl.s8 q5, d9 \n"// q5(a01 a03 a05 a07 a09 a011 a013 a015) d11 "vmovl.s8 q4, d8 \n"// q4(a00 a02 a04 a06 a08 a010 a012 a014) d9 "vmovl.s8 q6, d12 \n"// q6(a02 a04 a06 a08 a010 a012 a014 a016) d13 "pld [%8, #128] \n" "vld1.s32 {d30-d31}, [%8] \n"// out7 "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a00 a02 a04 a06) * k00 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a00 a02 a04 a06) * k10 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a00 a02 a04 a06) * k20 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a00 a02 a04 a06) * k30 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a00 a02 a04 a06) * k40 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a00 a02 a04 a06) * k50 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a00 a02 a04 a06) * k60 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a00 a02 a04 a06) * k70 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a01-a07) * k01 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a01-a07) * k11 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a01-a07) * k21 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a01-a07) * k31 "vmlal.s16 
q12, d10, d3[0] \n"// sum4 += (a01-a07) * k41 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a01-a07) * k51 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a01-a07) * k61 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a01-a07) * k71 "pld [%10, #64] \n" "vld2.s8 {d8-d9}, [%10] \n"// d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115) "add %10, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a02-a08) * k02 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a02-a08) * k12 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a02-a08) * k22 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a02-a08) * k32 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k03-k73) d1(k04-k74) d2(k05-k75) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a02-a08) * k42 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a02-a08) * k52 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a02-a08) * k62 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a02-a08) * k72 // r1 "vext.s8 d12, d8, d8, #1 \n"// d12(a12 a14 a16 a18 x x x x) "vmovl.s8 q2, d2 \n"// q2(k05-k75) "vmovl.s8 q1, d1 \n"// q1(k04-k74) "vmovl.s8 q0, d0 \n"// q0(k03-k73) "vmovl.s8 q5, d9 \n"// q5(a11-a115) "vmovl.s8 q4, d8 \n"// q4(a10-a114) "vmovl.s8 q6, d12 \n"// q6(a12-a116) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a10-a16) * k03 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a10-a16) * k13 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a10-a16) * k23 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a10-a16) * k33 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a10-a16) * k43 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a10-a16) * k53 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a10-a16) * k63 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a10-a16) * k73 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a11-a17) * k04 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a11-a17) * k14 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a11-a17) * k24 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a11-a17) * k34 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a11-a17) * k44 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a11-a17) * k54 "vmlal.s16 q14, d10, d3[2] 
\n"// sum6 += (a11-a17) * k64 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a11-a17) * k74 "pld [%11, #64] \n" "vld2.s8 {d8-d9}, [%11] \n"// d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 a29 a211 a213 a215) "add %11, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a12-a18) * k05 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a12-a18) * k15 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a12-a18) * k25 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a12-a18) * k35 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k06-k76) d1(k07-k77) d2(k08-k78) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a12-a18) * k45 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a12-a18) * k55 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a12-a18) * k65 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a12-a18) * k75 // r2 "vext.s8 d12, d8, d8, #1 \n"// d12(a22 a24 a26 a28 x x x x) "vmovl.s8 q2, d2 \n"// q2(k08-k78) "vmovl.s8 q1, d1 \n"// q1(k07-k77) "vmovl.s8 q0, d0 \n"// q0(k06-k76) "vmovl.s8 q5, d9 \n"// q5(a21-a215) "vmovl.s8 q4, d8 \n"// q4(a20-a214) "vmovl.s8 q6, d12 \n"// q6(a22-a216) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a20-a26) * k06 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a20-a26) * k16 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a20-a26) * k26 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a20-a26) * k36 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a20-a26) * k46 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a20-a26) * k56 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a20-a26) * k66 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a20-a26) * k76 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a21-a27) * k07 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a21-a27) * k17 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a21-a27) * k27 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a21-a27) * k37 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a21-a27) * k47 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a21-a27) * k57 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a21-a27) * k67 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a21-a27) * k77 "vmlal.s16 q8, d12, d4[0] \n"// sum0 += 
(a22-a28) * k08 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a22-a28) * k18 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a22-a28) * k28 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a22-a28) * k38 "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a22-a28) * k48 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a22-a28) * k58 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a22-a28) * k68 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a22-a28) * k78 // save s32 to memory "sub %12, %12, #72 \n" "vst1.s32 {d16-d17}, [%1]! \n"// out0 "vst1.s32 {d18-d19}, [%2]! \n"// out1 "vst1.s32 {d20-d21}, [%3]! \n"// out2 "vst1.s32 {d22-d23}, [%4]! \n"// out3 "subs %0, #1 \n" "vst1.s32 {d24-d25}, [%5]! \n"// out4 "vst1.s32 {d26-d27}, [%6]! \n"// out5 "vst1.s32 {d28-d29}, [%7]! \n"// out6 "vst1.s32 {d30-d31}, [%8]! \n"// out7 "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON #if 1 //__aarch64__ if (remain > 4) { remain -= 4; asm volatile( "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%9] \n"//r0 r1 "add %9, %9, #8 \n" "ld1 {v24.4h}, [%1] \n"//out0 "ld1 {v25.4h}, [%2] \n"//out1 "ld1 {v26.4h}, [%3] \n"//out2 "ld1 {v27.4h}, [%4] \n"//out3 "ld1 {v28.4h}, [%5] \n"//out4 "ld1 {v29.4h}, [%6] \n"//out5 "ld1 {v30.4h}, [%7] \n"//out6 "ld1 {v31.4h}, [%8] \n"//out7 "ext v6.8b, v3.8b, v3.8b, #1 \n"//r2 "dup v9.8b, v0.b[0] \n" "dup v11.8b, v0.b[1] \n" "dup v13.8b, v0.b[2] \n" "dup v15.8b, v0.b[3] \n" "dup v17.8b, v0.b[4] \n" "dup v19.8b, v0.b[5] \n" "dup v21.8b, v0.b[6] \n" 
"dup v23.8b, v0.b[7] \n" // r0 "smull v8.8h, v3.8b, v9.8b \n"// out0 += (r00-r07)*k00 "smull v10.8h, v3.8b, v11.8b \n"// out1 += (r00-r07)*k10 "smull v12.8h, v3.8b, v13.8b \n"// out2 += (r00-r07)*k20 "smull v14.8h, v3.8b, v15.8b \n"// out3 += (r00-r07)*k30 "smull v16.8h, v3.8b, v17.8b \n"// out4 += (r00-r07)*k40 "smull v18.8h, v3.8b, v19.8b \n"// out5 += (r00-r07)*k50 "smull v20.8h, v3.8b, v21.8b \n"// out6 += (r00-r07)*k60 "smull v22.8h, v3.8b, v23.8b \n"// out7 += (r00-r07)*k70 "dup v9.8b, v1.b[0] \n" "dup v11.8b, v1.b[1] \n" "dup v13.8b, v1.b[2] \n" "dup v15.8b, v1.b[3] \n" "dup v17.8b, v1.b[4] \n" "dup v19.8b, v1.b[5] \n" "dup v21.8b, v1.b[6] \n" "dup v23.8b, v1.b[7] \n" // r1 "smlal v8.8h, v4.8b, v9.8b \n"// out0 += (r10-r17)*k01 "smlal v10.8h, v4.8b, v11.8b \n"// out1 += (r10-r17)*k11 "smlal v12.8h, v4.8b, v13.8b \n"// out2 += (r10-r17)*k21 "smlal v14.8h, v4.8b, v15.8b \n"// out3 += (r10-r17)*k31 "smlal v16.8h, v4.8b, v17.8b \n"// out4 += (r10-r17)*k41 "smlal v18.8h, v4.8b, v19.8b \n"// out5 += (r10-r17)*k51 "smlal v20.8h, v4.8b, v21.8b \n"// out6 += (r10-r17)*k61 "smlal v22.8h, v4.8b, v23.8b \n"// out7 += (r10-r17)*k71 "dup v9.8b, v2.b[0] \n" "dup v11.8b, v2.b[1] \n" "dup v13.8b, v2.b[2] \n" "dup v15.8b, v2.b[3] \n" "dup v17.8b, v2.b[4] \n" "dup v19.8b, v2.b[5] \n" "dup v21.8b, v2.b[6] \n" "dup v23.8b, v2.b[7] \n" // r2 "smlal v8.8h, v6.8b, v9.8b \n"// out0 += (r20-r27)*k02 "smlal v10.8h, v6.8b, v11.8b \n"// out1 += (r20-r27)*k12 "smlal v12.8h, v6.8b, v13.8b \n"// out2 += (r20-r27)*k22 "smlal v14.8h, v6.8b, v15.8b \n"// out3 += (r20-r27)*k32 "smlal v16.8h, v6.8b, v17.8b \n"// out4 += (r20-r27)*k42 "smlal v18.8h, v6.8b, v19.8b \n"// out5 += (r20-r27)*k52 "smlal v20.8h, v6.8b, v21.8b \n"// out6 += (r20-r27)*k62 "smlal v22.8h, v6.8b, v23.8b \n"// out7 += (r20-r27)*k72 "prfm pldl1keep, [%12, #384] \n" "prfm pldl1keep, [%10, #384] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%10] \n"//r3-r5 "add %10, %10, #8 \n" "dup v9.8b, v0.b[0] 
\n" "dup v11.8b, v0.b[1] \n" "dup v13.8b, v0.b[2] \n" "dup v15.8b, v0.b[3] \n" "dup v17.8b, v0.b[4] \n" "dup v19.8b, v0.b[5] \n" "dup v21.8b, v0.b[6] \n" "dup v23.8b, v0.b[7] \n" "ext v6.8b, v3.8b, v3.8b, #1 \n" // r3 "smlal v8.8h, v3.8b, v9.8b \n"// out0 += (r30-r37)*k03 "smlal v10.8h, v3.8b, v11.8b \n"// out1 += (r30-r37)*k13 "smlal v12.8h, v3.8b, v13.8b \n"// out2 += (r30-r37)*k23 "smlal v14.8h, v3.8b, v15.8b \n"// out3 += (r30-r37)*k33 "smlal v16.8h, v3.8b, v17.8b \n"// out4 += (r30-r37)*k43 "smlal v18.8h, v3.8b, v19.8b \n"// out5 += (r30-r37)*k53 "smlal v20.8h, v3.8b, v21.8b \n"// out6 += (r30-r37)*k63 "smlal v22.8h, v3.8b, v23.8b \n"// out7 += (r30-r37)*k73 "dup v9.8b, v1.b[0] \n" "dup v11.8b, v1.b[1] \n" "dup v13.8b, v1.b[2] \n" "dup v15.8b, v1.b[3] \n" "dup v17.8b, v1.b[4] \n" "dup v19.8b, v1.b[5] \n" "dup v21.8b, v1.b[6] \n" "dup v23.8b, v1.b[7] \n" // r4 "smlal v8.8h, v4.8b, v9.8b \n"// out0 += (r40-r47)*k04 "smlal v10.8h, v4.8b, v11.8b \n"// out1 += (r40-r47)*k14 "smlal v12.8h, v4.8b, v13.8b \n"// out2 += (r40-r47)*k24 "smlal v14.8h, v4.8b, v15.8b \n"// out3 += (r40-r47)*k34 "smlal v16.8h, v4.8b, v17.8b \n"// out4 += (r40-r47)*k44 "smlal v18.8h, v4.8b, v19.8b \n"// out5 += (r40-r47)*k54 "smlal v20.8h, v4.8b, v21.8b \n"// out6 += (r40-r47)*k64 "smlal v22.8h, v4.8b, v23.8b \n"// out7 += (r40-r47)*k74 "dup v9.8b, v2.b[0] \n" "dup v11.8b, v2.b[1] \n" "dup v13.8b, v2.b[2] \n" "dup v15.8b, v2.b[3] \n" "dup v17.8b, v2.b[4] \n" "dup v19.8b, v2.b[5] \n" "dup v21.8b, v2.b[6] \n" "dup v23.8b, v2.b[7] \n" // r5 "smlal v8.8h, v6.8b, v9.8b \n"// out0 += (r50-r57)*k05 "smlal v10.8h, v6.8b, v11.8b \n"// out1 += (r50-r57)*k15 "smlal v12.8h, v6.8b, v13.8b \n"// out2 += (r50-r57)*k25 "smlal v14.8h, v6.8b, v15.8b \n"// out3 += (r50-r57)*k35 "smlal v16.8h, v6.8b, v17.8b \n"// out4 += (r50-r57)*k45 "smlal v18.8h, v6.8b, v19.8b \n"// out5 += (r50-r57)*k55 "smlal v20.8h, v6.8b, v21.8b \n"// out6 += (r50-r57)*k65 "smlal v22.8h, v6.8b, v23.8b \n"// out7 += (r50-r57)*k75 "ld1 
{v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%11] \n"//r6-r8 "add %11, %11, #8 \n" "dup v9.8b, v0.b[0] \n" "dup v11.8b, v0.b[1] \n" "dup v13.8b, v0.b[2] \n" "dup v15.8b, v0.b[3] \n" "dup v17.8b, v0.b[4] \n" "dup v19.8b, v0.b[5] \n" "dup v21.8b, v0.b[6] \n" "dup v23.8b, v0.b[7] \n" "ext v6.8b, v3.8b, v3.8b, #1 \n" // r6 "smlal v8.8h, v3.8b, v9.8b \n"// out0 += (r60-r67)*k06 "smlal v10.8h, v3.8b, v11.8b \n"// out1 += (r60-r67)*k16 "smlal v12.8h, v3.8b, v13.8b \n"// out2 += (r60-r67)*k26 "smlal v14.8h, v3.8b, v15.8b \n"// out3 += (r60-r67)*k36 "smlal v16.8h, v3.8b, v17.8b \n"// out4 += (r60-r67)*k46 "smlal v18.8h, v3.8b, v19.8b \n"// out5 += (r60-r67)*k56 "smlal v20.8h, v3.8b, v21.8b \n"// out6 += (r60-r67)*k66 "smlal v22.8h, v3.8b, v23.8b \n"// out7 += (r60-r67)*k76 "dup v9.8b, v1.b[0] \n" "dup v11.8b, v1.b[1] \n" "dup v13.8b, v1.b[2] \n" "dup v15.8b, v1.b[3] \n" "dup v17.8b, v1.b[4] \n" "dup v19.8b, v1.b[5] \n" "dup v21.8b, v1.b[6] \n" "dup v23.8b, v1.b[7] \n" // r7 "smlal v8.8h, v4.8b, v9.8b \n"// out0 += (r70-r77)*k07 "smlal v10.8h, v4.8b, v11.8b \n"// out1 += (r70-r77)*k17 "smlal v12.8h, v4.8b, v13.8b \n"// out2 += (r70-r77)*k27 "smlal v14.8h, v4.8b, v15.8b \n"// out3 += (r70-r77)*k37 "smlal v16.8h, v4.8b, v17.8b \n"// out4 += (r70-r77)*k47 "smlal v18.8h, v4.8b, v19.8b \n"// out5 += (r70-r77)*k57 "smlal v20.8h, v4.8b, v21.8b \n"// out6 += (r70-r77)*k67 "smlal v22.8h, v4.8b, v23.8b \n"// out7 += (r70-r77)*k77 "dup v9.8b, v2.b[0] \n" "dup v11.8b, v2.b[1] \n" "dup v13.8b, v2.b[2] \n" "dup v15.8b, v2.b[3] \n" "dup v17.8b, v2.b[4] \n" "dup v19.8b, v2.b[5] \n" "dup v21.8b, v2.b[6] \n" "dup v23.8b, v2.b[7] \n" // r8 "smlal v8.8h, v6.8b, v9.8b \n"// out0 += (r80-r87)*k08 "smlal v10.8h, v6.8b, v11.8b \n"// out1 += (r80-r87)*k18 "smlal v12.8h, v6.8b, v13.8b \n"// out2 += (r80-r87)*k28 "smlal v14.8h, v6.8b, v15.8b \n"// out3 += (r80-r87)*k38 "smlal v16.8h, v6.8b, v17.8b \n"// out4 += (r80-r87)*k48 "smlal v18.8h, v6.8b, v19.8b \n"// out5 += (r80-r87)*k58 
"smlal v20.8h, v6.8b, v21.8b \n"// out6 += (r80-r87)*k68 "smlal v22.8h, v6.8b, v23.8b \n"// out7 += (r80-r87)*k78 // add 1 and shift right 1 // "add v8.4h, v8.4h, %26.4h \n" // "add v10.4h, v10.4h, %26.4h \n" // "add v12.4h, v12.4h, %26.4h \n" // "add v14.4h, v14.4h, %26.4h \n" // "add v16.4h, v16.4h, %26.4h \n" // "add v18.4h, v18.4h, %26.4h \n" // "add v20.4h, v20.4h, %26.4h \n" // "add v22.4h, v22.4h, %26.4h \n" // "sshr v8.4h, v8.4h, #1 \n" // "sshr v10.4h, v10.4h, #1 \n" // "sshr v12.4h, v12.4h, #1 \n" // "sshr v14.4h, v14.4h, #1 \n" // "sshr v16.4h, v16.4h, #1 \n" // "sshr v18.4h, v18.4h, #1 \n" // "sshr v20.4h, v20.4h, #1 \n" // "sshr v22.4h, v22.4h, #1 \n" // add saturate to s16 "sqadd v24.4h, v24.4h, v8.4h \n" "sqadd v25.4h, v25.4h, v10.4h \n" "sqadd v26.4h, v26.4h, v12.4h \n" "sqadd v27.4h, v27.4h, v14.4h \n" "sqadd v28.4h, v28.4h, v16.4h \n" "sqadd v29.4h, v29.4h, v18.4h \n" "sqadd v30.4h, v30.4h, v20.4h \n" "sqadd v31.4h, v31.4h, v22.4h \n" "st1 {v24.4h}, [%1], #8 \n" "st1 {v25.4h}, [%2], #8 \n" "st1 {v26.4h}, [%3], #8 \n" "st1 {v27.4h}, [%4], #8 \n" "st1 {v28.4h}, [%5], #8 \n" "st1 {v29.4h}, [%6], #8 \n" "st1 {v30.4h}, [%7], #8 \n" "st1 {v31.4h}, [%8], #8 \n" "sub %12, %12, #72 \n"// reset ktmp : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp), "w"(_int1) // %26 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } #endif for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ int8x8_t _r0 = vld1_s8(r0);// (a00 a01 
a02 ....) int8x8_t _r1 = vld1_s8(r1);// (a10 a11 a12 ....) int8x8_t _r2 = vld1_s8(r2);// (a20 a21 a22 ....) int8x8_t _r00 = vdup_n_s8(_r0[0]); int8x8_t _r01 = vdup_n_s8(_r0[1]); int8x8_t _r02 = vdup_n_s8(_r0[2]); int8x8_t _r10 = vdup_n_s8(_r1[0]); int8x8_t _r11 = vdup_n_s8(_r1[1]); int8x8_t _r12 = vdup_n_s8(_r1[2]); int8x8_t _r20 = vdup_n_s8(_r2[0]); int8x8_t _r21 = vdup_n_s8(_r2[1]); int8x8_t _r22 = vdup_n_s8(_r2[2]); int16x8_t _sum07; _sum07 = vld1q_lane_s16(outptr0, _sum07, 0);// out0 _sum07 = vld1q_lane_s16(outptr1, _sum07, 1);// out1 _sum07 = vld1q_lane_s16(outptr2, _sum07, 2);// out2 _sum07 = vld1q_lane_s16(outptr3, _sum07, 3);// out3 _sum07 = vld1q_lane_s16(outptr4, _sum07, 4);// out4 _sum07 = vld1q_lane_s16(outptr5, _sum07, 5);// out5 _sum07 = vld1q_lane_s16(outptr6, _sum07, 6);// out6 _sum07 = vld1q_lane_s16(outptr7, _sum07, 7);// out7 // k0 - k2 int8x8_t _k0 = vld1_s8(ktmp); //(k00-k70) int8x8_t _k1 = vld1_s8(ktmp+8); //(k01-k71) int8x8_t _k2 = vld1_s8(ktmp+16); //(k02-k72) int16x8_t _sum0 = vmull_s8(_k0, _r00); int16x8_t _sum1 = vmull_s8(_k1, _r01); int16x8_t _sum2 = vmull_s8(_k2, _r02); // k3 - k5 _k0 = vld1_s8(ktmp+24); //(k03-k73) _k1 = vld1_s8(ktmp+32); //(k04-k74) _k2 = vld1_s8(ktmp+40); //(k05-k75) _sum0 = vmlal_s8(_sum0, _k0, _r10); _sum1 = vmlal_s8(_sum1, _k1, _r11); _sum2 = vmlal_s8(_sum2, _k2, _r12); // k6 - k8 _k0 = vld1_s8(ktmp+48); //(k06-k76) _k1 = vld1_s8(ktmp+56); //(k07-k77) _k2 = vld1_s8(ktmp+64); //(k08-k78) _sum0 = vmlal_s8(_sum0, _k0, _r20); _sum1 = vmlal_s8(_sum1, _k1, _r21); _sum2 = vmlal_s8(_sum2, _k2, _r22); _sum1 = vaddq_s16(_sum1, _sum0); _sum2 = vaddq_s16(_sum2, _sum1); // add 1 shift right 1 // _sum2 = vaddq_s16(_sum2, _int1); // _sum2 = vshrq_n_s16(_sum2, 1); _sum07 = vqaddq_s16(_sum07, _sum2); vst1q_lane_s16(outptr0, _sum07, 0); vst1q_lane_s16(outptr1, _sum07, 1); vst1q_lane_s16(outptr2, _sum07, 2); vst1q_lane_s16(outptr3, _sum07, 3); vst1q_lane_s16(outptr4, _sum07, 4); vst1q_lane_s16(outptr5, _sum07, 5); 
vst1q_lane_s16(outptr6, _sum07, 6); vst1q_lane_s16(outptr7, _sum07, 7); outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #else // __aarch64__ asm volatile( "pld [%8, #64] \n" "vld1.s8 {d0}, [%8] \n"// d0(a00 a01 a02 ....) "pld [%9, #64] \n" "vld1.s8 {d2}, [%9] \n"// d2(a10 a11 a12 ....) "pld [%10, #64] \n" "vld1.s8 {d4}, [%10] \n"// d4(a20 a21 a22 ....) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n"// d6(k00-k70) d7(k01-k71) d8(k02-k72) "vmovl.s8 q0, d0 \n"// d0(a00 a01 a02 x) "vmovl.s8 q1, d2 \n"// d2(a10 a11 a12 x) "vmovl.s8 q2, d4 \n"// d4(a20 a21 a22 x) "vmovl.s8 q5, d8 \n"// d10(k02-k32) d11(k42-k72) "vmovl.s8 q4, d7 \n"// d8(k01-k31) d9(k41-k71) "vmovl.s8 q3, d6 \n"// d6(k00-k30) d7(k40-k70) "vld1.s32 {d20[0]}, [%0] \n"// out0 q10 "vld1.s32 {d20[1]}, [%1] \n"// out1 "vld1.s32 {d21[0]}, [%2] \n"// out2 "vld1.s32 {d21[1]}, [%3] \n"// out3 "pld [%11, #64] \n" "vld1.s8 {d24-d26}, [%11]! \n" "vmovl.s8 q14, d26 \n"// d28(k05-k35) d29(k45-k75) "vmovl.s8 q13, d25 \n"// d26(k04-k34) d27(k44-k74) "vmovl.s8 q12, d24 \n"// d24(k03-k33) d25(k43-k73) "vld1.s32 {d22[0]}, [%4] \n"// out4 q11 "vld1.s32 {d22[1]}, [%5] \n"// out5 "vld1.s32 {d23[0]}, [%6] \n"// out6 "vld1.s32 {d23[1]}, [%7] \n"// out7 "vmull.s16 q6, d6, d0[0] \n"// a00 x (k00-k30) "vmull.s16 q7, d7, d0[0] \n"// a00 x (k40-k70) "vmull.s16 q8, d8, d0[1] \n"// a01 x (k01-k31) "vmull.s16 q9, d9, d0[1] \n"// a01 x (k41-k71) "vmlal.s16 q10, d10, d0[2] \n"// a02 x (k02-k32) "vmlal.s16 q11, d11, d0[2] \n"// a02 x (k42-k72) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! 
\n" "vmovl.s8 q5, d8 \n"// d10(k08-k38) d11(k48-k78) "vmovl.s8 q4, d7 \n"// d8(k07-k37) d9(k47-k77) "vmovl.s8 q3, d6 \n"// d6(k06-k36) d7(k46-k76) "vmlal.s16 q6, d24, d2[0] \n"// a10 x (k03-k33) "vmlal.s16 q7, d25, d2[0] \n"// a10 x (k43-k73) "vmlal.s16 q8, d26, d2[1] \n"// a11 x (k04-k34) "vmlal.s16 q9, d27, d2[1] \n"// a11 x (k44-k74) "vmlal.s16 q10, d28, d2[2] \n"// a12 x (k05-k35) "vmlal.s16 q11, d29, d2[2] \n"// a12 x (k45-k75) "vmlal.s16 q6, d6, d4[0] \n"// a20 x (k06-k36) "vmlal.s16 q7, d7, d4[0] \n"// a20 x (k46-k76) "vmlal.s16 q8, d8, d4[1] \n"// a21 x (k07-k37) "vmlal.s16 q9, d9, d4[1] \n"// a21 x (k47-k77) "vmlal.s16 q10, d10, d4[2] \n"// a22 x (k08-k38) "vmlal.s16 q11, d11, d4[2] \n"// a22 x (k48-k78) "vadd.s32 q8, q8, q6 \n" "vadd.s32 q9, q9, q7 \n" "sub %11, %11, #72 \n" "vadd.s32 q10, q10, q8 \n" "vadd.s32 q11, q11, q9 \n" "vst1.s32 {d20[0]}, [%0]! \n"// out0 "vst1.s32 {d20[1]}, [%1]! \n"// out1 "vst1.s32 {d21[0]}, [%2]! \n"// out2 "vst1.s32 {d21[1]}, [%3]! \n"// out3 "vst1.s32 {d22[0]}, [%4]! \n"// out4 "vst1.s32 {d22[1]}, [%5]! \n"// out5 "vst1.s32 {d23[0]}, [%6]! \n"// out6 "vst1.s32 {d23[1]}, [%7]! 
\n"// out7 : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else // __ARM_NEON short sum0 = 0; short sum1 = 0; short sum2 = 0; short sum3 = 0; short sum4 = 0; short sum5 = 0; short sum6 = 0; short sum7 = 0; sum0 += (short)r0[0] * ktmp[0]; sum1 += (short)r0[0] * ktmp[1]; sum2 += (short)r0[0] * ktmp[2]; sum3 += (short)r0[0] * ktmp[3]; sum4 += (short)r0[0] * ktmp[4]; sum5 += (short)r0[0] * ktmp[5]; sum6 += (short)r0[0] * ktmp[6]; sum7 += (short)r0[0] * ktmp[7]; ktmp += 8; sum0 += (short)r0[1] * ktmp[0]; sum1 += (short)r0[1] * ktmp[1]; sum2 += (short)r0[1] * ktmp[2]; sum3 += (short)r0[1] * ktmp[3]; sum4 += (short)r0[1] * ktmp[4]; sum5 += (short)r0[1] * ktmp[5]; sum6 += (short)r0[1] * ktmp[6]; sum7 += (short)r0[1] * ktmp[7]; ktmp += 8; sum0 += (short)r0[2] * ktmp[0]; sum1 += (short)r0[2] * ktmp[1]; sum2 += (short)r0[2] * ktmp[2]; sum3 += (short)r0[2] * ktmp[3]; sum4 += (short)r0[2] * ktmp[4]; sum5 += (short)r0[2] * ktmp[5]; sum6 += (short)r0[2] * ktmp[6]; sum7 += (short)r0[2] * ktmp[7]; ktmp += 8; sum0 += (short)r1[0] * ktmp[0]; sum1 += (short)r1[0] * ktmp[1]; sum2 += (short)r1[0] * ktmp[2]; sum3 += (short)r1[0] * ktmp[3]; sum4 += (short)r1[0] * ktmp[4]; sum5 += (short)r1[0] * ktmp[5]; sum6 += (short)r1[0] * ktmp[6]; sum7 += (short)r1[0] * ktmp[7]; ktmp += 8; sum0 += (short)r1[1] * ktmp[0]; sum1 += (short)r1[1] * ktmp[1]; sum2 += (short)r1[1] * ktmp[2]; sum3 += (short)r1[1] * ktmp[3]; sum4 += (short)r1[1] * ktmp[4]; sum5 += (short)r1[1] * ktmp[5]; sum6 += (short)r1[1] * ktmp[6]; sum7 += (short)r1[1] 
* ktmp[7]; ktmp += 8; sum0 += (short)r1[2] * ktmp[0]; sum1 += (short)r1[2] * ktmp[1]; sum2 += (short)r1[2] * ktmp[2]; sum3 += (short)r1[2] * ktmp[3]; sum4 += (short)r1[2] * ktmp[4]; sum5 += (short)r1[2] * ktmp[5]; sum6 += (short)r1[2] * ktmp[6]; sum7 += (short)r1[2] * ktmp[7]; ktmp += 8; sum0 += (short)r2[0] * ktmp[0]; sum1 += (short)r2[0] * ktmp[1]; sum2 += (short)r2[0] * ktmp[2]; sum3 += (short)r2[0] * ktmp[3]; sum4 += (short)r2[0] * ktmp[4]; sum5 += (short)r2[0] * ktmp[5]; sum6 += (short)r2[0] * ktmp[6]; sum7 += (short)r2[0] * ktmp[7]; ktmp += 8; sum0 += (short)r2[1] * ktmp[0]; sum1 += (short)r2[1] * ktmp[1]; sum2 += (short)r2[1] * ktmp[2]; sum3 += (short)r2[1] * ktmp[3]; sum4 += (short)r2[1] * ktmp[4]; sum5 += (short)r2[1] * ktmp[5]; sum6 += (short)r2[1] * ktmp[6]; sum7 += (short)r2[1] * ktmp[7]; ktmp += 8; sum0 += (short)r2[2] * ktmp[0]; sum1 += (short)r2[2] * ktmp[1]; sum2 += (short)r2[2] * ktmp[2]; sum3 += (short)r2[2] * ktmp[3]; sum4 += (short)r2[2] * ktmp[4]; sum5 += (short)r2[2] * ktmp[5]; sum6 += (short)r2[2] * ktmp[6]; sum7 += (short)r2[2] * ktmp[7]; ktmp += 8; // sum0 = (sum0 + 1) >> 1; // sum1 = (sum1 + 1) >> 1; // sum2 = (sum2 + 1) >> 1; // sum3 = (sum3 + 1) >> 1; // sum4 = (sum4 + 1) >> 1; // sum5 = (sum5 + 1) >> 1; // sum6 = (sum6 + 1) >> 1; // sum7 = (sum7 + 1) >> 1; *outptr0 = saturate2int16((int)(*outptr0) + sum0); *outptr1 = saturate2int16((int)(*outptr1) + sum1); *outptr2 = saturate2int16((int)(*outptr2) + sum2); *outptr3 = saturate2int16((int)(*outptr3) + sum3); *outptr4 = saturate2int16((int)(*outptr4) + sum4); *outptr5 = saturate2int16((int)(*outptr5) + sum5); *outptr6 = saturate2int16((int)(*outptr6) + sum6); *outptr7 = saturate2int16((int)(*outptr7) + sum7); ktmp -= 8*9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8*9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int 
p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char* ktmp = _kernel.channel(p/8 + p%8); for (int q=0; q<inch; q++) { short* outptr = out; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; int16x8_t _int1 = vdupq_n_s16(1); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b}, [%5] \n"//ktmp (k0-k7) "ld2 {v2.8b, v3.8b}, [%2], #16 \n"//r0-r2 "ld2 {v4.8b, v5.8b}, [%2] \n" "ld2 {v6.8b, v7.8b}, [%3], #16 \n"//r3-r5 "ld2 {v8.8b, v9.8b}, [%3] \n" "ld2 {v10.8b, v11.8b}, [%4], #16 \n"//r6-r8 "ld2 {v12.8b, v13.8b}, [%4] \n" "ld1 {v14.8h}, [%1] \n"//out0 "ext v4.8b, v2.8b, v4.8b, #1 \n" "ext v8.8b, v6.8b, v8.8b, #1 \n" "ext v12.8b, v10.8b, v12.8b, #1 \n" "dup v15.8b, v0.b[0] \n" "dup v16.8b, v0.b[1] \n" "dup v17.8b, v0.b[2] \n" "dup v18.8b, v0.b[3] \n" "dup v19.8b, v0.b[4] \n" "dup v20.8b, v0.b[5] \n" "dup v21.8b, v0.b[6] \n" "dup v22.8b, v0.b[7] \n" "dup v23.8b, v1.b[0] \n" // r0 "smull v24.8h, v2.8b, v15.8b \n"// out0 = r0*k0 "smull v25.8h, v3.8b, v16.8b \n"// out1 = r1*k1 "smull v26.8h, v4.8b, v17.8b \n"// out2 = r2*k2 "smlal v24.8h, v6.8b, v18.8b \n"// out0 += r3*k3 "smlal v25.8h, v7.8b, v19.8b \n"// out1 += r4*k4 "smlal v26.8h, v8.8b, v20.8b \n"// out2 += r5*k5 "smlal v24.8h, v10.8b, v21.8b \n"// out0 += r6*k6 "smlal v25.8h, v11.8b, v22.8b \n"// out1 += r7*k7 "smlal v26.8h, v12.8b, v23.8b \n"// out2 += r8*k8 "add v24.8h, v24.8h, v25.8h \n" "add v24.8h, v24.8h, v26.8h \n" // "add v24.8h, v24.8h, %12.8h \n" // "sshr v24.8h, v24.8h, #1 \n" "sqadd v14.8h, v14.8h, v24.8h \n" "st1 {v14.8h}, [%1], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), 
"2"(r0), "3"(r1), "4"(r2), "5"(ktmp), "w"(_int1) // %12 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); } #else if (nn > 0) { asm volatile( "vld1.s8 {d0-d1}, [%5] \n"// d0(k0 - k7) d1(k8 ...) "vmovl.s8 q1, d1 \n"// d2(k8 ...) "vmovl.s8 q0, d0 \n"// d0(k0 - k3) d1(k4 - k7) "0: \n" "pld [%2, #192] \n" "vld2.s8 {d4-d5}, [%2]! \n"// r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015) "vld2.s8 {d8-d9}, [%2] \n"// d8(a016 ....) "vld2.s8 {d10-d11}, [%3]! \n"// r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115) "vld2.s8 {d14-d15}, [%3] \n"// d14(a116 ....) "vld2.s8 {d16-d17}, [%4]! \n"// r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215) "vld2.s8 {d20-d21}, [%4] \n"// d20(a216 ....) "vld1.s32 {d22-d25}, [%1] \n"// q11(out0 - out3) q12(out4 - out7) "vext.s8 d8, d4, d8, #1 \n"// d8(a02 a04 ... a016) "vext.s8 d14, d10, d14, #1 \n"// d14(a12 a14 ... a116) "vext.s8 d20, d16, d20, #1 \n"// d20(a22 a24 ... a216) "vmovl.s8 q3, d5 \n"// q3(a01 a03 ... a015) "vmovl.s8 q2, d4 \n"// q2(a00 a02 ... a014) "vmovl.s8 q4, d8 \n"// q4(a02 a04 ... a016) "vmovl.s8 q6, d11 \n"// q6(a11 a13 ... a115) "vmovl.s8 q5, d10 \n"// q5(a10 a12 ... a114) "vmovl.s8 q7, d14 \n"// q7(a12 a14 ... a116) "vmovl.s8 q9, d17 \n"// q9(a21 a23 ... a215) "vmovl.s8 q8, d16 \n"// q8(a20 a22 ... a214) "vmovl.s8 q10, d20 \n"// q10(a22 a24 ... 
a216) "vmlal.s16 q11, d4, d0[0] \n"// k0 "vmlal.s16 q12, d5, d0[0] \n" "vmull.s16 q13, d6, d0[1] \n"// k1 "vmull.s16 q14, d7, d0[1] \n" "vmlal.s16 q11, d8, d0[2] \n"// k2 "vmlal.s16 q12, d9, d0[2] \n" "vmlal.s16 q13, d12, d1[0] \n"// k4 "vmlal.s16 q14, d13, d1[0] \n" "vmlal.s16 q11, d10, d0[3] \n"// k3 "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q13, d14, d1[1] \n"// k5 "vmlal.s16 q14, d15, d1[1] \n" "vmlal.s16 q11, d16, d1[2] \n"// k6 "vmlal.s16 q12, d17, d1[2] \n" "vmlal.s16 q13, d18, d1[3] \n"// k7 "vmlal.s16 q14, d19, d1[3] \n" "vmlal.s16 q11, d20, d2[0] \n"// k8 "vmlal.s16 q12, d21, d2[0] \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vst1.32 {d22-d25}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON if (remain > 0) { #if __ARM_NEON int8x8_t _k01234567s8 = vld1_s8(ktmp); int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp+8); int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3); int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6); #endif for (; remain>0; remain--) { #if __ARM_NEON int8x8_t _r00 = vld1_s8(r0); int8x8_t _r10 = vld1_s8(r1); int8x8_t _r20 = vld1_s8(r2); int16x8_t _sum = vmull_s16(_r00, _k01234567s8); _sum = vmlal_s8(_sum, _r10, _k34567xxxs8); _sum = vmlal_s8(_sum, _r20, _k678xxxxxs8); int16x4_t _sum_n = vget_low_s16(_sum); _sum_n = vset_lane_s16(*outptr, _sum_n, 3); #if __aarch64__ *outptr = vaddv_s16(_sum_n); #else *outptr = _sum_n[0] + _sum_n[1] + _sum_n[2] + _sum_n[3]; #endif // __aarch64__ #else int sum = 0; sum += (short)r0[0] * ktmp[0]; sum += (short)r0[1] * ktmp[1]; sum += (short)r0[2] * ktmp[2]; sum += (short)r1[0] * ktmp[3]; sum += (short)r1[1] * ktmp[4]; sum += (short)r1[2] * ktmp[5]; sum += (short)r2[0] 
* ktmp[6];
            sum += (short)r2[1] * ktmp[7];
            sum += (short)r2[2] * ktmp[8];

            // sum = (sum + 1) >> 1;

            // accumulate into the output with saturation to s16 range
            *outptr = saturate2int16((int)(*outptr) + sum);
#endif // __ARM_NEON
            // stride 2: advance all three input rows by two pixels,
            // move to the next output element
            r0 += 2;
            r1 += 2;
            r2 += 2;
            outptr++;
        }
    }

    // skip the input pixels consumed past the row end by the stride
    r0 += tailstep;
    r1 += tailstep;
    r2 += tailstep;
}

// next input channel reads the next 3x3 kernel (9 weights)
ktmp += 9;
}
}
}

/* 3x3 stride-1 int8 convolution.
   Lowered to im2col + int8 GEMM; output layout/semantics are those of
   conv_im2col_sgemm_int8_neon.  */
static void conv3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;
    int stride_w = 1;
    int stride_h = 1;

    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt);
}

/* 3x3 stride-2 int8 convolution, via im2col + int8 GEMM.  */
static void conv3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;
    int stride_w = 2;
    int stride_h = 2;

    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt);
}

/* 3x3 stride-1 int8 convolution with fused bias add and float
   dequantization (one scale per output channel in scales_dequant).  */
static void conv3x3s1_int8_dequant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;
    int stride_w = 1;
    int stride_h = 1;

    conv_im2col_sgemm_int8_dequant_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_dequant, opt);
}

/* 3x3 stride-2 int8 convolution with fused bias add and float
   dequantization.  */
static void conv3x3s2_int8_dequant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;
    int stride_w = 2;
    int stride_h = 2;

    conv_im2col_sgemm_int8_dequant_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_dequant, opt);
}

/* 3x3 stride-1 int8 convolution with fused bias add and requantization
   back to int8 (scales in scales_requant).  */
static void conv3x3s1_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;
    int stride_w = 1;
    int stride_h = 1;

    conv_im2col_sgemm_int8_requant_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_requant, opt);
}

/* 3x3 stride-2 int8 convolution with fused bias add and requantization
   back to int8.  */
static void conv3x3s2_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;
    int stride_w = 2;
    int stride_h = 2;

    conv_im2col_sgemm_int8_requant_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_requant, opt);
}
gimple.h
/* Gimple IR definitions. Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc. Contributed by Aldy Hernandez <aldyh@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_GIMPLE_H #define GCC_GIMPLE_H #include "pointer-set.h" #include "vec.h" #include "vecprim.h" #include "vecir.h" #include "ggc.h" #include "basic-block.h" #include "tree-ssa-operands.h" #include "tree-ssa-alias.h" struct gimple_seq_node_d; typedef struct gimple_seq_node_d *gimple_seq_node; typedef const struct gimple_seq_node_d *const_gimple_seq_node; /* For each block, the PHI nodes that need to be rewritten are stored into these vectors. */ typedef VEC(gimple, heap) *gimple_vec; DEF_VEC_P (gimple_vec); DEF_VEC_ALLOC_P (gimple_vec, heap); enum gimple_code { #define DEFGSCODE(SYM, STRING, STRUCT) SYM, #include "gimple.def" #undef DEFGSCODE LAST_AND_UNUSED_GIMPLE_CODE }; extern const char *const gimple_code_name[]; extern const unsigned char gimple_rhs_class_table[]; /* Error out if a gimple tuple is addressed incorrectly. 
*/ #if defined ENABLE_GIMPLE_CHECKING #define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR) extern void gimple_check_failed (const_gimple, const char *, int, \ const char *, enum gimple_code, \ enum tree_code) ATTRIBUTE_NORETURN; #define GIMPLE_CHECK(GS, CODE) \ do { \ const_gimple __gs = (GS); \ if (gimple_code (__gs) != (CODE)) \ gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \ (CODE), ERROR_MARK); \ } while (0) #else /* not ENABLE_GIMPLE_CHECKING */ #define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR))) #define GIMPLE_CHECK(GS, CODE) (void)0 #endif /* Class of GIMPLE expressions suitable for the RHS of assignments. See get_gimple_rhs_class. */ enum gimple_rhs_class { GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */ GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */ GIMPLE_BINARY_RHS, /* The expression is a binary operation. */ GIMPLE_UNARY_RHS, /* The expression is a unary operation. */ GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA name, a _DECL, a _REF, etc. */ }; /* Specific flags for individual GIMPLE statements. These flags are always stored in gimple_statement_base.subcode and they may only be defined for statement codes that do not use sub-codes. Values for the masks can overlap as long as the overlapping values are never used in the same statement class. The maximum mask value that can be defined is 1 << 15 (i.e., each statement code can hold up to 16 bitflags). Keep this list sorted. */ enum gf_mask { GF_ASM_INPUT = 1 << 0, GF_ASM_VOLATILE = 1 << 1, GF_CALL_CANNOT_INLINE = 1 << 0, GF_CALL_FROM_THUNK = 1 << 1, GF_CALL_RETURN_SLOT_OPT = 1 << 2, GF_CALL_TAILCALL = 1 << 3, GF_CALL_VA_ARG_PACK = 1 << 4, GF_CALL_NOTHROW = 1 << 5, GF_OMP_PARALLEL_COMBINED = 1 << 0, /* True on an GIMPLE_OMP_RETURN statement if the return does not require a thread synchronization via some sort of barrier. 
The exact barrier that would otherwise be emitted is dependent on the OMP statement with which this return is associated. */ GF_OMP_RETURN_NOWAIT = 1 << 0, GF_OMP_SECTION_LAST = 1 << 0, GF_PREDICT_TAKEN = 1 << 15 }; /* Currently, there's only one type of gimple debug stmt. Others are envisioned, for example, to enable the generation of is_stmt notes in line number information, to mark sequence points, etc. This subcode is to be used to tell them apart. */ enum gimple_debug_subcode { GIMPLE_DEBUG_BIND = 0 }; /* Masks for selecting a pass local flag (PLF) to work on. These masks are used by gimple_set_plf and gimple_plf. */ enum plf_mask { GF_PLF_1 = 1 << 0, GF_PLF_2 = 1 << 1 }; /* A node in a gimple_seq_d. */ struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) gimple_seq_node_d { gimple stmt; struct gimple_seq_node_d *prev; struct gimple_seq_node_d *next; }; /* A double-linked sequence of gimple statements. */ struct GTY ((chain_next ("%h.next_free"))) gimple_seq_d { /* First and last statements in the sequence. */ gimple_seq_node first; gimple_seq_node last; /* Sequences are created/destroyed frequently. To minimize allocation activity, deallocated sequences are kept in a pool of available sequences. This is the pointer to the next free sequence in the pool. */ gimple_seq next_free; }; /* Return the first node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_first (const_gimple_seq s) { return s ? s->first : NULL; } /* Return the first statement in GIMPLE sequence S. */ static inline gimple gimple_seq_first_stmt (const_gimple_seq s) { gimple_seq_node n = gimple_seq_first (s); return (n) ? n->stmt : NULL; } /* Return the last node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_last (const_gimple_seq s) { return s ? s->last : NULL; } /* Return the last statement in GIMPLE sequence S. */ static inline gimple gimple_seq_last_stmt (const_gimple_seq s) { gimple_seq_node n = gimple_seq_last (s); return (n) ? 
n->stmt : NULL; } /* Set the last node in GIMPLE sequence S to LAST. */ static inline void gimple_seq_set_last (gimple_seq s, gimple_seq_node last) { s->last = last; } /* Set the first node in GIMPLE sequence S to FIRST. */ static inline void gimple_seq_set_first (gimple_seq s, gimple_seq_node first) { s->first = first; } /* Return true if GIMPLE sequence S is empty. */ static inline bool gimple_seq_empty_p (const_gimple_seq s) { return s == NULL || s->first == NULL; } void gimple_seq_add_stmt (gimple_seq *, gimple); /* Link gimple statement GS to the end of the sequence *SEQ_P. If *SEQ_P is NULL, a new sequence is allocated. This function is similar to gimple_seq_add_stmt, but does not scan the operands. During gimplification, we need to manipulate statement sequences before the def/use vectors have been constructed. */ void gimplify_seq_add_stmt (gimple_seq *, gimple); /* Allocate a new sequence and initialize its first element with STMT. */ static inline gimple_seq gimple_seq_alloc_with_stmt (gimple stmt) { gimple_seq seq = NULL; gimple_seq_add_stmt (&seq, stmt); return seq; } /* Returns the sequence of statements in BB. */ static inline gimple_seq bb_seq (const_basic_block bb) { return (!(bb->flags & BB_RTL) && bb->il.gimple) ? bb->il.gimple->seq : NULL; } /* Sets the sequence of statements in BB to SEQ. */ static inline void set_bb_seq (basic_block bb, gimple_seq seq) { gcc_checking_assert (!(bb->flags & BB_RTL)); bb->il.gimple->seq = seq; } /* Iterator object for GIMPLE statement sequences. */ typedef struct { /* Sequence node holding the current statement. */ gimple_seq_node ptr; /* Sequence and basic block holding the statement. These fields are necessary to handle edge cases such as when statement is added to an empty basic block or when the last statement of a block/sequence is removed. */ gimple_seq seq; basic_block bb; } gimple_stmt_iterator; /* Data structure definitions for GIMPLE tuples. NOTE: word markers are for 64 bit hosts. 
*/ struct GTY(()) gimple_statement_base { /* [ WORD 1 ] Main identifying code for a tuple. */ ENUM_BITFIELD(gimple_code) code : 8; /* Nonzero if a warning should not be emitted on this tuple. */ unsigned int no_warning : 1; /* Nonzero if this tuple has been visited. Passes are responsible for clearing this bit before using it. */ unsigned int visited : 1; /* Nonzero if this tuple represents a non-temporal move. */ unsigned int nontemporal_move : 1; /* Pass local flags. These flags are free for any pass to use as they see fit. Passes should not assume that these flags contain any useful value when the pass starts. Any initial state that the pass requires should be set on entry to the pass. See gimple_set_plf and gimple_plf for usage. */ unsigned int plf : 2; /* Nonzero if this statement has been modified and needs to have its operands rescanned. */ unsigned modified : 1; /* Nonzero if this statement contains volatile operands. */ unsigned has_volatile_ops : 1; /* Padding to get subcode to 16 bit alignment. */ unsigned pad : 1; /* The SUBCODE field can be used for tuple-specific flags for tuples that do not require subcodes. Note that SUBCODE should be at least as wide as tree codes, as several tuples store tree codes in there. */ unsigned int subcode : 16; /* UID of this statement. This is used by passes that want to assign IDs to statements. It must be assigned and used by each pass. By default it should be assumed to contain garbage. */ unsigned uid; /* [ WORD 2 ] Locus information for debug info. */ location_t location; /* Number of operands in this tuple. */ unsigned num_ops; /* [ WORD 3 ] Basic block holding this statement. */ struct basic_block_def *bb; /* [ WORD 4 ] Lexical block holding this statement. */ tree block; }; /* Base structure for tuples with operands. */ struct GTY(()) gimple_statement_with_ops_base { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5-6 ] SSA operand vectors. 
NOTE: It should be possible to amalgamate these vectors with the operand vector OP. However, the SSA operand vectors are organized differently and contain more information (like immediate use chaining). */ struct def_optype_d GTY((skip (""))) *def_ops; struct use_optype_d GTY((skip (""))) *use_ops; }; /* Statements that take register operands. */ struct GTY(()) gimple_statement_with_ops { /* [ WORD 1-6 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 7 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1]; }; /* Base for statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops_base { /* [ WORD 1-6 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 7-8 ] Virtual operands for this statement. The GC will pick them up via the ssa_names array. */ tree GTY((skip (""))) vdef; tree GTY((skip (""))) vuse; }; /* Statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* Call statements that take both memory and register operands. */ struct GTY(()) gimple_statement_call { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9-12 ] */ struct pt_solution call_used; struct pt_solution call_clobbered; /* [ WORD 13 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. 
*/ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* OpenMP statements (#pragma omp). */ struct GTY(()) gimple_statement_omp { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ gimple_seq body; }; /* GIMPLE_BIND */ struct GTY(()) gimple_statement_bind { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Variables declared in this scope. */ tree vars; /* [ WORD 6 ] This is different than the BLOCK field in gimple_statement_base, which is analogous to TREE_BLOCK (i.e., the lexical block holding this statement). This field is the equivalent of BIND_EXPR_BLOCK in tree land (i.e., the lexical scope defined by this bind). See gimple-low.c. */ tree block; /* [ WORD 7 ] */ gimple_seq body; }; /* GIMPLE_CATCH */ struct GTY(()) gimple_statement_catch { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree types; /* [ WORD 6 ] */ gimple_seq handler; }; /* GIMPLE_EH_FILTER */ struct GTY(()) gimple_statement_eh_filter { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Filter types. */ tree types; /* [ WORD 6 ] Failure actions. */ gimple_seq failure; }; /* GIMPLE_EH_MUST_NOT_THROW */ struct GTY(()) gimple_statement_eh_mnt { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Abort function decl. */ tree fndecl; }; /* GIMPLE_PHI */ struct GTY(()) gimple_statement_phi { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ unsigned capacity; unsigned nargs; /* [ WORD 6 ] */ tree result; /* [ WORD 7 ] */ struct phi_arg_d GTY ((length ("%h.nargs"))) args[1]; }; /* GIMPLE_RESX, GIMPLE_EH_DISPATCH */ struct GTY(()) gimple_statement_eh_ctrl { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Exception region number. */ int region; }; /* GIMPLE_TRY */ struct GTY(()) gimple_statement_try { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Expression to evaluate. 
*/ gimple_seq eval; /* [ WORD 6 ] Cleanup expression. */ gimple_seq cleanup; }; /* Kind of GIMPLE_TRY statements. */ enum gimple_try_flags { /* A try/catch. */ GIMPLE_TRY_CATCH = 1 << 0, /* A try/finally. */ GIMPLE_TRY_FINALLY = 1 << 1, GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY, /* Analogous to TRY_CATCH_IS_CLEANUP. */ GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2 }; /* GIMPLE_WITH_CLEANUP_EXPR */ struct GTY(()) gimple_statement_wce { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be executed if an exception is thrown, not on normal exit of its scope. This flag is analogous to the CLEANUP_EH_ONLY flag in TARGET_EXPRs. */ /* [ WORD 5 ] Cleanup expression. */ gimple_seq cleanup; }; /* GIMPLE_ASM */ struct GTY(()) gimple_statement_asm { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9 ] __asm__ statement. */ const char *string; /* [ WORD 10 ] Number of inputs, outputs, clobbers, labels. */ unsigned char ni; unsigned char no; unsigned char nc; unsigned char nl; /* [ WORD 11 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* GIMPLE_OMP_CRITICAL */ struct GTY(()) gimple_statement_omp_critical { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] Critical section name. */ tree name; }; struct GTY(()) gimple_omp_for_iter { /* Condition code. */ enum tree_code cond; /* Index variable. */ tree index; /* Initial value. */ tree initial; /* Final value. */ tree final; /* Increment. */ tree incr; }; /* GIMPLE_OMP_FOR */ struct GTY(()) gimple_statement_omp_for { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; /* [ WORD 7 ] Number of elements in iter array. 
*/ size_t collapse; /* [ WORD 8 ] */ struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter; /* [ WORD 9 ] Pre-body evaluated before the loop body begins. */ gimple_seq pre_body; }; /* GIMPLE_OMP_PARALLEL */ struct GTY(()) gimple_statement_omp_parallel { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] Clauses. */ tree clauses; /* [ WORD 7 ] Child function holding the body of the parallel region. */ tree child_fn; /* [ WORD 8 ] Shared data argument. */ tree data_arg; }; /* GIMPLE_OMP_TASK */ struct GTY(()) gimple_statement_omp_task { /* [ WORD 1-8 ] */ struct gimple_statement_omp_parallel par; /* [ WORD 9 ] Child function holding firstprivate initialization if needed. */ tree copy_fn; /* [ WORD 10-11 ] Size and alignment in bytes of the argument data block. */ tree arg_size; tree arg_align; }; /* GIMPLE_OMP_SECTION */ /* Uses struct gimple_statement_omp. */ /* GIMPLE_OMP_SECTIONS */ struct GTY(()) gimple_statement_omp_sections { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; /* [ WORD 7 ] The control variable used for deciding which of the sections to execute. */ tree control; }; /* GIMPLE_OMP_CONTINUE. Note: This does not inherit from gimple_statement_omp, because we do not need the body field. */ struct GTY(()) gimple_statement_omp_continue { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree control_def; /* [ WORD 6 ] */ tree control_use; }; /* GIMPLE_OMP_SINGLE */ struct GTY(()) gimple_statement_omp_single { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; }; /* GIMPLE_OMP_ATOMIC_LOAD. Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp contains a sequence, which we don't need here. */ struct GTY(()) gimple_statement_omp_atomic_load { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5-6 ] */ tree rhs, lhs; }; /* GIMPLE_OMP_ATOMIC_STORE. See note on GIMPLE_OMP_ATOMIC_LOAD. 
*/ struct GTY(()) gimple_statement_omp_atomic_store { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree val; }; #define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM, enum gimple_statement_structure_enum { #include "gsstruct.def" LAST_GSS_ENUM }; #undef DEFGSSTRUCT /* Define the overall contents of a gimple tuple. It may be any of the structures declared above for various types of tuples. */ union GTY ((desc ("gimple_statement_structure (&%h)"), variable_size)) gimple_statement_d { struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase; struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops; struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase; struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem; struct gimple_statement_call GTY ((tag ("GSS_CALL"))) gimple_call; struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp; struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind; struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch; struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter; struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt; struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi; struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl; struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try; struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce; struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm; struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical; struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for; struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel; struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task; struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections; struct 
gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single; struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue; struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load; struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store; }; /* In gimple.c. */ /* Offset in bytes to the location of the operand vector. Zero if there is no operand vector for this tuple structure. */ extern size_t const gimple_ops_offset_[]; /* Map GIMPLE codes to GSS codes. */ extern enum gimple_statement_structure_enum const gss_for_code_[]; /* This variable holds the currently expanded gimple statement for purposes of comminucating the profile info to the builtin expanders. */ extern gimple currently_expanding_gimple_stmt; gimple gimple_build_return (tree); gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL); #define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO) void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *); gimple gimple_build_assign_with_ops_stat (enum tree_code, tree, tree, tree, tree MEM_STAT_DECL); #define gimple_build_assign_with_ops(c,o1,o2,o3) \ gimple_build_assign_with_ops_stat (c, o1, o2, o3, NULL_TREE MEM_STAT_INFO) #define gimple_build_assign_with_ops3(c,o1,o2,o3,o4) \ gimple_build_assign_with_ops_stat (c, o1, o2, o3, o4 MEM_STAT_INFO) gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL); #define gimple_build_debug_bind(var,val,stmt) \ gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO) gimple gimple_build_call_vec (tree, VEC(tree, heap) *); gimple gimple_build_call (tree, unsigned, ...); gimple gimple_build_call_from_tree (tree); gimple gimplify_assign (tree, tree, gimple_seq *); gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree); gimple gimple_build_label (tree label); gimple gimple_build_goto (tree dest); gimple gimple_build_nop 
(void); gimple gimple_build_bind (tree, gimple_seq, tree); gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *, VEC(tree,gc) *, VEC(tree,gc) *); gimple gimple_build_catch (tree, gimple_seq); gimple gimple_build_eh_filter (tree, gimple_seq); gimple gimple_build_eh_must_not_throw (tree); gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags); gimple gimple_build_wce (gimple_seq); gimple gimple_build_resx (int); gimple gimple_build_eh_dispatch (int); gimple gimple_build_switch_nlabels (unsigned, tree, tree); gimple gimple_build_switch (unsigned, tree, tree, ...); gimple gimple_build_switch_vec (tree, tree, VEC(tree,heap) *); gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree); gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree); gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq); gimple gimple_build_omp_critical (gimple_seq, tree); gimple gimple_build_omp_section (gimple_seq); gimple gimple_build_omp_continue (tree, tree); gimple gimple_build_omp_master (gimple_seq); gimple gimple_build_omp_return (bool); gimple gimple_build_omp_ordered (gimple_seq); gimple gimple_build_omp_sections (gimple_seq, tree); gimple gimple_build_omp_sections_switch (void); gimple gimple_build_omp_single (gimple_seq, tree); gimple gimple_build_cdt (tree, tree); gimple gimple_build_omp_atomic_load (tree, tree); gimple gimple_build_omp_atomic_store (tree); gimple gimple_build_predict (enum br_predictor, enum prediction); enum gimple_statement_structure_enum gss_for_assign (enum tree_code); void sort_case_labels (VEC(tree,heap) *); void gimple_set_body (tree, gimple_seq); gimple_seq gimple_body (tree); bool gimple_has_body_p (tree); gimple_seq gimple_seq_alloc (void); void gimple_seq_free (gimple_seq); void gimple_seq_add_seq (gimple_seq *, gimple_seq); gimple_seq gimple_seq_copy (gimple_seq); int gimple_call_flags (const_gimple); int gimple_call_return_flags (const_gimple); int gimple_call_arg_flags 
(const_gimple, unsigned); void gimple_call_reset_alias_info (gimple); bool gimple_assign_copy_p (gimple); bool gimple_assign_ssa_name_copy_p (gimple); bool gimple_assign_unary_nop_p (gimple); void gimple_set_bb (gimple, struct basic_block_def *); void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree); void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code, tree, tree, tree); tree gimple_get_lhs (const_gimple); void gimple_set_lhs (gimple, tree); void gimple_replace_lhs (gimple, tree); gimple gimple_copy (gimple); void gimple_set_modified (gimple, bool); void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *); gimple gimple_build_cond_from_tree (tree, tree, tree); void gimple_cond_set_condition_from_tree (gimple, tree); bool gimple_has_side_effects (const_gimple); bool gimple_rhs_has_side_effects (const_gimple); bool gimple_could_trap_p (gimple); bool gimple_could_trap_p_1 (gimple, bool, bool); bool gimple_assign_rhs_could_trap_p (gimple); void gimple_regimplify_operands (gimple, gimple_stmt_iterator *); bool empty_body_p (gimple_seq); unsigned get_gimple_rhs_num_ops (enum tree_code); #define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO) gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL); const char *gimple_decl_printable_name (tree, int); bool gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace); tree gimple_get_virt_method_for_binfo (HOST_WIDE_INT, tree, tree *, bool); void gimple_adjust_this_by_delta (gimple_stmt_iterator *, tree); /* Returns true iff T is a valid GIMPLE statement. */ extern bool is_gimple_stmt (tree); /* Returns true iff TYPE is a valid type for a scalar register variable. */ extern bool is_gimple_reg_type (tree); /* Returns true iff T is a scalar register variable. */ extern bool is_gimple_reg (tree); /* Returns true iff T is any sort of variable. */ extern bool is_gimple_variable (tree); /* Returns true iff T is any sort of symbol. 
*/ extern bool is_gimple_id (tree); /* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */ extern bool is_gimple_min_lval (tree); /* Returns true iff T is something whose address can be taken. */ extern bool is_gimple_addressable (tree); /* Returns true iff T is any valid GIMPLE lvalue. */ extern bool is_gimple_lvalue (tree); /* Returns true iff T is a GIMPLE address. */ bool is_gimple_address (const_tree); /* Returns true iff T is a GIMPLE invariant address. */ bool is_gimple_invariant_address (const_tree); /* Returns true iff T is a GIMPLE invariant address at interprocedural level. */ bool is_gimple_ip_invariant_address (const_tree); /* Returns true iff T is a valid GIMPLE constant. */ bool is_gimple_constant (const_tree); /* Returns true iff T is a GIMPLE restricted function invariant. */ extern bool is_gimple_min_invariant (const_tree); /* Returns true iff T is a GIMPLE restricted interprecodural invariant. */ extern bool is_gimple_ip_invariant (const_tree); /* Returns true iff T is a GIMPLE rvalue. */ extern bool is_gimple_val (tree); /* Returns true iff T is a GIMPLE asm statement input. */ extern bool is_gimple_asm_val (tree); /* Returns true iff T is a valid address operand of a MEM_REF. */ bool is_gimple_mem_ref_addr (tree); /* Returns true iff T is a valid rhs for a MODIFY_EXPR where the LHS is a GIMPLE temporary, a renamed user variable, or something else, respectively. */ extern bool is_gimple_reg_rhs (tree); extern bool is_gimple_mem_rhs (tree); /* Returns true iff T is a valid if-statement condition. */ extern bool is_gimple_condexpr (tree); /* Returns true iff T is a variable that does not need to live in memory. */ extern bool is_gimple_non_addressable (tree t); /* Returns true iff T is a valid call address expression. */ extern bool is_gimple_call_addr (tree); /* If T makes a function call, returns the CALL_EXPR operand. 
*/ extern tree get_call_expr_in (tree t); extern void recalculate_side_effects (tree); extern bool gimple_compare_field_offset (tree, tree); extern tree gimple_register_type (tree); extern tree gimple_register_canonical_type (tree); enum gtc_mode { GTC_MERGE = 0, GTC_DIAG = 1 }; extern bool gimple_types_compatible_p (tree, tree, enum gtc_mode); extern void print_gimple_types_stats (void); extern void free_gimple_type_tables (void); extern tree gimple_unsigned_type (tree); extern tree gimple_signed_type (tree); extern alias_set_type gimple_get_alias_set (tree); extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *, unsigned *); extern bool walk_stmt_load_store_addr_ops (gimple, void *, bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *)); extern bool walk_stmt_load_store_ops (gimple, void *, bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *)); extern bool gimple_ior_addresses_taken (bitmap, gimple); extern bool gimple_call_builtin_p (gimple, enum built_in_function); /* In gimplify.c */ extern tree create_tmp_var_raw (tree, const char *); extern tree create_tmp_var_name (const char *); extern tree create_tmp_var (tree, const char *); extern tree create_tmp_reg (tree, const char *); extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *); extern tree get_formal_tmp_var (tree, gimple_seq *); extern void declare_vars (tree, gimple, bool); extern void annotate_all_with_location (gimple_seq, location_t); /* Validation of GIMPLE expressions. Note that these predicates only check the basic form of the expression, they don't recurse to make sure that underlying nodes are also of the right form. */ typedef bool (*gimple_predicate)(tree); /* FIXME we should deduce this from the predicate. */ enum fallback { fb_none = 0, /* Do not generate a temporary. */ fb_rvalue = 1, /* Generate an rvalue to hold the result of a gimplified expression. 
*/ fb_lvalue = 2, /* Generate an lvalue to hold the result of a gimplified expression. */ fb_mayfail = 4, /* Gimplification may fail. Error issued afterwards. */ fb_either= fb_rvalue | fb_lvalue }; typedef int fallback_t; enum gimplify_status { GS_ERROR = -2, /* Something Bad Seen. */ GS_UNHANDLED = -1, /* A langhook result for "I dunno". */ GS_OK = 0, /* We did something, maybe more to do. */ GS_ALL_DONE = 1 /* The expression is fully gimplified. */ }; struct gimplify_ctx { struct gimplify_ctx *prev_context; VEC(gimple,heap) *bind_expr_stack; tree temps; gimple_seq conditional_cleanups; tree exit_label; tree return_temp; VEC(tree,heap) *case_labels; /* The formal temporary table. Should this be persistent? */ htab_t temp_htab; int conditions; bool save_stack; bool into_ssa; bool allow_rhs_cond_expr; }; extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *, bool (*) (tree), fallback_t); extern void gimplify_type_sizes (tree, gimple_seq *); extern void gimplify_one_sizepos (tree *, gimple_seq *); extern bool gimplify_stmt (tree *, gimple_seq *); extern gimple gimplify_body (tree *, tree, bool); extern void push_gimplify_context (struct gimplify_ctx *); extern void pop_gimplify_context (gimple); extern void gimplify_and_add (tree, gimple_seq *); /* Miscellaneous helpers. */ extern void gimple_add_tmp_var (tree); extern gimple gimple_current_bind_expr (void); extern VEC(gimple, heap) *gimple_bind_expr_stack (void); extern tree voidify_wrapper_expr (tree, tree); extern tree build_and_jump (tree *); extern tree force_labels_r (tree *, int *, void *); extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *, gimple_seq *); struct gimplify_omp_ctx; extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree); extern tree gimple_boolify (tree); extern gimple_predicate rhs_predicate_for (tree); extern tree canonicalize_cond_expr_cond (tree); /* In omp-low.c. 
*/
extern tree omp_reduction_init (tree, tree);

/* In tree-nested.c.  */
extern void lower_nested_functions (tree);
extern void insert_field_into_struct (tree, tree);

/* In gimplify.c.  */
extern void gimplify_function_tree (tree);

/* In cfgexpand.c.  */
extern tree gimple_assign_rhs_to_tree (gimple);

/* In builtins.c.  */
extern bool validate_gimple_arglist (const_gimple, ...);

/* In tree-ssa.c.  */
extern bool tree_ssa_useless_type_conversion (tree);
extern tree tree_ssa_strip_useless_type_conversions (tree);
extern bool useless_type_conversion_p (tree, tree);
extern bool types_compatible_p (tree, tree);

/* Return the code for GIMPLE statement G.  */

static inline enum gimple_code
gimple_code (const_gimple g)
{
  return g->gsbase.code;
}

/* Return the GSS code used by a GIMPLE code.  */

static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
  gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
  return gss_for_code_[code];
}

/* Return which GSS code is used by GS.  */

static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
  return gss_for_code (gimple_code (gs));
}

/* Return true if statement G has sub-statements.  This is only true
   for High GIMPLE statements.  */

static inline bool
gimple_has_substatements (gimple g)
{
  switch (gimple_code (g))
    {
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRY:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_WITH_CLEANUP_EXPR:
      return true;

    default:
      return false;
    }
}

/* Return the basic block holding statement G.  */

static inline struct basic_block_def *
gimple_bb (const_gimple g)
{
  return g->gsbase.bb;
}

/* Return the lexical scope block holding statement G.
*/

static inline tree
gimple_block (const_gimple g)
{
  return g->gsbase.block;
}

/* Set BLOCK to be the lexical scope block holding statement G.  */

static inline void
gimple_set_block (gimple g, tree block)
{
  g->gsbase.block = block;
}

/* Return location information for statement G.  */

static inline location_t
gimple_location (const_gimple g)
{
  return g->gsbase.location;
}

/* Return pointer to location information for statement G.  */

static inline const location_t *
gimple_location_ptr (const_gimple g)
{
  return &g->gsbase.location;
}

/* Set location information for statement G.  */

static inline void
gimple_set_location (gimple g, location_t location)
{
  g->gsbase.location = location;
}

/* Return true if G contains location information.  */

static inline bool
gimple_has_location (const_gimple g)
{
  return gimple_location (g) != UNKNOWN_LOCATION;
}

/* Return the file name of the location of STMT.  */

static inline const char *
gimple_filename (const_gimple stmt)
{
  return LOCATION_FILE (gimple_location (stmt));
}

/* Return the line number of the location of STMT.  */

static inline int
gimple_lineno (const_gimple stmt)
{
  return LOCATION_LINE (gimple_location (stmt));
}

/* Determine whether SEQ is a singleton, i.e. a non-empty sequence
   whose first and last statement coincide.  */

static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
  return ((gimple_seq_first (seq) != NULL)
	  && (gimple_seq_first (seq) == gimple_seq_last (seq)));
}

/* Return true if no warnings should be emitted for statement STMT.  */

static inline bool
gimple_no_warning_p (const_gimple stmt)
{
  return stmt->gsbase.no_warning;
}

/* Set the no_warning flag of STMT to NO_WARNING.  */

static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
  stmt->gsbase.no_warning = (unsigned) no_warning;
}

/* Set the visited status on statement STMT to VISITED_P.  Passes are
   responsible for clearing this bit before using it.  */

static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
  stmt->gsbase.visited = (unsigned) visited_p;
}

/* Return the visited status for statement STMT.
*/

static inline bool
gimple_visited_p (gimple stmt)
{
  return stmt->gsbase.visited;
}

/* Set pass local flag PLF on statement STMT to VAL_P.  */

static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
  if (val_p)
    stmt->gsbase.plf |= (unsigned int) plf;
  else
    stmt->gsbase.plf &= ~((unsigned int) plf);
}

/* Return the value of pass local flag PLF on statement STMT.  */

static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
  return stmt->gsbase.plf & ((unsigned int) plf);
}

/* Set the UID of statement.  */

static inline void
gimple_set_uid (gimple g, unsigned uid)
{
  g->gsbase.uid = uid;
}

/* Return the UID of statement.  */

static inline unsigned
gimple_uid (const_gimple g)
{
  return g->gsbase.uid;
}

/* Return true if GIMPLE statement G has register or memory operands.
   NOTE(review): this relies on the ordering of codes between
   GIMPLE_COND and GIMPLE_RETURN in the code enumeration — keep in
   sync with the code definitions.  */

static inline bool
gimple_has_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}

/* Return true if GIMPLE statement G has memory operands.  Same
   code-ordering caveat as gimple_has_ops above.  */

static inline bool
gimple_has_mem_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}

/* Return the set of DEF operands for statement G, or NULL if G has no
   operands at all.  */

static inline struct def_optype_d *
gimple_def_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.def_ops;
}

/* Set DEF to be the set of DEF operands for statement G.  */

static inline void
gimple_set_def_ops (gimple g, struct def_optype_d *def)
{
  gcc_gimple_checking_assert (gimple_has_ops (g));
  g->gsops.opbase.def_ops = def;
}

/* Return the set of USE operands for statement G, or NULL if G has no
   operands at all.  */

static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.use_ops;
}

/* Set USE to be the set of USE operands for statement G.
*/

static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
  gcc_gimple_checking_assert (gimple_has_ops (g));
  g->gsops.opbase.use_ops = use;
}

/* Return the set of VUSE operand for statement G, or
   NULL_USE_OPERAND_P if G has no memory operands or its first use
   operand is not the virtual use.  */

static inline use_operand_p
gimple_vuse_op (const_gimple g)
{
  struct use_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_USE_OPERAND_P;
  ops = g->gsops.opbase.use_ops;
  if (ops && USE_OP_PTR (ops)->use == &g->gsmembase.vuse)
    return USE_OP_PTR (ops);
  return NULL_USE_OPERAND_P;
}

/* Return the set of VDEF operand for statement G, or
   NULL_DEF_OPERAND_P if G has no memory operands or its first def
   operand is not the virtual def.  */

static inline def_operand_p
gimple_vdef_op (const_gimple g)
{
  struct def_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_DEF_OPERAND_P;
  ops = g->gsops.opbase.def_ops;
  if (ops && DEF_OP_PTR (ops) == &g->gsmembase.vdef)
    return DEF_OP_PTR (ops);
  return NULL_DEF_OPERAND_P;
}

/* Return the single VUSE operand of the statement G, or NULL_TREE if
   G has no memory operands.  */

static inline tree
gimple_vuse (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vuse;
}

/* Return the single VDEF operand of the statement G, or NULL_TREE if
   G has no memory operands.  */

static inline tree
gimple_vdef (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vdef;
}

/* Return a pointer to the single VUSE operand of the statement G.  */

static inline tree *
gimple_vuse_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vuse;
}

/* Return a pointer to the single VDEF operand of the statement G.  */

static inline tree *
gimple_vdef_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vdef;
}

/* Set the single VUSE operand of the statement G.  */

static inline void
gimple_set_vuse (gimple g, tree vuse)
{
  gcc_gimple_checking_assert (gimple_has_mem_ops (g));
  g->gsmembase.vuse = vuse;
}

/* Set the single VDEF operand of the statement G.
*/

static inline void
gimple_set_vdef (gimple g, tree vdef)
{
  gcc_gimple_checking_assert (gimple_has_mem_ops (g));
  g->gsmembase.vdef = vdef;
}

/* Return true if statement G has operands and the modified field has
   been set.  */

static inline bool
gimple_modified_p (const_gimple g)
{
  return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false;
}

/* Return the tree code for the expression computed by STMT.  This is
   only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN.  For
   GIMPLE_CALL, return CALL_EXPR as the expression code for
   consistency.  This is useful when the caller needs to deal with the
   three kinds of computation that GIMPLE supports.  */

static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
    return (enum tree_code) stmt->gsbase.subcode;
  else
    {
      gcc_gimple_checking_assert (code == GIMPLE_CALL);
      return CALL_EXPR;
    }
}

/* Mark statement S as modified, and update it (rescan its
   operands).  */

static inline void
update_stmt (gimple s)
{
  if (gimple_has_ops (s))
    {
      gimple_set_modified (s, true);
      update_stmt_operands (s);
    }
}

/* Update statement S if it has been optimized.  */

static inline void
update_stmt_if_modified (gimple s)
{
  if (gimple_modified_p (s))
    update_stmt_operands (s);
}

/* Return true if statement STMT contains volatile operands.  */

static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
  if (gimple_has_mem_ops (stmt))
    return stmt->gsbase.has_volatile_ops;
  else
    return false;
}

/* Set the HAS_VOLATILE_OPS flag to VOLATILEP.  No-op for statements
   without memory operands.  */

static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
  if (gimple_has_mem_ops (stmt))
    stmt->gsbase.has_volatile_ops = (unsigned) volatilep;
}

/* Return true if statement STMT may access memory, i.e. it has a
   virtual use operand.  */

static inline bool
gimple_references_memory_p (gimple stmt)
{
  return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}

/* Return the subcode for OMP statement S.
*/

static inline unsigned
gimple_omp_subcode (const_gimple s)
{
  gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
			      && gimple_code (s) <= GIMPLE_OMP_SINGLE);
  return s->gsbase.subcode;
}

/* Set the subcode for OMP statement S to SUBCODE.  */

static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
  /* We only have 16 bits for the subcode.  Assert that we are not
     overflowing it.  */
  gcc_gimple_checking_assert (subcode < (1 << 16));
  s->gsbase.subcode = subcode;
}

/* Set the nowait flag on OMP_RETURN statement S.  */

static inline void
gimple_omp_return_set_nowait (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
  s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT;
}

/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
   flag set.  */

static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
  return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}

/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
   flag set.  */

static inline bool
gimple_omp_section_last_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}

/* Set the GF_OMP_SECTION_LAST flag on G.  */

static inline void
gimple_omp_section_set_last (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  g->gsbase.subcode |= GF_OMP_SECTION_LAST;
}

/* Return true if OMP parallel statement G has the
   GF_OMP_PARALLEL_COMBINED flag set.  */

static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}

/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the
   boolean value of COMBINED_P.
*/ static inline void gimple_omp_parallel_set_combined_p (gimple g, bool combined_p) { GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL); if (combined_p) g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED; else g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED; } /* Return the number of operands for statement GS. */ static inline unsigned gimple_num_ops (const_gimple gs) { return gs->gsbase.num_ops; } /* Set the number of operands for statement GS. */ static inline void gimple_set_num_ops (gimple gs, unsigned num_ops) { gs->gsbase.num_ops = num_ops; } /* Return the array of operands for statement GS. */ static inline tree * gimple_ops (gimple gs) { size_t off; /* All the tuples have their operand vector at the very bottom of the structure. Note that those structures that do not have an operand vector have a zero offset. */ off = gimple_ops_offset_[gimple_statement_structure (gs)]; gcc_gimple_checking_assert (off != 0); return (tree *) ((char *) gs + off); } /* Return operand I for statement GS. */ static inline tree gimple_op (const_gimple gs, unsigned i) { if (gimple_has_ops (gs)) { gcc_gimple_checking_assert (i < gimple_num_ops (gs)); return gimple_ops (CONST_CAST_GIMPLE (gs))[i]; } else return NULL_TREE; } /* Return a pointer to operand I for statement GS. */ static inline tree * gimple_op_ptr (const_gimple gs, unsigned i) { if (gimple_has_ops (gs)) { gcc_gimple_checking_assert (i < gimple_num_ops (gs)); return gimple_ops (CONST_CAST_GIMPLE (gs)) + i; } else return NULL; } /* Set operand I of statement GS to OP. */ static inline void gimple_set_op (gimple gs, unsigned i, tree op) { gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs)); /* Note. It may be tempting to assert that OP matches is_gimple_operand, but that would be wrong. Different tuples accept slightly different sets of tree operands. Each caller should perform its own validation. */ gimple_ops (gs)[i] = op; } /* Return true if GS is a GIMPLE_ASSIGN. 
*/ static inline bool is_gimple_assign (const_gimple gs) { return gimple_code (gs) == GIMPLE_ASSIGN; } /* Determine if expression CODE is one of the valid expressions that can be used on the RHS of GIMPLE assignments. */ static inline enum gimple_rhs_class get_gimple_rhs_class (enum tree_code code) { return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code]; } /* Return the LHS of assignment statement GS. */ static inline tree gimple_assign_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op (gs, 0); } /* Return a pointer to the LHS of assignment statement GS. */ static inline tree * gimple_assign_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of assignment statement GS. */ static inline void gimple_assign_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 0, lhs); if (lhs && TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = gs; } /* Return the first operand on the RHS of assignment statement GS. */ static inline tree gimple_assign_rhs1 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op (gs, 1); } /* Return a pointer to the first operand on the RHS of assignment statement GS. */ static inline tree * gimple_assign_rhs1_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 1); } /* Set RHS to be the first operand on the RHS of assignment statement GS. */ static inline void gimple_assign_set_rhs1 (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 1, rhs); } /* Return the second operand on the RHS of assignment statement GS. If GS does not have two operands, NULL is returned instead. */ static inline tree gimple_assign_rhs2 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); if (gimple_num_ops (gs) >= 3) return gimple_op (gs, 2); else return NULL_TREE; } /* Return a pointer to the second operand on the RHS of assignment statement GS. 
*/ static inline tree * gimple_assign_rhs2_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 2); } /* Set RHS to be the second operand on the RHS of assignment statement GS. */ static inline void gimple_assign_set_rhs2 (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 2, rhs); } /* Return the third operand on the RHS of assignment statement GS. If GS does not have two operands, NULL is returned instead. */ static inline tree gimple_assign_rhs3 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); if (gimple_num_ops (gs) >= 4) return gimple_op (gs, 3); else return NULL_TREE; } /* Return a pointer to the third operand on the RHS of assignment statement GS. */ static inline tree * gimple_assign_rhs3_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 3); } /* Set RHS to be the third operand on the RHS of assignment statement GS. */ static inline void gimple_assign_set_rhs3 (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 3, rhs); } /* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect to see only a maximum of two operands. */ static inline void gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code, tree op1, tree op2) { gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL); } /* A wrapper around extract_ops_from_tree_1, for callers which expect to see only a maximum of two operands. */ static inline void extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0, tree *op1) { tree op2; extract_ops_from_tree_1 (expr, code, op0, op1, &op2); gcc_assert (op2 == NULL_TREE); } /* Returns true if GS is a nontemporal move. */ static inline bool gimple_assign_nontemporal_move_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gs->gsbase.nontemporal_move; } /* Sets nontemporal move flag of GS to NONTEMPORAL. 
*/ static inline void gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gs->gsbase.nontemporal_move = nontemporal; } /* Return the code of the expression computed on the rhs of assignment statement GS. In case that the RHS is a single object, returns the tree code of the object. */ static inline enum tree_code gimple_assign_rhs_code (const_gimple gs) { enum tree_code code; GIMPLE_CHECK (gs, GIMPLE_ASSIGN); code = (enum tree_code) gs->gsbase.subcode; /* While we initially set subcode to the TREE_CODE of the rhs for GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay in sync when we rewrite stmts into SSA form or do SSA propagations. */ if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS) code = TREE_CODE (gimple_assign_rhs1 (gs)); return code; } /* Set CODE to be the code for the expression computed on the RHS of assignment S. */ static inline void gimple_assign_set_rhs_code (gimple s, enum tree_code code) { GIMPLE_CHECK (s, GIMPLE_ASSIGN); s->gsbase.subcode = code; } /* Return the gimple rhs class of the code of the expression computed on the rhs of assignment statement GS. This will never return GIMPLE_INVALID_RHS. */ static inline enum gimple_rhs_class gimple_assign_rhs_class (const_gimple gs) { return get_gimple_rhs_class (gimple_assign_rhs_code (gs)); } /* Return true if GS is an assignment with a singleton RHS, i.e., there is no operator associated with the assignment itself. Unlike gimple_assign_copy_p, this predicate returns true for any RHS operand, including those that perform an operation and do not have the semantics of a copy, such as COND_EXPR. */ static inline bool gimple_assign_single_p (gimple gs) { return (is_gimple_assign (gs) && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS); } /* Return true if S is a type-cast assignment. 
*/ static inline bool gimple_assign_cast_p (gimple s) { if (is_gimple_assign (s)) { enum tree_code sc = gimple_assign_rhs_code (s); return CONVERT_EXPR_CODE_P (sc) || sc == VIEW_CONVERT_EXPR || sc == FIX_TRUNC_EXPR; } return false; } /* Return true if GS is a GIMPLE_CALL. */ static inline bool is_gimple_call (const_gimple gs) { return gimple_code (gs) == GIMPLE_CALL; } /* Return the LHS of call statement GS. */ static inline tree gimple_call_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 0); } /* Return a pointer to the LHS of call statement GS. */ static inline tree * gimple_call_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of call statement GS. */ static inline void gimple_call_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 0, lhs); if (lhs && TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = gs; } /* Return the tree node representing the function called by call statement GS. */ static inline tree gimple_call_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 1); } /* Return a pointer to the tree node representing the function called by call statement GS. */ static inline tree * gimple_call_fn_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 1); } /* Set FN to be the function called by call statement GS. */ static inline void gimple_call_set_fn (gimple gs, tree fn) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 1, fn); } /* Set FNDECL to be the function called by call statement GS. */ static inline void gimple_call_set_fndecl (gimple gs, tree decl) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl)); } /* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it. Otherwise return NULL. This function is analogous to get_callee_fndecl in tree land. 
*/

static inline tree
gimple_call_fndecl (const_gimple gs)
{
  tree addr = gimple_call_fn (gs);
  if (TREE_CODE (addr) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (addr, 0);
      if (TREE_CODE (fndecl) == MEM_REF)
	{
	  /* Look through a zero-offset MEM_REF of the address of the
	     decl; anything else is not a direct call.  */
	  if (TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR
	      && integer_zerop (TREE_OPERAND (fndecl, 1)))
	    return TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0);
	  else
	    return NULL_TREE;
	}
      return TREE_OPERAND (addr, 0);
    }
  return NULL_TREE;
}

/* Return the type returned by call statement GS.  */

static inline tree
gimple_call_return_type (const_gimple gs)
{
  tree fn = gimple_call_fn (gs);
  tree type = TREE_TYPE (fn);

  /* See through the pointer.  */
  type = TREE_TYPE (type);

  /* The type returned by a FUNCTION_DECL is the type of its
     function type.  */
  return TREE_TYPE (type);
}

/* Return the static chain for call statement GS.  */

static inline tree
gimple_call_chain (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 2);
}

/* Return a pointer to the static chain for call statement GS.  */

static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 2);
}

/* Set CHAIN to be the static chain for call statement GS.  */

static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 2, chain);
}

/* Return the number of arguments used by call statement GS.  */

static inline unsigned
gimple_call_num_args (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  num_ops = gimple_num_ops (gs);
  /* The first three operands are the LHS, the callee and the static
     chain; everything after them is an argument.  */
  return num_ops - 3;
}

/* Return the argument at position INDEX for call statement GS.  */

static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, index + 3);
}

/* Return a pointer to the argument at position INDEX for call
   statement GS.  */

static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, index + 3);
}

/* Set ARG to be the argument at position INDEX for call statement GS.  */

static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, index + 3, arg);
}

/* If TAIL_P is true, mark call statement S as being a tail call
   (i.e., a call just before the exit of a function).  These calls are
   candidate for tail call optimization.  */

static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (tail_p)
    s->gsbase.subcode |= GF_CALL_TAILCALL;
  else
    s->gsbase.subcode &= ~GF_CALL_TAILCALL;
}

/* Return true if GIMPLE_CALL S is marked as a tail call.  */

static inline bool
gimple_call_tail_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0;
}

/* Set the inlinable status of GIMPLE_CALL S to INLINABLE_P.  */

static inline void
gimple_call_set_cannot_inline (gimple s, bool inlinable_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (inlinable_p)
    s->gsbase.subcode |= GF_CALL_CANNOT_INLINE;
  else
    s->gsbase.subcode &= ~GF_CALL_CANNOT_INLINE;
}

/* Return true if GIMPLE_CALL S cannot be inlined.  */

static inline bool
gimple_call_cannot_inline_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_CANNOT_INLINE) != 0;
}

/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
   slot optimization.  This transformation uses the target of the call
   expansion as the return slot for calls that return in memory.  */

static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (return_slot_opt_p)
    s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT;
  else
    s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}

/* Return true if S is marked for return slot optimization.  */

static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}

/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
   thunk to the thunked-to function.  */

static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (from_thunk_p)
    s->gsbase.subcode |= GF_CALL_FROM_THUNK;
  else
    s->gsbase.subcode &= ~GF_CALL_FROM_THUNK;
}

/* Return true if GIMPLE_CALL S is a jump from a thunk.  */

static inline bool
gimple_call_from_thunk_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0;
}

/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs
   the argument pack in its argument list.  */

static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (pass_arg_pack_p)
    s->gsbase.subcode |= GF_CALL_VA_ARG_PACK;
  else
    s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK;
}

/* Return true if GIMPLE_CALL S is a stdarg call that needs the argument
   pack in its argument list.  */

static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0;
}

/* Return true if S is a noreturn call.  */

static inline bool
gimple_call_noreturn_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}

/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not
   throw even if the called function can throw in other cases.  */

static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (nothrow_p)
    s->gsbase.subcode |= GF_CALL_NOTHROW;
  else
    s->gsbase.subcode &= ~GF_CALL_NOTHROW;
}

/* Return true if S is a nothrow call.  */

static inline bool
gimple_call_nothrow_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}

/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL.  */

static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
  GIMPLE_CHECK (dest_call, GIMPLE_CALL);
  GIMPLE_CHECK (orig_call, GIMPLE_CALL);
  dest_call->gsbase.subcode = orig_call->gsbase.subcode;
}

/* Return a pointer to the points-to solution for the set of call-used
   variables of the call CALL.  */

static inline struct pt_solution *
gimple_call_use_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_used;
}

/* Return a pointer to the points-to solution for the set of
   call-clobbered variables of the call CALL.  */

static inline struct pt_solution *
gimple_call_clobber_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_clobbered;
}

/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
   non-NULL lhs.  */

static inline bool
gimple_has_lhs (gimple stmt)
{
  return (is_gimple_assign (stmt)
	  || (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt) != NULL_TREE));
}

/* Return the code of the predicate computed by conditional statement
   GS.  */

static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return (enum tree_code) gs->gsbase.subcode;
}

/* Set CODE to be the predicate code for the conditional statement GS.  */

static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gs->gsbase.subcode = code;
}

/* Return the LHS of the predicate computed by conditional statement
   GS.  */

static inline tree
gimple_cond_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 0);
}

/* Return the pointer to the LHS of the predicate computed by
   conditional statement GS.  */

static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of the predicate computed by
   conditional statement GS.  */

static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 0, lhs);
}

/* Return the RHS operand of the predicate computed by conditional
   GS.  */

static inline tree
gimple_cond_rhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 1);
}

/* Return the pointer to the RHS operand of the predicate computed by
   conditional GS.  */

static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 1);
}

/* Set RHS to be the RHS operand of the predicate computed by
   conditional statement GS.  */

static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 1, rhs);
}

/* Return the label used by conditional statement GS when its predicate
   evaluates to true.  */

static inline tree
gimple_cond_true_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 2);
}

/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to true.  */

static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 2, label);
}

/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to false.  */

static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 3, label);
}

/* Return the label used by conditional statement GS when its predicate
   evaluates to false.  */

static inline tree
gimple_cond_false_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 3);
}

/* Set the conditional COND_STMT to be of the form 'if (1 == 0)'.  */

static inline void
gimple_cond_make_false (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_false_node);
  gs->gsbase.subcode = EQ_EXPR;
}

/* Set the conditional COND_STMT to be of the form 'if (1 == 1)'.  */

static inline void
gimple_cond_make_true (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_true_node);
  gs->gsbase.subcode = EQ_EXPR;
}

/* Check if conditional statement GS is of the form 'if (1 == 1)',
   'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)'.  */

static inline bool
gimple_cond_true_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  /* Only constant boolean operands can make the predicate trivially
     true; anything else is unknown at compile time.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;

  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == NE_EXPR && lhs != rhs)
    return true;

  if (code == EQ_EXPR && lhs == rhs)
    return true;

  return false;
}

/* Check if conditional statement GS is of the form 'if (1 != 1)',
   'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)'.  */

static inline bool
gimple_cond_false_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;

  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == NE_EXPR && lhs == rhs)
    return true;

  if (code == EQ_EXPR && lhs != rhs)
    return true;

  return false;
}

/* Check if conditional statement GS is of the form 'if (var != 0)' or
   'if (var == 1)'.  */

static inline bool
gimple_cond_single_var_p (gimple gs)
{
  if (gimple_cond_code (gs) == NE_EXPR
      && gimple_cond_rhs (gs) == boolean_false_node)
    return true;

  if (gimple_cond_code (gs) == EQ_EXPR
      && gimple_cond_rhs (gs) == boolean_true_node)
    return true;

  return false;
}

/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and
   RHS.  */

static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs,
			   tree rhs)
{
  gimple_cond_set_code (stmt, code);
  gimple_cond_set_lhs (stmt, lhs);
  gimple_cond_set_rhs (stmt, rhs);
}

/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS.  */

static inline tree
gimple_label_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  return gimple_op (gs, 0);
}

/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
   GS.  */

static inline void
gimple_label_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  gimple_set_op (gs, 0, label);
}

/* Return the destination of the unconditional jump GS.  */

static inline tree
gimple_goto_dest (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  return gimple_op (gs, 0);
}

/* Set DEST to be the destination of the unconditional jump GS.  */

static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  gimple_set_op (gs, 0, dest);
}

/* Return the variables declared in the GIMPLE_BIND statement GS.  */

static inline tree
gimple_bind_vars (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.vars;
}

/* Set VARS to be the set of variables declared in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = vars;
}

/* Append VARS to the set of variables declared in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars);
}

/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement
   GS.  */

static inline gimple_seq
gimple_bind_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.body;
}

/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.body = seq;
}

/* Append a statement to the end of a GIMPLE_BIND's body.  */

static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gimple_seq_add_stmt (&gs->gimple_bind.body, stmt);
}

/* Append a sequence of statements to the end of a GIMPLE_BIND's
   body.  */

static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gimple_seq_add_seq (&gs->gimple_bind.body, seq);
}

/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
   GS.  This is analogous to the BIND_EXPR_BLOCK field in trees.  */

static inline tree
gimple_bind_block (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.block;
}

/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_set_block (gimple gs, tree block)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gcc_gimple_checking_assert (block == NULL_TREE
			      || TREE_CODE (block) == BLOCK);
  gs->gimple_bind.block = block;
}

/* Return the number of input operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.ni;
}

/* Return the number of output operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.no;
}

/* Return the number of clobber operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.nc;
}

/* Return the number of label operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.nl;
}

/* Return input operand INDEX of GIMPLE_ASM GS.
*/ static inline tree gimple_asm_input_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni); return gimple_op (gs, index); } /* Return a pointer to input operand INDEX of GIMPLE_ASM GS. */ static inline tree * gimple_asm_input_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni); return gimple_op_ptr (gs, index); } /* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni && TREE_CODE (in_op) == TREE_LIST); gimple_set_op (gs, index, in_op); } /* Return output operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_output_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no); return gimple_op (gs, index + gs->gimple_asm.ni); } /* Return a pointer to output operand INDEX of GIMPLE_ASM GS. */ static inline tree * gimple_asm_output_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no); return gimple_op_ptr (gs, index + gs->gimple_asm.ni); } /* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no && TREE_CODE (out_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni, out_op); } /* Return clobber operand INDEX of GIMPLE_ASM GS. 
*/ static inline tree gimple_asm_clobber_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nc); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no); } /* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nc && TREE_CODE (clobber_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op); } /* Return label operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_label_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nl); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc); } /* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nl && TREE_CODE (label_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc, label_op); } /* Return the string representing the assembly instruction in GIMPLE_ASM GS. */ static inline const char * gimple_asm_string (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.string; } /* Return true if GS is an asm statement marked volatile. */ static inline bool gimple_asm_volatile_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0; } /* If VOLATLE_P is true, mark asm statement GS as volatile. */ static inline void gimple_asm_set_volatile (gimple gs, bool volatile_p) { GIMPLE_CHECK (gs, GIMPLE_ASM); if (volatile_p) gs->gsbase.subcode |= GF_ASM_VOLATILE; else gs->gsbase.subcode &= ~GF_ASM_VOLATILE; } /* If INPUT_P is true, mark asm GS as an ASM_INPUT. 
*/

static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  if (input_p)
    gs->gsbase.subcode |= GF_ASM_INPUT;
  else
    gs->gsbase.subcode &= ~GF_ASM_INPUT;
}

/* Return true if asm GS is an ASM_INPUT.  */

static inline bool
gimple_asm_input_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return (gs->gsbase.subcode & GF_ASM_INPUT) != 0;
}

/* Return the types handled by GIMPLE_CATCH statement GS.  */

static inline tree
gimple_catch_types (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return gs->gimple_catch.types;
}

/* Return a pointer to the types handled by GIMPLE_CATCH statement
   GS.  */

static inline tree *
gimple_catch_types_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return &gs->gimple_catch.types;
}

/* Return the GIMPLE sequence representing the body of the handler of
   GIMPLE_CATCH statement GS.  */

static inline gimple_seq
gimple_catch_handler (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return gs->gimple_catch.handler;
}

/* Return a pointer to the GIMPLE sequence representing the body of the
   handler of GIMPLE_CATCH statement GS.  */

static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return &gs->gimple_catch.handler;
}

/* Set T to be the set of types handled by GIMPLE_CATCH GS.  */

static inline void
gimple_catch_set_types (gimple gs, tree t)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  gs->gimple_catch.types = t;
}

/* Set HANDLER to be the body of GIMPLE_CATCH GS.  */

static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  gs->gimple_catch.handler = handler;
}

/* Return the types handled by GIMPLE_EH_FILTER statement GS.  */

static inline tree
gimple_eh_filter_types (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return gs->gimple_eh_filter.types;
}

/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
   GS.  */

static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return &gs->gimple_eh_filter.types;
}

/* Return the sequence of statement to execute when GIMPLE_EH_FILTER
   statement fails.  */

static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return gs->gimple_eh_filter.failure;
}

/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS.  */

static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  gs->gimple_eh_filter.types = types;
}

/* Set FAILURE to be the sequence of statements to execute on failure
   for GIMPLE_EH_FILTER GS.  */

static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  gs->gimple_eh_filter.failure = failure;
}

/* Get the function decl to be called by the MUST_NOT_THROW region.  */

static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
  return gs->gimple_eh_mnt.fndecl;
}

/* Set the function decl to be called by GS to DECL.  */

static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
  gs->gimple_eh_mnt.fndecl = decl;
}

/* GIMPLE_TRY accessors.  */

/* Return the kind of try block represented by GIMPLE_TRY GS.  This is
   either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY.  */

static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND);
}

/* Set the kind of try block represented by GIMPLE_TRY GS.  */

static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
			      || kind == GIMPLE_TRY_FINALLY);
  if (gimple_try_kind (gs) != kind)
    /* Note: this assigns the whole subcode, so changing the kind also
       clears the GIMPLE_TRY_CATCH_IS_CLEANUP bit.  */
    gs->gsbase.subcode = (unsigned int) kind;
}

/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */

static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
  return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}

/* Return the sequence of statements used as the body for GIMPLE_TRY
   GS.  */

static inline gimple_seq
gimple_try_eval (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return gs->gimple_try.eval;
}

/* Return the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */

static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return gs->gimple_try.cleanup;
}

/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */

static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
  gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
  if (catch_is_cleanup)
    g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
  else
    g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}

/* Set EVAL to be the sequence of statements to use as the body for
   GIMPLE_TRY GS.  */

static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gs->gimple_try.eval = eval;
}

/* Set CLEANUP to be the sequence of statements to use as the cleanup
   body for GIMPLE_TRY GS.  */

static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gs->gimple_try.cleanup = cleanup;
}

/* Return the cleanup sequence for cleanup statement GS.  */

static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  return gs->gimple_wce.cleanup;
}

/* Set CLEANUP to be the cleanup sequence for GS.  */

static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  gs->gimple_wce.cleanup = cleanup;
}

/* Return the CLEANUP_EH_ONLY flag for a WCE tuple.
*/ static inline bool gimple_wce_cleanup_eh_only (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); return gs->gsbase.subcode != 0; } /* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */ static inline void gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); gs->gsbase.subcode = (unsigned int) eh_only_p; } /* Return the maximum number of arguments supported by GIMPLE_PHI GS. */ static inline unsigned gimple_phi_capacity (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.capacity; } /* Return the number of arguments in GIMPLE_PHI GS. This must always be exactly the number of incoming edges for the basic block holding GS. */ static inline unsigned gimple_phi_num_args (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.nargs; } /* Return the SSA name created by GIMPLE_PHI GS. */ static inline tree gimple_phi_result (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.result; } /* Return a pointer to the SSA name created by GIMPLE_PHI GS. */ static inline tree * gimple_phi_result_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return &gs->gimple_phi.result; } /* Set RESULT to be the SSA name created by GIMPLE_PHI GS. */ static inline void gimple_phi_set_result (gimple gs, tree result) { GIMPLE_CHECK (gs, GIMPLE_PHI); gs->gimple_phi.result = result; } /* Return the PHI argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline struct phi_arg_d * gimple_phi_arg (gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.capacity); return &(gs->gimple_phi.args[index]); } /* Set PHIARG to be the argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. 
*/ static inline void gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.nargs); gs->gimple_phi.args[index] = *phiarg; } /* Return the region number for GIMPLE_RESX GS. */ static inline int gimple_resx_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RESX); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_RESX GS. */ static inline void gimple_resx_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_RESX); gs->gimple_eh_ctrl.region = region; } /* Return the region number for GIMPLE_EH_DISPATCH GS. */ static inline int gimple_eh_dispatch_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */ static inline void gimple_eh_dispatch_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); gs->gimple_eh_ctrl.region = region; } /* Return the number of labels associated with the switch statement GS. */ static inline unsigned gimple_switch_num_labels (const_gimple gs) { unsigned num_ops; GIMPLE_CHECK (gs, GIMPLE_SWITCH); num_ops = gimple_num_ops (gs); gcc_gimple_checking_assert (num_ops > 1); return num_ops - 1; } /* Set NLABELS to be the number of labels for the switch statement GS. */ static inline void gimple_switch_set_num_labels (gimple g, unsigned nlabels) { GIMPLE_CHECK (g, GIMPLE_SWITCH); gimple_set_num_ops (g, nlabels + 1); } /* Return the index variable used by the switch statement GS. */ static inline tree gimple_switch_index (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); return gimple_op (gs, 0); } /* Return a pointer to the index variable for the switch statement GS. */ static inline tree * gimple_switch_index_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); return gimple_op_ptr (gs, 0); } /* Set INDEX to be the index variable for switch statement GS. 
*/ static inline void gimple_switch_set_index (gimple gs, tree index) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index)); gimple_set_op (gs, 0, index); } /* Return the label numbered INDEX. The default label is 0, followed by any labels in a switch statement. */ static inline tree gimple_switch_label (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1); return gimple_op (gs, index + 1); } /* Set the label number INDEX to LABEL. 0 is always the default label. */ static inline void gimple_switch_set_label (gimple gs, unsigned index, tree label) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1 && (label == NULL_TREE || TREE_CODE (label) == CASE_LABEL_EXPR)); gimple_set_op (gs, index + 1, label); } /* Return the default label for a switch statement. */ static inline tree gimple_switch_default_label (const_gimple gs) { return gimple_switch_label (gs, 0); } /* Set the default label for a switch statement. */ static inline void gimple_switch_set_default_label (gimple gs, tree label) { gimple_switch_set_label (gs, 0, label); } /* Return true if GS is a GIMPLE_DEBUG statement. */ static inline bool is_gimple_debug (const_gimple gs) { return gimple_code (gs) == GIMPLE_DEBUG; } /* Return true if S is a GIMPLE_DEBUG BIND statement. */ static inline bool gimple_debug_bind_p (const_gimple s) { if (is_gimple_debug (s)) return s->gsbase.subcode == GIMPLE_DEBUG_BIND; return false; } /* Return the variable bound in a GIMPLE_DEBUG bind statement. */ static inline tree gimple_debug_bind_get_var (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op (dbg, 0); } /* Return the value bound to the variable in a GIMPLE_DEBUG bind statement. 
*/ static inline tree gimple_debug_bind_get_value (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op (dbg, 1); } /* Return a pointer to the value bound to the variable in a GIMPLE_DEBUG bind statement. */ static inline tree * gimple_debug_bind_get_value_ptr (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op_ptr (dbg, 1); } /* Set the variable bound in a GIMPLE_DEBUG bind statement. */ static inline void gimple_debug_bind_set_var (gimple dbg, tree var) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); gimple_set_op (dbg, 0, var); } /* Set the value bound to the variable in a GIMPLE_DEBUG bind statement. */ static inline void gimple_debug_bind_set_value (gimple dbg, tree value) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); gimple_set_op (dbg, 1, value); } /* The second operand of a GIMPLE_DEBUG_BIND, when the value was optimized away. */ #define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */ /* Remove the value bound to the variable in a GIMPLE_DEBUG bind statement. */ static inline void gimple_debug_bind_reset_value (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE); } /* Return true if the GIMPLE_DEBUG bind statement is bound to a value. */ static inline bool gimple_debug_bind_has_value_p (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE; } #undef GIMPLE_DEBUG_BIND_NOVALUE /* Return the body for the OMP statement GS. */ static inline gimple_seq gimple_omp_body (gimple gs) { return gs->omp.body; } /* Set BODY to be the body for the OMP statement GS. 
*/

static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
  gs->omp.body = body;
}

/* Return the name associated with OMP_CRITICAL statement GS.  */

static inline tree
gimple_omp_critical_name (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  return gs->gimple_omp_critical.name;
}

/* Return a pointer to the name associated with OMP critical statement GS.  */

static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  return &gs->gimple_omp_critical.name;
}

/* Set NAME to be the name associated with OMP critical statement GS.  */

static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  gs->gimple_omp_critical.name = name;
}

/* Return the clauses associated with OMP_FOR GS.  */

static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.clauses;
}

/* Return a pointer to the clauses associated with OMP_FOR GS.  */

static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return &gs->gimple_omp_for.clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS.  */

static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gs->gimple_omp_for.clauses = clauses;
}

/* Get the collapse count of OMP_FOR GS.  */

static inline size_t
gimple_omp_for_collapse (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.collapse;
}

/* Return the index variable for OMP_FOR GS.  */

static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].index;
}

/* Return a pointer to the index variable for OMP_FOR GS.
*/ static inline tree * gimple_omp_for_index_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].index; } /* Set INDEX to be the index variable for OMP_FOR GS. */ static inline void gimple_omp_for_set_index (gimple gs, size_t i, tree index) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].index = index; } /* Return the initial value for OMP_FOR GS. */ static inline tree gimple_omp_for_initial (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].initial; } /* Return a pointer to the initial value for OMP_FOR GS. */ static inline tree * gimple_omp_for_initial_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].initial; } /* Set INITIAL to be the initial value for OMP_FOR GS. */ static inline void gimple_omp_for_set_initial (gimple gs, size_t i, tree initial) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].initial = initial; } /* Return the final value for OMP_FOR GS. */ static inline tree gimple_omp_for_final (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].final; } /* Return a pointer to the final value for OMP_FOR GS. */ static inline tree * gimple_omp_for_final_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].final; } /* Set FINAL to be the final value for OMP_FOR GS. 
*/ static inline void gimple_omp_for_set_final (gimple gs, size_t i, tree final) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].final = final; } /* Return the increment value for OMP_FOR GS. */ static inline tree gimple_omp_for_incr (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].incr; } /* Return a pointer to the increment value for OMP_FOR GS. */ static inline tree * gimple_omp_for_incr_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].incr; } /* Set INCR to be the increment value for OMP_FOR GS. */ static inline void gimple_omp_for_set_incr (gimple gs, size_t i, tree incr) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].incr = incr; } /* Return the sequence of statements to execute before the OMP_FOR statement GS starts. */ static inline gimple_seq gimple_omp_for_pre_body (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); return gs->gimple_omp_for.pre_body; } /* Set PRE_BODY to be the sequence of statements to execute before the OMP_FOR statement GS starts. */ static inline void gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gs->gimple_omp_for.pre_body = pre_body; } /* Return the clauses associated with OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_PARALLEL GS. 
*/ static inline tree * gimple_omp_parallel_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_child_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_child_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_data_arg (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_data_arg_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the clauses associated with OMP_TASK GS. 
*/ static inline tree gimple_omp_task_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_TASK GS. */ static inline tree * gimple_omp_task_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_TASK GS. */ static inline void gimple_omp_task_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_TASK GS. */ static inline tree gimple_omp_task_child_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_task_child_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_TASK GS. */ static inline void gimple_omp_task_set_child_fn (gimple gs, tree child_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_TASK GS. */ static inline tree gimple_omp_task_data_arg (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_TASK GS. */ static inline tree * gimple_omp_task_data_arg_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_TASK GS. */ static inline void gimple_omp_task_set_data_arg (gimple gs, tree data_arg) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the clauses associated with OMP_TASK GS. 
*/ static inline tree gimple_omp_taskreg_clauses (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_clauses_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_clauses (gimple gs, tree clauses) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_TASK GS. */ static inline tree gimple_omp_taskreg_child_fn (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_child_fn_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_TASK GS. */ static inline tree gimple_omp_taskreg_data_arg (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_TASK GS. 
*/ static inline tree * gimple_omp_taskreg_data_arg_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the copy function used to hold the body of OMP_TASK GS. */ static inline tree gimple_omp_task_copy_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.copy_fn; } /* Return a pointer to the copy function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_task_copy_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.copy_fn; } /* Set CHILD_FN to be the copy function for OMP_TASK GS. */ static inline void gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.copy_fn = copy_fn; } /* Return size of the data block in bytes in OMP_TASK GS. */ static inline tree gimple_omp_task_arg_size (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.arg_size; } /* Return a pointer to the data block size for OMP_TASK GS. */ static inline tree * gimple_omp_task_arg_size_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.arg_size; } /* Set ARG_SIZE to be the data block size for OMP_TASK GS. */ static inline void gimple_omp_task_set_arg_size (gimple gs, tree arg_size) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.arg_size = arg_size; } /* Return align of the data block in bytes in OMP_TASK GS. */ static inline tree gimple_omp_task_arg_align (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.arg_align; } /* Return a pointer to the data block align for OMP_TASK GS. 
*/

static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_task.arg_align;
}

/* Set ARG_ALIGN to be the data block align for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_task.arg_align = arg_align;
}

/* Return the clauses associated with OMP_SINGLE GS.  */

static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  return gs->gimple_omp_single.clauses;
}

/* Return a pointer to the clauses associated with OMP_SINGLE GS.  */

static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  return &gs->gimple_omp_single.clauses;
}

/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS.  */

static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  gs->gimple_omp_single.clauses = clauses;
}

/* Return the clauses associated with OMP_SECTIONS GS.  */

static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return gs->gimple_omp_sections.clauses;
}

/* Return a pointer to the clauses associated with OMP_SECTIONS GS.  */

static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return &gs->gimple_omp_sections.clauses;
}

/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS GS.  */

static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  gs->gimple_omp_sections.clauses = clauses;
}

/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
   in GS.  */

static inline tree
gimple_omp_sections_control (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return gs->gimple_omp_sections.control;
}

/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS GS.
*/

static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return &gs->gimple_omp_sections.control;
}

/* Set CONTROL to be the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.  */

static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  gs->gimple_omp_sections.control = control;
}

/* Set COND to be the condition code for OMP_FOR GS.  */

static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  /* Loop conditions must be comparisons, and I must address one of
     the collapsed loop dimensions.  */
  gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
			      && i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].cond = cond;
}

/* Return the condition code associated with OMP_FOR GS.  */

static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].cond;
}

/* Set the value being stored in an atomic store.  */

static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->gimple_omp_atomic_store.val = val;
}

/* Return the value being stored in an atomic store.  */

static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return g->gimple_omp_atomic_store.val;
}

/* Return a pointer to the value being stored in an atomic store.  */

static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return &g->gimple_omp_atomic_store.val;
}

/* Set the LHS of an atomic load.  */

static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  g->gimple_omp_atomic_load.lhs = lhs;
}

/* Get the LHS of an atomic load.
*/ static inline tree gimple_omp_atomic_load_lhs (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return g->gimple_omp_atomic_load.lhs; } /* Return a pointer to the LHS of an atomic load. */ static inline tree * gimple_omp_atomic_load_lhs_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return &g->gimple_omp_atomic_load.lhs; } /* Set the RHS of an atomic load. */ static inline void gimple_omp_atomic_load_set_rhs (gimple g, tree rhs) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); g->gimple_omp_atomic_load.rhs = rhs; } /* Get the RHS of an atomic load. */ static inline tree gimple_omp_atomic_load_rhs (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return g->gimple_omp_atomic_load.rhs; } /* Return a pointer to the RHS of an atomic load. */ static inline tree * gimple_omp_atomic_load_rhs_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return &g->gimple_omp_atomic_load.rhs; } /* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline tree gimple_omp_continue_control_def (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return g->gimple_omp_continue.control_def; } /* The same as above, but return the address. */ static inline tree * gimple_omp_continue_control_def_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return &g->gimple_omp_continue.control_def; } /* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline void gimple_omp_continue_set_control_def (gimple g, tree def) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); g->gimple_omp_continue.control_def = def; } /* Get the use of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline tree gimple_omp_continue_control_use (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return g->gimple_omp_continue.control_use; } /* The same as above, but return the address. 
*/

static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return &g->gimple_omp_continue.control_use;
}

/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE.  */

static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  g->gimple_omp_continue.control_use = use;
}

/* Return a pointer to the return value for GIMPLE_RETURN GS.  */

static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op_ptr (gs, 0);
}

/* Return the return value for GIMPLE_RETURN GS.  */

static inline tree
gimple_return_retval (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op (gs, 0);
}

/* Set RETVAL to be the return value for GIMPLE_RETURN GS.  */

static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  gimple_set_op (gs, 0, retval);
}

/* Returns true when the gimple statement STMT is any of the OpenMP types.  */

#define CASE_GIMPLE_OMP				\
    case GIMPLE_OMP_PARALLEL:			\
    case GIMPLE_OMP_TASK:			\
    case GIMPLE_OMP_FOR:			\
    case GIMPLE_OMP_SECTIONS:			\
    case GIMPLE_OMP_SECTIONS_SWITCH:		\
    case GIMPLE_OMP_SINGLE:			\
    case GIMPLE_OMP_SECTION:			\
    case GIMPLE_OMP_MASTER:			\
    case GIMPLE_OMP_ORDERED:			\
    case GIMPLE_OMP_CRITICAL:			\
    case GIMPLE_OMP_RETURN:			\
    case GIMPLE_OMP_ATOMIC_LOAD:		\
    case GIMPLE_OMP_ATOMIC_STORE:		\
    case GIMPLE_OMP_CONTINUE

static inline bool
is_gimple_omp (const_gimple stmt)
{
  switch (gimple_code (stmt))
    {
    CASE_GIMPLE_OMP:
      return true;
    default:
      return false;
    }
}

/* Returns TRUE if statement G is a GIMPLE_NOP.  */

static inline bool
gimple_nop_p (const_gimple g)
{
  return gimple_code (g) == GIMPLE_NOP;
}

/* Return true if GS is a GIMPLE_RESX.  */

static inline bool
is_gimple_resx (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_RESX;
}

/* Return the predictor of GIMPLE_PREDICT statement GS.
*/ static inline enum br_predictor gimple_predict_predictor (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN); } /* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT. */ static inline void gimple_predict_set_predictor (gimple gs, enum br_predictor predictor) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN) | (unsigned) predictor; } /* Return the outcome of GIMPLE_PREDICT statement GS. */ static inline enum prediction gimple_predict_outcome (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN; } /* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME. */ static inline void gimple_predict_set_outcome (gimple gs, enum prediction outcome) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); if (outcome == TAKEN) gs->gsbase.subcode |= GF_PREDICT_TAKEN; else gs->gsbase.subcode &= ~GF_PREDICT_TAKEN; } /* Return the type of the main expression computed by STMT. Return void_type_node if the statement computes nothing. */ static inline tree gimple_expr_type (const_gimple stmt) { enum gimple_code code = gimple_code (stmt); if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL) { tree type; /* In general we want to pass out a type that can be substituted for both the RHS and the LHS types if there is a possibly useless conversion involved. That means returning the original RHS type as far as we can reconstruct it. */ if (code == GIMPLE_CALL) type = gimple_call_return_type (stmt); else switch (gimple_assign_rhs_code (stmt)) { case POINTER_PLUS_EXPR: type = TREE_TYPE (gimple_assign_rhs1 (stmt)); break; default: /* As fallback use the type of the LHS. */ type = TREE_TYPE (gimple_get_lhs (stmt)); break; } return type; } else if (code == GIMPLE_COND) return boolean_type_node; else return void_type_node; } /* Return a new iterator pointing to GIMPLE_SEQ's first statement. 
*/ static inline gimple_stmt_iterator gsi_start (gimple_seq seq) { gimple_stmt_iterator i; i.ptr = gimple_seq_first (seq); i.seq = seq; i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL; return i; } /* Return a new iterator pointing to the first statement in basic block BB. */ static inline gimple_stmt_iterator gsi_start_bb (basic_block bb) { gimple_stmt_iterator i; gimple_seq seq; seq = bb_seq (bb); i.ptr = gimple_seq_first (seq); i.seq = seq; i.bb = bb; return i; } /* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */ static inline gimple_stmt_iterator gsi_last (gimple_seq seq) { gimple_stmt_iterator i; i.ptr = gimple_seq_last (seq); i.seq = seq; i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL; return i; } /* Return a new iterator pointing to the last statement in basic block BB. */ static inline gimple_stmt_iterator gsi_last_bb (basic_block bb) { gimple_stmt_iterator i; gimple_seq seq; seq = bb_seq (bb); i.ptr = gimple_seq_last (seq); i.seq = seq; i.bb = bb; return i; } /* Return true if I is at the end of its sequence. */ static inline bool gsi_end_p (gimple_stmt_iterator i) { return i.ptr == NULL; } /* Return true if I is one statement before the end of its sequence. */ static inline bool gsi_one_before_end_p (gimple_stmt_iterator i) { return i.ptr != NULL && i.ptr->next == NULL; } /* Advance the iterator to the next gimple statement. */ static inline void gsi_next (gimple_stmt_iterator *i) { i->ptr = i->ptr->next; } /* Advance the iterator to the previous gimple statement. */ static inline void gsi_prev (gimple_stmt_iterator *i) { i->ptr = i->ptr->prev; } /* Return the current stmt. */ static inline gimple gsi_stmt (gimple_stmt_iterator i) { return i.ptr->stmt; } /* Return a block statement iterator that points to the first non-label statement in block BB. 
*/ static inline gimple_stmt_iterator gsi_after_labels (basic_block bb) { gimple_stmt_iterator gsi = gsi_start_bb (bb); while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL) gsi_next (&gsi); return gsi; } /* Advance the iterator to the next non-debug gimple statement. */ static inline void gsi_next_nondebug (gimple_stmt_iterator *i) { do { gsi_next (i); } while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i))); } /* Advance the iterator to the next non-debug gimple statement. */ static inline void gsi_prev_nondebug (gimple_stmt_iterator *i) { do { gsi_prev (i); } while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i))); } /* Return a new iterator pointing to the first non-debug statement in basic block BB. */ static inline gimple_stmt_iterator gsi_start_nondebug_bb (basic_block bb) { gimple_stmt_iterator i = gsi_start_bb (bb); if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) gsi_next_nondebug (&i); return i; } /* Return a new iterator pointing to the last non-debug statement in basic block BB. */ static inline gimple_stmt_iterator gsi_last_nondebug_bb (basic_block bb) { gimple_stmt_iterator i = gsi_last_bb (bb); if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) gsi_prev_nondebug (&i); return i; } /* Return a pointer to the current stmt. NOTE: You may want to use gsi_replace on the iterator itself, as this performs additional bookkeeping that will not be done if you simply assign through a pointer returned by gsi_stmt_ptr. */ static inline gimple * gsi_stmt_ptr (gimple_stmt_iterator *i) { return &i->ptr->stmt; } /* Return the basic block associated with this iterator. */ static inline basic_block gsi_bb (gimple_stmt_iterator i) { return i.bb; } /* Return the sequence associated with this iterator. */ static inline gimple_seq gsi_seq (gimple_stmt_iterator i) { return i.seq; } enum gsi_iterator_update { GSI_NEW_STMT, /* Only valid when single statement is added, move iterator to it. 
*/ GSI_SAME_STMT, /* Leave the iterator at the same statement. */ GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable for linking other statements in the same direction. */ }; /* In gimple-iterator.c */ gimple_stmt_iterator gsi_start_phis (basic_block); gimple_seq gsi_split_seq_after (gimple_stmt_iterator); gimple_seq gsi_split_seq_before (gimple_stmt_iterator *); void gsi_replace (gimple_stmt_iterator *, gimple, bool); void gsi_insert_before (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_after (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_remove (gimple_stmt_iterator *, bool); gimple_stmt_iterator gsi_for_stmt (gimple); void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *); void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *); void gsi_move_to_bb_end (gimple_stmt_iterator *, struct basic_block_def *); void gsi_insert_on_edge (edge, gimple); void gsi_insert_seq_on_edge (edge, gimple_seq); basic_block gsi_insert_on_edge_immediate (edge, gimple); basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq); void gsi_commit_one_edge_insert (edge, basic_block *); void gsi_commit_edge_inserts (void); gimple gimple_call_copy_skip_args (gimple, bitmap); /* Convenience routines to walk all statements of a gimple function. 
Note that this is useful exclusively before the code is converted into SSA form. Once the program is in SSA form, the standard operand interface should be used to analyze/modify statements. */ struct walk_stmt_info { /* Points to the current statement being walked. */ gimple_stmt_iterator gsi; /* Additional data that the callback functions may want to carry through the recursion. */ void *info; /* Pointer map used to mark visited tree nodes when calling walk_tree on each operand. If set to NULL, duplicate tree nodes will be visited more than once. */ struct pointer_set_t *pset; /* Indicates whether the operand being examined may be replaced with something that matches is_gimple_val (if true) or something slightly more complicated (if false). "Something" technically means the common subset of is_gimple_lvalue and is_gimple_rhs, but we never try to form anything more complicated than that, so we don't bother checking. Also note that CALLBACK should update this flag while walking the sub-expressions of a statement. For instance, when walking the statement 'foo (&var)', the flag VAL_ONLY will initially be set to true, however, when walking &var, the operand of that ADDR_EXPR does not need to be a GIMPLE value. */ bool val_only; /* True if we are currently walking the LHS of an assignment. */ bool is_lhs; /* Optional. Set to true by the callback functions if they made any changes. */ bool changed; /* True if we're interested in location information. */ bool want_locations; /* Operand returned by the callbacks. This is set when calling walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback returns non-NULL, this field will contain the tree returned by the last callback. */ tree callback_result; }; /* Callback for walk_gimple_stmt. Called for every statement found during traversal. The first argument points to the statement to walk. 
The second argument is a flag that the callback sets to 'true' if it the callback handled all the operands and sub-statements of the statement (the default value of this flag is 'false'). The third argument is an anonymous pointer to data to be used by the callback. */ typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *, struct walk_stmt_info *); gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *); #ifdef GATHER_STATISTICS /* Enum and arrays used for allocation stats. Keep in sync with gimple.c:gimple_alloc_kind_names. */ enum gimple_alloc_kind { gimple_alloc_kind_assign, /* Assignments. */ gimple_alloc_kind_phi, /* PHI nodes. */ gimple_alloc_kind_cond, /* Conditionals. */ gimple_alloc_kind_seq, /* Sequences. */ gimple_alloc_kind_rest, /* Everything else. */ gimple_alloc_kind_all }; extern int gimple_alloc_counts[]; extern int gimple_alloc_sizes[]; /* Return the allocation kind for a given stmt CODE. */ static inline enum gimple_alloc_kind gimple_alloc_kind (enum gimple_code code) { switch (code) { case GIMPLE_ASSIGN: return gimple_alloc_kind_assign; case GIMPLE_PHI: return gimple_alloc_kind_phi; case GIMPLE_COND: return gimple_alloc_kind_cond; default: return gimple_alloc_kind_rest; } } #endif /* GATHER_STATISTICS */ extern void dump_gimple_statistics (void); /* In gimple-fold.c. 
*/ void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree); tree gimple_fold_builtin (gimple); bool fold_stmt (gimple_stmt_iterator *); bool fold_stmt_inplace (gimple); tree maybe_fold_offset_to_address (location_t, tree, tree, tree); tree maybe_fold_offset_to_reference (location_t, tree, tree, tree); tree maybe_fold_stmt_addition (location_t, tree, tree, tree); tree get_symbol_constant_value (tree); tree canonicalize_constructor_val (tree); bool may_propagate_address_into_dereference (tree, tree); extern tree maybe_fold_and_comparisons (enum tree_code, tree, tree, enum tree_code, tree, tree); extern tree maybe_fold_or_comparisons (enum tree_code, tree, tree, enum tree_code, tree, tree); #endif /* GCC_GIMPLE_H */
potentialKernel.h
#ifndef POTENTIAL_KERNEL_H
#define POTENTIAL_KERNEL_H
#include "kernels.h"

/* Describe the primitive approximation two body kernel */

namespace pimc
{

// Two-body imaginary-time action kernel in the primitive approximation.
//
// V_t is the radial pair-potential type: it must be callable as (*V)(r)
// and provide radialDerivative(r) (used by the addForce* methods).
//
// The evaluate* methods accumulate sum over time slices t in the inclusive
// window `timeRange` and particle pairs drawn from `rangeA`/`rangeB` of
// V(|r_i(t) - r_j(t)|), and return the total scaled by timeStep().
// The two end slices of the window are weighted by 1/2 (trapezoid-like
// end-point rule, visible in `prefactor` below).
//
// "Rectangular" variants iterate the full rangeA x rangeB product;
// "Triangular" variants restrict to jParticle < iParticle so each
// unordered pair is counted once.
//
// tn is indexed as tn(particle, dimension, time) — this layout is fixed
// by the access pattern tn(iParticle, d, t) used throughout.
template<class V_t>
class primitiveApproximationTwoBodyKernel : public kernel2B
{
    public:

    // Store the shared pair potential; no other state is set up here.
    primitiveApproximationTwoBodyKernel(const std::shared_ptr<V_t> & V_) : V(V_) {};

    // Full rangeA x rangeB sum of V(r_ij) over the time window,
    // end slices weighted 0.5; result scaled by timeStep().
    virtual Real evaluateRectangular(
        const Eigen::Tensor<Real,3> & tn ,
        const std::array<int,2> & timeRange,
        const std::array<int,2> & rangeA,
        const std::array<int,2> & rangeB )
    {
        Real sum2b=0;
        //#pragma omp parallel for reduction(+:sum2b) schedule(static) collapse(3)
        for (int t=timeRange[0];t<=timeRange[1];t++)
            for (int iParticle=rangeA[0];iParticle<=rangeA[1];iParticle++)
            {
                for (int jParticle=rangeB[0];jParticle<=rangeB[1];jParticle++)
                {
                    Real r2=0;
                    // Squared separation; geometry().difference applies the
                    // simulation-box convention per dimension (presumably
                    // minimum image — defined outside this header).
                    for(int d=0;d<DIMENSIONS;d++)
                    {
                        Real diffd=geometry().difference( tn(iParticle,d,t) - tn(jParticle,d,t) ,d);
                        r2+= diffd * diffd;
                    }
                    // End slices of the window carry half weight.
                    Real prefactor= ( (t== timeRange[0]) or (t == timeRange[1]) ) ? 0.5 : 1 ;
                    sum2b+=(*V)(std::sqrt(r2))*prefactor;
                }
            }
        return sum2b*timeStep();
    }

    // Time derivative of the rectangular action: since the action is the
    // potential sum times timeStep(), the derivative here is just the
    // unscaled sum (action / timeStep()).
    virtual Real evaluateTimeDerivativeRectangular(
        const Eigen::Tensor<Real,3> & tn ,
        const std::array<int,2> & timeRange,
        const std::array<int,2> & rangeA,
        const std::array<int,2> & rangeB )
    {
        return evaluateRectangular(tn,timeRange,rangeA,rangeB)/timeStep();
    }

    // Same as above for the triangular (unordered-pair) sum.
    virtual Real evaluateTimeDerivativeTriangular(
        const Eigen::Tensor<Real,3> & tn ,
        const std::array<int,2> & timeRange,
        const std::array<int,2> & rangeA,
        const std::array<int,2> & rangeB )
    {
        return evaluateTriangular(tn,timeRange,rangeA,rangeB)/timeStep();
    }

    // Masked rectangular sum: each pair term is weighted by the average of
    // the two particles' mask products on slices t and t-1.
    // NOTE(review): assumes timeRange[0] >= 1 so that mask(·, t-1) is a
    // valid index — confirm against callers.
    virtual Real evaluateRectangular(
        const Eigen::Tensor<Real,3> & tn,
        const std::array<int,2> & timeRange,
        const std::array<int,2> & rangeA,
        const std::array<int,2> & rangeB,
        const mask_t & mask)
    {
        Real sum2b=0;
        //#pragma omp parallel for reduction(+:sum2b) schedule(static) collapse(3)
        for (int t=timeRange[0];t<=timeRange[1];t++)
            for (int iParticle=rangeA[0];iParticle<=rangeA[1];iParticle++)
            {
                for (int jParticle=rangeB[0];jParticle<=rangeB[1];jParticle++)
                {
                    Real r2=0;
                    for(int d=0;d<DIMENSIONS;d++)
                    {
                        Real diffd=geometry().difference( tn(iParticle,d,t) - tn(jParticle,d,t) ,d);
                        r2+= diffd * diffd;
                    }
                    // Link-averaged mask weight for the (i,j) pair.
                    Real ij_mask= 0.5*( mask(iParticle,t) * mask(jParticle,t) + mask(iParticle,t-1) * mask(jParticle,t-1) ) ;
                    // NOTE(review): `prefactor` is computed but never used in
                    // this masked variant (the masked triangular variant below
                    // does not compute it at all); end-slice weighting is
                    // instead applied via the clamp on ij_mask — presumably
                    // intentional, but worth confirming.
                    Real prefactor= ( (t== timeRange[0]) or (t == timeRange[1]) ) ? 0.5 : 1 ;
                    // End slices: cap the mask weight at the 0.5 end-point weight.
                    ij_mask = ((t == timeRange[0]) or (t==timeRange[1])) ? std::min(ij_mask,0.5) : ij_mask ;
                    sum2b+=(*V)(std::sqrt(r2))*ij_mask;
                }
            }
        return sum2b*timeStep();
    }

    // Masked triangular sum: unordered pairs (j < i), each term weighted by
    // the link-averaged mask, capped at 0.5 on the end slices.
    // NOTE(review): same mask(·, t-1) indexing assumption as above.
    virtual Real evaluateTriangular(
        const Eigen::Tensor<Real,3> & tn ,
        const std::array<int,2> & timeRange,
        const std::array<int,2> & rangeA,
        const std::array<int,2> & rangeB,
        const mask_t & mask )
    {
        Real sum2b=0;
        //#pragma omp parallel for reduction(+:sum2b) schedule(static) collapse(3)
        for (int t=timeRange[0];t<=timeRange[1];t++)
            for (int iParticle=rangeA[0];iParticle<=rangeA[1];iParticle++)
            {
                for (int jParticle=rangeB[0];jParticle< iParticle;jParticle++)
                {
                    Real r2=0;
                    for(int d=0;d<DIMENSIONS;d++)
                    {
                        Real diffd=geometry().difference( tn(iParticle,d,t) - tn(jParticle,d,t) ,d);
                        r2+= diffd * diffd;
                    }
                    Real ij_mask= 0.5*( mask(iParticle,t) * mask(jParticle,t) + mask(iParticle,t-1) * mask(jParticle,t-1) ) ;
                    ij_mask = ((t == timeRange[0]) or (t==timeRange[1])) ? std::min(ij_mask,0.5) : ij_mask ;
                    sum2b+=(*V)(std::sqrt(r2))*ij_mask;
                }
            }
        return sum2b*timeStep();
    }

    // Unmasked triangular sum: unordered pairs (j < i), end slices
    // weighted 0.5, result scaled by timeStep().
    virtual Real evaluateTriangular(
        const Eigen::Tensor<Real,3> & tn ,
        const std::array<int,2> & timeRange,
        const std::array<int,2> & rangeA,
        const std::array<int,2> & rangeB
     )
    {
        Real sum2b=0;
        //#pragma omp parallel for reduction(+:sum2b) schedule(static) collapse(3)
        for (int t=timeRange[0];t<=timeRange[1];t++)
            for (int iParticle=rangeA[0];iParticle<=rangeA[1];iParticle++)
            {
                for (int jParticle=rangeB[0];jParticle<iParticle;jParticle++)
                {
                    Real r2=0;
                    for(int d=0;d<DIMENSIONS;d++)
                    {
                        Real diffd=geometry().difference( tn(iParticle,d,t) - tn(jParticle,d,t), d );
                        r2+= diffd * diffd;
                    }
                    Real prefactor= ( (t== timeRange[0]) or (t == timeRange[1]) ) ? 0.5 : 1 ;
                    sum2b+=(*V)(std::sqrt(r2))*prefactor;
                }
            }
        return sum2b*timeStep();
    }

    // Accumulate pair forces dV/dr * (r_i - r_j)/|r| * timeStep() into
    // `forces` for all (i,j) in rangeA x rangeB: += on particle i, -= on
    // particle j (Newton's third law pairing).
    // NOTE(review): no guard against r == 0 (rInverse would be inf) —
    // presumably excluded by the potential/physics; confirm.
    virtual void addForceRectangular(const Eigen::Tensor<Real,3> & tn,
        const std::array<int,2> & timeRange,
        const std::array<int,2> & rangeA,
        const std::array<int,2> & rangeB,
        Eigen::Tensor<Real,3> & forces)
    {
        //#pragma omp parallel for schedule(static) collapse(3)
        for (int t=timeRange[0];t<=timeRange[1];t++)
            for (int i=rangeA[0];i<=rangeA[1];i++)
            {
                for (int j=rangeB[0];j<=rangeB[1];j++)
                {
                    Real r2=0;
                    // diff is sized 3; only the first DIMENSIONS entries are used.
                    std::array<Real,3> diff;
                    for(int d=0;d<DIMENSIONS;d++)
                    {
                        diff[d]=geometry().difference( tn(i,d,t) - tn(j,d,t) ,d);
                        r2+= diff[d] * diff[d];
                    }
                    auto r = std::sqrt(r2);
                    auto rInverse = 1./r;
                    auto dVdr = V->radialDerivative(r);
                    for(int d=0;d<DIMENSIONS;d++)
                    {
                        Real tmp=dVdr*diff[d]*rInverse*timeStep();
                        forces(i,d,t)+=tmp;
                        forces(j,d,t)-=tmp;
                    }
                }
            }
    }

    // Same as addForceRectangular but over unordered pairs (j < i).
    virtual void addForceTriangular(const Eigen::Tensor<Real,3> & tn,
        const std::array<int,2> & timeRange,
        const std::array<int,2> & rangeA,
        const std::array<int,2> & rangeB,
        Eigen::Tensor<Real,3> & forces)
    {
        //#pragma omp parallel for schedule(static) collapse(3)
        for (int t=timeRange[0];t<=timeRange[1];t++)
            for (int i=rangeA[0];i<=rangeA[1];i++)
            {
                for (int j=rangeB[0];j< i ;j++)
                {
                    Real r2=0;
                    std::array<Real,3> diff;
                    for(int d=0;d<DIMENSIONS;d++)
                    {
                        diff[d]=geometry().difference( tn(i,d,t) - tn(j,d,t) ,d);
                        r2+= diff[d] * diff[d];
                    }
                    auto r = std::sqrt(r2);
                    auto rInverse = 1./r;
                    auto dVdr = V->radialDerivative(r);
                    for(int d=0;d<DIMENSIONS;d++)
                    {
                        Real tmp=dVdr*diff[d]*rInverse*timeStep();
                        forces(i,d,t)+=tmp;
                        forces(j,d,t)-=tmp;
                    }
                }
            }
    }

    private:

    // Shared radial pair potential.
    std::shared_ptr<V_t> V;
};

}

#endif
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(12*t1+Ny+15,32)),floord(24*t2+Ny+11,32)),floord(24*t1-24*t2+Nz+Ny+13,32));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-62,64)),ceild(3*t1-126,128)),ceild(24*t2-Nz-499,512)),ceild(32*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(12*t1+Nx+15,512)),floord(24*t2+Nx+11,512)),floord(32*t3+Nx+19,512)),floord(24*t1-24*t2+Nz+Nx+13,512));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),8*t3+6),128*t4+126);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) { lbv=max(512*t4,4*t5+4); ubv=min(512*t4+511,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_unaryop__identity_int16_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int16_bool
// op(A') function:  GB_tran__identity_int16_bool

// C type:   int16_t
// A type:   bool
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the casted value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (bool -> int16_t, so false/true become 0/1)
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij)): load one entry of A, cast it, apply the operator,
// and store the result into C at position pC
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over the anz entries of A; each output depends only on
// the matching input, so the loop parallelizes with no synchronization.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// telling the caller to fall back to the generic implementation.
GrB_Info GB_unop__identity_int16_bool
(
    int16_t *restrict Cx,       // output array, anz entries
    const bool *restrict Ax,    // input array, anz entries
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The whole body lives in the shared template GB_unaryop_transpose.c, which
// is specialized here via the GB_* macros above (phase 2: numeric work).
GrB_Info GB_tran__identity_int16_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__ldexp_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ldexp_fp64) // A.*B function (eWiseMult): GB (_AemultB_01__ldexp_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__ldexp_fp64) // A.*B function (eWiseMult): GB (_AemultB_03__ldexp_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ldexp_fp64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__ldexp_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__ldexp_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ldexp_fp64) // C=scalar+B GB (_bind1st__ldexp_fp64) // C=scalar+B' GB (_bind1st_tran__ldexp_fp64) // C=A+scalar GB (_bind2nd__ldexp_fp64) // C=A'+scalar GB (_bind2nd_tran__ldexp_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = ldexp (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ldexp (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LDEXP || GxB_NO_FP64 || GxB_NO_LDEXP_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ldexp_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ldexp_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ldexp_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ldexp_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__ldexp_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ldexp_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__ldexp_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ldexp_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ldexp_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = ldexp (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ldexp_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = ldexp (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = ldexp (x, aij) ; \ } GrB_Info GB (_bind1st_tran__ldexp_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = ldexp (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__ldexp_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
relic_cp_rsa.c
/* * RELIC is an Efficient LIbrary for Cryptography * Copyright (C) 2007-2020 RELIC Authors * * This file is part of RELIC. RELIC is legal property of its developers, * whose names are not listed here. Please refer to the COPYRIGHT file * for contact information. * * RELIC is free software; you can redistribute it and/or modify it under the * terms of the version 2.1 (or later) of the GNU Lesser General Public License * as published by the Free Software Foundation; or version 2.0 of the Apache * License as published by the Apache Software Foundation. See the LICENSE files * for more details. * * RELIC is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the LICENSE files for more details. * * You should have received a copy of the GNU Lesser General Public or the * Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/> * or <https://www.apache.org/licenses/>. */ /** * @file * * Implementation of the RSA cryptosystem. * * @ingroup cp */ #include <string.h> #include "relic_core.h" #include "relic_conf.h" #include "relic_rand.h" #include "relic_bn.h" #include "relic_util.h" #include "relic_cp.h" #include "relic_md.h" #include "relic_multi.h" /*============================================================================*/ /* Private definitions */ /*============================================================================*/ /** * Length of chosen padding scheme. */ #if CP_RSAPD == PKCS1 #define RSA_PAD_LEN (11) #elif CP_RSAPD == PKCS2 #define RSA_PAD_LEN (2 * RLC_MD_LEN + 2) #else #define RSA_PAD_LEN (2) #endif /** * Identifier for encrypted messages. */ #define RSA_PUB (02) /** * Identifier for signed messages. */ #define RSA_PRV (01) /** * Byte used as padding unit. */ #define RSA_PAD (0xFF) /** * Byte used as padding unit in PSS signatures. */ #define RSA_PSS (0xBC) /** * Identifier for encryption. 
*/ #define RSA_ENC 1 /** * Identifier for decryption. */ #define RSA_DEC 2 /** * Identifier for signature. */ #define RSA_SIG 3 /** * Identifier for verification. */ #define RSA_VER 4 /** * Identifier for second encryption step. */ #define RSA_ENC_FIN 5 /** * Identifier for second sining step. */ #define RSA_SIG_FIN 6 /** * Identifier for signature of a precomputed hash. */ #define RSA_SIG_HASH 7 /** * Identifier for verification of a precomputed hash. */ #define RSA_VER_HASH 8 #if CP_RSAPD == BASIC /** * Applies or removes simple encryption padding. * * @param[out] m - the buffer to pad. * @param[out] p_len - the number of added pad bytes. * @param[in] m_len - the message length in bytes. * @param[in] k_len - the key length in bytes. * @param[in] operation - flag to indicate the operation type. * @return RLC_ERR if errors occurred, RLC_OK otherwise. */ static int pad_basic(bn_t m, int *p_len, int m_len, int k_len, int operation) { uint8_t pad = 0; int result = RLC_OK; bn_t t; RLC_TRY { bn_null(t); bn_new(t); switch (operation) { case RSA_ENC: case RSA_SIG: case RSA_SIG_HASH: /* EB = 00 | FF | D. */ bn_zero(m); bn_lsh(m, m, 8); bn_add_dig(m, m, RSA_PAD); /* Make room for the real message. */ bn_lsh(m, m, m_len * 8); break; case RSA_DEC: case RSA_VER: case RSA_VER_HASH: /* EB = 00 | FF | D. */ m_len = k_len - 1; bn_rsh(t, m, 8 * m_len); if (!bn_is_zero(t)) { result = RLC_ERR; } *p_len = 1; do { (*p_len)++; m_len--; bn_rsh(t, m, 8 * m_len); pad = (uint8_t)t->dp[0]; } while (pad == 0 && m_len > 0); if (pad != RSA_PAD) { result = RLC_ERR; } bn_mod_2b(m, m, (k_len - *p_len) * 8); break; } } RLC_CATCH_ANY { result = RLC_ERR; } RLC_FINALLY { bn_free(t); } return result; } #endif #if CP_RSAPD == PKCS1 /** * ASN.1 identifier of the hash function SHA-224. */ static const uint8_t sh224_id[] = { 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c }; /** * ASN.1 identifier of the hash function SHA-256. 
*/ static const uint8_t sh256_id[] = { 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20 }; /** * ASN.1 identifier of the hash function SHA-384. */ static const uint8_t sh384_id[] = { 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30 }; /** * ASN.1 identifier of the hash function SHA-512. */ static const uint8_t sh512_id[] = { 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40 }; /** * Returns a pointer to the ASN.1 identifier of a hash function according to the * PKCS#1 v1.5 padding standard. * * @param[in] md - the hash function. * @param[in, out] len - the length of the identifier. * @return The pointer to the hash function identifier. */ static uint8_t *hash_id(int md, int *len) { switch (md) { case SH224: *len = sizeof(sh224_id); return (uint8_t *)sh224_id; case SH256: *len = sizeof(sh256_id); return (uint8_t *)sh256_id; case SH384: *len = sizeof(sh384_id); return (uint8_t *)sh384_id; case SH512: *len = sizeof(sh512_id); return (uint8_t *)sh512_id; default: RLC_THROW(ERR_NO_VALID); return NULL; } } /** * Applies or removes a PKCS#1 v1.5 encryption padding. * * @param[out] m - the buffer to pad. * @param[out] p_len - the number of added pad bytes. * @param[in] m_len - the message length in bytes. * @param[in] k_len - the key length in bytes. * @param[in] operation - flag to indicate the operation type. * @return RLC_ERR if errors occurred, RLC_OK otherwise. */ static int pad_pkcs1(bn_t m, int *p_len, int m_len, int k_len, int operation) { uint8_t *id, pad = 0; int len, result = RLC_OK; bn_t t; bn_null(t); RLC_TRY { bn_new(t); switch (operation) { case RSA_ENC: /* EB = 00 | 02 | PS | 00 | D. 
*/ bn_zero(m); bn_lsh(m, m, 8); bn_add_dig(m, m, RSA_PUB); *p_len = k_len - 3 - m_len; for (int i = 0; i < *p_len; i++) { bn_lsh(m, m, 8); do { rand_bytes(&pad, 1); } while (pad == 0); bn_add_dig(m, m, pad); } bn_lsh(m, m, 8); bn_add_dig(m, m, 0); /* Make room for the real message. */ bn_lsh(m, m, m_len * 8); break; case RSA_DEC: m_len = k_len - 1; bn_rsh(t, m, 8 * m_len); if (!bn_is_zero(t)) { result = RLC_ERR; } *p_len = m_len; m_len--; bn_rsh(t, m, 8 * m_len); pad = (uint8_t)t->dp[0]; if (pad != RSA_PUB) { result = RLC_ERR; } do { m_len--; bn_rsh(t, m, 8 * m_len); pad = (uint8_t)t->dp[0]; } while (pad != 0 && m_len > 0); /* Remove padding and trailing zero. */ *p_len -= (m_len - 1); bn_mod_2b(m, m, (k_len - *p_len) * 8); break; case RSA_SIG: /* EB = 00 | 01 | PS | 00 | D. */ id = hash_id(MD_MAP, &len); bn_zero(m); bn_lsh(m, m, 8); bn_add_dig(m, m, RSA_PRV); *p_len = k_len - 3 - m_len - len; for (int i = 0; i < *p_len; i++) { bn_lsh(m, m, 8); bn_add_dig(m, m, RSA_PAD); } bn_lsh(m, m, 8); bn_add_dig(m, m, 0); bn_lsh(m, m, 8 * len); bn_read_bin(t, id, len); bn_add(m, m, t); /* Make room for the real message. */ bn_lsh(m, m, m_len * 8); break; case RSA_SIG_HASH: /* EB = 00 | 01 | PS | 00 | D. */ bn_zero(m); bn_lsh(m, m, 8); bn_add_dig(m, m, RSA_PRV); *p_len = k_len - 3 - m_len; for (int i = 0; i < *p_len; i++) { bn_lsh(m, m, 8); bn_add_dig(m, m, RSA_PAD); } bn_lsh(m, m, 8); bn_add_dig(m, m, 0); /* Make room for the real message. */ bn_lsh(m, m, m_len * 8); break; case RSA_VER: m_len = k_len - 1; bn_rsh(t, m, 8 * m_len); if (!bn_is_zero(t)) { result = RLC_ERR; } m_len--; bn_rsh(t, m, 8 * m_len); pad = (uint8_t)t->dp[0]; if (pad != RSA_PRV) { result = RLC_ERR; } do { m_len--; bn_rsh(t, m, 8 * m_len); pad = (uint8_t)t->dp[0]; } while (pad != 0 && m_len > 0); if (m_len == 0) { result = RLC_ERR; } /* Remove padding and trailing zero. 
*/ id = hash_id(MD_MAP, &len); m_len -= len; bn_rsh(t, m, m_len * 8); int r = 0; for (int i = 0; i < len; i++) { pad = (uint8_t)t->dp[0]; r |= pad - id[len - i - 1]; bn_rsh(t, t, 8); } *p_len = k_len - m_len; bn_mod_2b(m, m, m_len * 8); result = (r == 0 ? RLC_OK : RLC_ERR); break; case RSA_VER_HASH: m_len = k_len - 1; bn_rsh(t, m, 8 * m_len); if (!bn_is_zero(t)) { result = RLC_ERR; } m_len--; bn_rsh(t, m, 8 * m_len); pad = (uint8_t)t->dp[0]; if (pad != RSA_PRV) { result = RLC_ERR; } do { m_len--; bn_rsh(t, m, 8 * m_len); pad = (uint8_t)t->dp[0]; } while (pad != 0 && m_len > 0); if (m_len == 0) { result = RLC_ERR; } /* Remove padding and trailing zero. */ *p_len = k_len - m_len; bn_mod_2b(m, m, m_len * 8); break; } } RLC_CATCH_ANY { result = RLC_ERR; } RLC_FINALLY { bn_free(t); } return result; } #endif #if CP_RSAPD == PKCS2 /** * Applies or removes a PKCS#1 v2.1 encryption padding. * * @param[out] m - the buffer to pad. * @param[out] p_len - the number of added pad bytes. * @param[in] m_len - the message length in bytes. * @param[in] k_len - the key length in bytes. * @param[in] operation - flag to indicate the operation type. * @return RLC_ERR if errors occurred, RLC_OK otherwise. */ static int pad_pkcs2(bn_t m, int *p_len, int m_len, int k_len, int operation) { uint8_t pad, h1[RLC_MD_LEN], h2[RLC_MD_LEN]; /* Chia - MSVC does not allow dynamic stack arrays */ uint8_t *mask = (uint8_t *)calloc(k_len, sizeof(uint8_t)); int result = RLC_OK; bn_t t; bn_null(t); RLC_TRY { bn_new(t); switch (operation) { case RSA_ENC: /* DB = lHash | PS | 01 | D. */ md_map(h1, NULL, 0); bn_read_bin(m, h1, RLC_MD_LEN); *p_len = k_len - 2 * RLC_MD_LEN - 2 - m_len; bn_lsh(m, m, *p_len * 8); bn_lsh(m, m, 8); bn_add_dig(m, m, 0x01); /* Make room for the real message. */ bn_lsh(m, m, m_len * 8); break; case RSA_ENC_FIN: /* EB = 00 | maskedSeed | maskedDB. 
*/ rand_bytes(h1, RLC_MD_LEN); md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN); bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1); for (int i = 0; i < t->used; i++) { m->dp[i] ^= t->dp[i]; } bn_write_bin(mask, k_len - RLC_MD_LEN - 1, m); md_mgf(h2, RLC_MD_LEN, mask, k_len - RLC_MD_LEN - 1); for (int i = 0; i < RLC_MD_LEN; i++) { h1[i] ^= h2[i]; } bn_read_bin(t, h1, RLC_MD_LEN); bn_lsh(t, t, 8 * (k_len - RLC_MD_LEN - 1)); bn_add(t, t, m); bn_copy(m, t); break; case RSA_DEC: m_len = k_len - 1; bn_rsh(t, m, 8 * m_len); if (!bn_is_zero(t)) { result = RLC_ERR; } m_len -= RLC_MD_LEN; bn_rsh(t, m, 8 * m_len); bn_write_bin(h1, RLC_MD_LEN, t); bn_mod_2b(m, m, 8 * m_len); bn_write_bin(mask, m_len, m); md_mgf(h2, RLC_MD_LEN, mask, m_len); for (int i = 0; i < RLC_MD_LEN; i++) { h1[i] ^= h2[i]; } md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN); bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1); for (int i = 0; i < t->used; i++) { m->dp[i] ^= t->dp[i]; } m_len -= RLC_MD_LEN; bn_rsh(t, m, 8 * m_len); bn_write_bin(h2, RLC_MD_LEN, t); md_map(h1, NULL, 0); pad = 0; for (int i = 0; i < RLC_MD_LEN; i++) { pad |= h1[i] - h2[i]; } if (result == RLC_OK) { result = (pad ? RLC_ERR : RLC_OK); } bn_mod_2b(m, m, 8 * m_len); *p_len = bn_size_bin(m); (*p_len)--; bn_rsh(t, m, *p_len * 8); if (bn_cmp_dig(t, 1) != RLC_EQ) { result = RLC_ERR; } bn_mod_2b(m, m, *p_len * 8); *p_len = k_len - *p_len; break; case RSA_SIG: case RSA_SIG_HASH: /* M' = 00 00 00 00 00 00 00 00 | H(M). */ bn_zero(m); bn_lsh(m, m, 64); /* Make room for the real message. */ bn_lsh(m, m, RLC_MD_LEN * 8); break; case RSA_SIG_FIN: memset(mask, 0, 8); bn_write_bin(mask + 8, RLC_MD_LEN, m); md_map(h1, mask, RLC_MD_LEN + 8); bn_read_bin(m, h1, RLC_MD_LEN); md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN); bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1); t->dp[0] ^= 0x01; /* m_len is now the size in bits of the modulus. 
*/ bn_lsh(t, t, 8 * RLC_MD_LEN); bn_add(m, t, m); bn_lsh(m, m, 8); bn_add_dig(m, m, RSA_PSS); for (int i = m_len - 1; i < 8 * k_len; i++) { bn_set_bit(m, i, 0); } break; case RSA_VER: case RSA_VER_HASH: bn_mod_2b(t, m, 8); if (bn_cmp_dig(t, RSA_PSS) != RLC_EQ) { result = RLC_ERR; } else { for (int i = m_len; i < 8 * k_len; i++) { if (bn_get_bit(m, i) != 0) { result = RLC_ERR; } } bn_rsh(m, m, 8); bn_mod_2b(t, m, 8 * RLC_MD_LEN); bn_write_bin(h2, RLC_MD_LEN, t); bn_rsh(m, m, 8 * RLC_MD_LEN); bn_write_bin(h1, RLC_MD_LEN, t); md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN); bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1); for (int i = 0; i < t->used; i++) { m->dp[i] ^= t->dp[i]; } m->dp[0] ^= 0x01; for (int i = m_len - 1; i < 8 * k_len; i++) { bn_set_bit(m, i - ((RLC_MD_LEN + 1) * 8), 0); } if (!bn_is_zero(m)) { result = RLC_ERR; } bn_read_bin(m, h2, RLC_MD_LEN); *p_len = k_len - RLC_MD_LEN; } break; } } RLC_CATCH_ANY { result = RLC_ERR; } RLC_FINALLY { bn_free(t); } free(mask); return result; } #endif /*============================================================================*/ /* Public definitions */ /*============================================================================*/ int cp_rsa_gen(rsa_t pub, rsa_t prv, int bits) { bn_t t, r; int result = RLC_OK; if (pub == NULL || prv == NULL || bits == 0) { return RLC_ERR; } bn_null(t); bn_null(r); RLC_TRY { bn_new(t); bn_new(r); /* Generate different primes p and q. */ do { bn_gen_prime(prv->crt->p, bits / 2); bn_gen_prime(prv->crt->q, bits / 2); } while (bn_cmp(prv->crt->p, prv->crt->q) == RLC_EQ); /* Swap p and q so that p is smaller. */ if (bn_cmp(prv->crt->p, prv->crt->q) != RLC_LT) { bn_copy(t, prv->crt->p); bn_copy(prv->crt->p, prv->crt->q); bn_copy(prv->crt->q, t); } /* n = pq. */ bn_mul(pub->crt->n, prv->crt->p, prv->crt->q); bn_copy(prv->crt->n, pub->crt->n); bn_sub_dig(prv->crt->p, prv->crt->p, 1); bn_sub_dig(prv->crt->q, prv->crt->q, 1); /* phi(n) = (p - 1)(q - 1). 
*/ bn_mul(t, prv->crt->p, prv->crt->q); bn_set_2b(pub->e, 16); bn_add_dig(pub->e, pub->e, 1); #if !defined(CP_CRT) /* d = e^(-1) mod phi(n). */ bn_gcd_ext(r, prv->d, NULL, pub->e, t); if (bn_sign(prv->d) == RLC_NEG) { bn_add(prv->d, prv->d, t); } if (bn_cmp_dig(r, 1) == RLC_EQ) { /* Restore p and q. */ bn_add_dig(prv->crt->p, prv->crt->p, 1); bn_add_dig(prv->crt->q, prv->crt->q, 1); result = RLC_OK; } #else /* d = e^(-1) mod phi(n). */ bn_gcd_ext(r, prv->d, NULL, pub->e, t); if (bn_sign(prv->d) == RLC_NEG) { bn_add(prv->d, prv->d, t); } if (bn_cmp_dig(r, 1) == RLC_EQ) { /* dP = d mod (p - 1). */ bn_mod(prv->crt->dp, prv->d, prv->crt->p); /* dQ = d mod (q - 1). */ bn_mod(prv->crt->dq, prv->d, prv->crt->q); /* Restore p and q. */ bn_add_dig(prv->crt->p, prv->crt->p, 1); bn_add_dig(prv->crt->q, prv->crt->q, 1); /* qInv = q^(-1) mod p. */ bn_mod_inv(prv->crt->qi, prv->crt->q, prv->crt->p); result = RLC_OK; } #endif /* CP_CRT */ } RLC_CATCH_ANY { result = RLC_ERR; } RLC_FINALLY { bn_free(t); bn_free(r); } return result; } int cp_rsa_enc(uint8_t *out, int *out_len, uint8_t *in, int in_len, rsa_t pub) { bn_t m, eb; int size, pad_len, result = RLC_OK; bn_null(m); bn_null(eb); size = bn_size_bin(pub->crt->n); if (pub == NULL || in_len <= 0 || in_len > (size - RSA_PAD_LEN)) { return RLC_ERR; } RLC_TRY { bn_new(m); bn_new(eb); bn_zero(m); bn_zero(eb); #if CP_RSAPD == BASIC if (pad_basic(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) { #elif CP_RSAPD == PKCS1 if (pad_pkcs1(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) { #elif CP_RSAPD == PKCS2 if (pad_pkcs2(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) { #endif bn_read_bin(m, in, in_len); bn_add(eb, eb, m); #if CP_RSAPD == PKCS2 pad_pkcs2(eb, &pad_len, in_len, size, RSA_ENC_FIN); #endif bn_mxp(eb, eb, pub->e, pub->crt->n); if (size <= *out_len) { *out_len = size; memset(out, 0, *out_len); bn_write_bin(out, size, eb); } else { result = RLC_ERR; } } else { result = RLC_ERR; } } RLC_CATCH_ANY { result = RLC_ERR; } 
RLC_FINALLY { bn_free(m); bn_free(eb); } return result; } int cp_rsa_dec(uint8_t *out, int *out_len, uint8_t *in, int in_len, rsa_t prv) { bn_t m, eb; int size, pad_len, result = RLC_OK; bn_null(m); bn_null(eb); size = bn_size_bin(prv->crt->n); if (prv == NULL || in_len != size || in_len < RSA_PAD_LEN) { return RLC_ERR; } RLC_TRY { bn_new(m); bn_new(eb); bn_read_bin(eb, in, in_len); #if !defined(CP_CRT) bn_mxp(eb, eb, prv->d, prv->crt->n); #else bn_copy(m, eb); #if MULTI == OPENMP omp_set_num_threads(CORES); #pragma omp parallel copyin(core_ctx) firstprivate(prv) { #pragma omp sections { #pragma omp section { #endif /* m1 = c^dP mod p. */ bn_mxp(eb, eb, prv->crt->dp, prv->crt->p); #if MULTI == OPENMP } #pragma omp section { #endif /* m2 = c^dQ mod q. */ bn_mxp(m, m, prv->crt->dq, prv->crt->q); #if MULTI == OPENMP } } } #endif /* m1 = m1 - m2 mod p. */ bn_sub(eb, eb, m); while (bn_sign(eb) == RLC_NEG) { bn_add(eb, eb, prv->crt->p); } bn_mod(eb, eb, prv->crt->p); /* m1 = qInv(m1 - m2) mod p. */ bn_mul(eb, eb, prv->crt->qi); bn_mod(eb, eb, prv->crt->p); /* m = m2 + m1 * q. */ bn_mul(eb, eb, prv->crt->q); bn_add(eb, eb, m); #endif /* CP_CRT */ if (bn_cmp(eb, prv->crt->n) != RLC_LT) { result = RLC_ERR; } #if CP_RSAPD == BASIC if (pad_basic(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) { #elif CP_RSAPD == PKCS1 if (pad_pkcs1(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) { #elif CP_RSAPD == PKCS2 if (pad_pkcs2(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) { #endif size = size - pad_len; if (size <= *out_len) { memset(out, 0, size); bn_write_bin(out, size, eb); *out_len = size; } else { result = RLC_ERR; } } else { result = RLC_ERR; } } RLC_CATCH_ANY { result = RLC_ERR; } RLC_FINALLY { bn_free(m); bn_free(eb); } return result; } int cp_rsa_sig(uint8_t *sig, int *sig_len, uint8_t *msg, int msg_len, int hash, rsa_t prv) { bn_t m, eb; int pad_len, size, result = RLC_OK; uint8_t h[RLC_MD_LEN]; if (prv == NULL || msg_len < 0) { return RLC_ERR; } pad_len = (!hash ? 
RLC_MD_LEN : msg_len); #if CP_RSAPD == PKCS2 size = bn_bits(prv->crt->n) - 1; size = (size / 8) + (size % 8 > 0); if (pad_len > (size - 2)) { return RLC_ERR; } #else size = bn_size_bin(prv->crt->n); if (pad_len > (size - RSA_PAD_LEN)) { return RLC_ERR; } #endif bn_null(m); bn_null(eb); RLC_TRY { bn_new(m); bn_new(eb); bn_zero(m); bn_zero(eb); int operation = (!hash ? RSA_SIG : RSA_SIG_HASH); #if CP_RSAPD == BASIC if (pad_basic(eb, &pad_len, pad_len, size, operation) == RLC_OK) { #elif CP_RSAPD == PKCS1 if (pad_pkcs1(eb, &pad_len, pad_len, size, operation) == RLC_OK) { #elif CP_RSAPD == PKCS2 if (pad_pkcs2(eb, &pad_len, pad_len, size, operation) == RLC_OK) { #endif if (!hash) { md_map(h, msg, msg_len); bn_read_bin(m, h, RLC_MD_LEN); bn_add(eb, eb, m); } else { bn_read_bin(m, msg, msg_len); bn_add(eb, eb, m); } #if CP_RSAPD == PKCS2 pad_pkcs2(eb, &pad_len, bn_bits(prv->crt->n), size, RSA_SIG_FIN); #endif bn_copy(m, eb); #if !defined(CP_CRT) bn_mxp(eb, eb, prv->d, prv->crt->n); #else /* CP_CRT */ #if MULTI == OPENMP omp_set_num_threads(CORES); #pragma omp parallel copyin(core_ctx) firstprivate(prv) { #pragma omp sections { #pragma omp section { #endif /* m1 = c^dP mod p. */ bn_mxp(eb, eb, prv->crt->dp, prv->crt->p); #if MULTI == OPENMP } #pragma omp section { #endif /* m2 = c^dQ mod q. */ bn_mxp(m, m, prv->crt->dq, prv->crt->q); #if MULTI == OPENMP } } } #endif /* m1 = m1 - m2 mod p. */ bn_sub(eb, eb, m); while (bn_sign(eb) == RLC_NEG) { bn_add(eb, eb, prv->crt->p); } bn_mod(eb, eb, prv->crt->p); /* m1 = qInv(m1 - m2) mod p. */ bn_mul(eb, eb, prv->crt->qi); bn_mod(eb, eb, prv->crt->p); /* m = m2 + m1 * q. 
*/ bn_mul(eb, eb, prv->crt->q); bn_add(eb, eb, m); bn_mod(eb, eb, prv->crt->n); #endif /* CP_CRT */ size = bn_size_bin(prv->crt->n); if (size <= *sig_len) { memset(sig, 0, size); bn_write_bin(sig, size, eb); *sig_len = size; } else { result = RLC_ERR; } } else { result = RLC_ERR; } } RLC_CATCH_ANY { RLC_THROW(ERR_CAUGHT); } RLC_FINALLY { bn_free(m); bn_free(eb); } return result; } int cp_rsa_ver(uint8_t *sig, int sig_len, uint8_t *msg, int msg_len, int hash, rsa_t pub) { bn_t m, eb; int size, pad_len, result; uint8_t *h1 = RLC_ALLOCA(uint8_t, RLC_MAX(msg_len, RLC_MD_LEN) + 8); uint8_t *h2 = RLC_ALLOCA(uint8_t, RLC_MAX(msg_len, RLC_MD_LEN)); /* We suppose that the signature is invalid. */ result = 0; if (h1 == NULL || h2 == NULL) { RLC_FREE(h1); RLC_FREE(h2); return 0; } if (pub == NULL || msg_len < 0) { return 0; } pad_len = (!hash ? RLC_MD_LEN : msg_len); #if CP_RSAPD == PKCS2 size = bn_bits(pub->crt->n) - 1; if (size % 8 == 0) { size = size / 8 - 1; } else { size = bn_size_bin(pub->crt->n); } if (pad_len > (size - 2)) { return 0; } #else size = bn_size_bin(pub->crt->n); if (pad_len > (size - RSA_PAD_LEN)) { return 0; } #endif bn_null(m); bn_null(eb); RLC_TRY { bn_new(m); bn_new(eb); bn_read_bin(eb, sig, sig_len); bn_mxp(eb, eb, pub->e, pub->crt->n); int operation = (!hash ? RSA_VER : RSA_VER_HASH); #if CP_RSAPD == BASIC if (pad_basic(eb, &pad_len, RLC_MD_LEN, size, operation) == RLC_OK) { #elif CP_RSAPD == PKCS1 if (pad_pkcs1(eb, &pad_len, RLC_MD_LEN, size, operation) == RLC_OK) { #elif CP_RSAPD == PKCS2 if (pad_pkcs2(eb, &pad_len, bn_bits(pub->crt->n), size, operation) == RLC_OK) { #endif #if CP_RSAPD == PKCS2 memset(h1, 0, 8); if (!hash) { md_map(h1 + 8, msg, msg_len); md_map(h2, h1, RLC_MD_LEN + 8); memset(h1, 0, RLC_MD_LEN); bn_write_bin(h1, size - pad_len, eb); /* Everything went ok, so signature status is changed. 
*/ result = util_cmp_const(h1, h2, RLC_MD_LEN); } else { memcpy(h1 + 8, msg, msg_len); md_map(h2, h1, RLC_MD_LEN + 8); memset(h1, 0, msg_len); bn_write_bin(h1, size - pad_len, eb); /* Everything went ok, so signature status is changed. */ result = util_cmp_const(h1, h2, msg_len); } #else memset(h1, 0, RLC_MAX(msg_len, RLC_MD_LEN)); bn_write_bin(h1, size - pad_len, eb); if (!hash) { md_map(h2, msg, msg_len); /* Everything went ok, so signature status is changed. */ result = util_cmp_const(h1, h2, RLC_MD_LEN); } else { /* Everything went ok, so signature status is changed. */ result = util_cmp_const(h1, msg, msg_len); } #endif result = (result == RLC_EQ ? 1 : 0); } else { result = 0; } } RLC_CATCH_ANY { result = 0; } RLC_FINALLY { bn_free(m); bn_free(eb); RLC_FREE(h1); RLC_FREE(h2); } return result; }
simd_utils_sse_int32.h
/* * Project : SIMD_Utils * Version : 0.2.2 * Author : JishinMaster * Licence : BSD-2 */ #pragma once #include <stdint.h> #ifndef ARM #include <immintrin.h> #else #include "sse2neon_wrapper.h" #endif static inline void add128s(int32_t *src1, int32_t *src2, int32_t *dst, int len) { int stop_len = len / SSE_LEN_INT32; stop_len *= SSE_LEN_INT32; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), SSE_LEN_BYTES)) { for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { _mm_store_si128((__m128i *) (dst + i), _mm_add_epi32(_mm_load_si128((__m128i *) (src1 + i)), _mm_load_si128((__m128i *) (src2 + i)))); } } else { for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { _mm_storeu_si128((__m128i *) (dst + i), _mm_add_epi32(_mm_loadu_si128((__m128i *) (src1 + i)), _mm_loadu_si128((__m128i *) (src2 + i)))); } } for (int i = stop_len; i < len; i++) { dst[i] = src1[i] + src2[i]; } } // result is wrong, the instruction casts to 64bit #if 0 static inline void mul128s(int32_t *src1, int32_t *src2, int32_t *dst, int len) { int stop_len = len / SSE_LEN_INT32; stop_len *= SSE_LEN_INT32; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), SSE_LEN_BYTES)) { for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { _mm_store_si128((__m128i *) dst + i, _mm_mul_epi32(_mm_load_si128((__m128i *) (src1 + i)), _mm_load_si128((__m128i *) (src2 + i)))); } } else { for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { _mm_storeu_si128((__m128i *) dst + i, _mm_mul_epi32(_mm_loadu_si128((__m128i *) (src1 + i)), _mm_loadu_si128((__m128i *) (src2 + i)))); } } for (int i = stop_len; i < len; i++) { dst[i] = src1[i] * src2[i]; } } #endif static inline void sub128s(int32_t *src1, int32_t *src2, int32_t *dst, int len) { int stop_len = len / SSE_LEN_INT32; stop_len *= SSE_LEN_INT32; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), SSE_LEN_BYTES)) { for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { _mm_store_si128((__m128i *) (dst + i), 
_mm_sub_epi32(_mm_load_si128((__m128i *) (src1 + i)), _mm_load_si128((__m128i *) (src2 + i)))); } } else { for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { _mm_storeu_si128((__m128i *) (dst + i), _mm_sub_epi32(_mm_loadu_si128((__m128i *) (src1 + i)), _mm_loadu_si128((__m128i *) (src2 + i)))); } } for (int i = stop_len; i < len; i++) { dst[i] = src1[i] - src2[i]; } } static inline void addc128s(int32_t *src, int32_t value, int32_t *dst, int len) { int stop_len = len / SSE_LEN_INT32; stop_len *= SSE_LEN_INT32; const v4si tmp = _mm_set1_epi32(value); if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), SSE_LEN_BYTES)) { for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { _mm_store_si128((__m128i *) (dst + i), _mm_add_epi32(tmp, _mm_load_si128((__m128i *) (src + i)))); } } else { for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { _mm_storeu_si128((__m128i *) (dst + i), _mm_add_epi32(tmp, _mm_loadu_si128((__m128i *) (src + i)))); } } for (int i = stop_len; i < len; i++) { dst[i] = src[i] + value; } } static inline void vectorSlope128s(int *dst, int len, int offset, int slope) { v4si coef = _mm_set_epi32(3 * slope, 2 * slope, slope, 0); v4si slope8_vec = _mm_set1_epi32(8 * slope); v4si curVal = _mm_add_epi32(_mm_set1_epi32(offset), coef); v4si curVal2 = _mm_add_epi32(_mm_set1_epi32(offset), coef); curVal2 = _mm_add_epi32(curVal2, _mm_set1_epi32(4 * slope)); int stop_len = len / (2 * SSE_LEN_INT32); stop_len *= (2 * SSE_LEN_INT32); if (isAligned((uintptr_t) (dst), SSE_LEN_BYTES)) { _mm_store_si128((__m128i *) dst, curVal); _mm_store_si128((__m128i *) (dst + SSE_LEN_INT32), curVal2); } else { _mm_storeu_si128((__m128i *) dst, curVal); _mm_storeu_si128((__m128i *) (dst + SSE_LEN_INT32), curVal2); } if (isAligned((uintptr_t) (dst), SSE_LEN_BYTES)) { for (int i = 2 * SSE_LEN_INT32; i < stop_len; i += 2 * SSE_LEN_INT32) { curVal = _mm_add_epi32(curVal, slope8_vec); _mm_store_si128((__m128i *) (dst + i), curVal); curVal2 = _mm_add_epi32(curVal2, slope8_vec); 
_mm_store_si128((__m128i *) (dst + i + SSE_LEN_INT32), curVal2); } } else { for (int i = 2 * SSE_LEN_INT32; i < stop_len; i += 2 * SSE_LEN_INT32) { curVal = _mm_add_epi32(curVal, slope8_vec); _mm_storeu_si128((__m128i *) (dst + i), curVal); curVal2 = _mm_add_epi32(curVal2, slope8_vec); _mm_storeu_si128((__m128i *) (dst + i + SSE_LEN_INT32), curVal2); } } for (int i = stop_len; i < len; i++) { dst[i] = offset + slope * i; } } static inline void sum128s(int32_t *src, int32_t *dst, int len) { int stop_len = len / (2 * SSE_LEN_INT32); stop_len *= (2 * SSE_LEN_INT32); __attribute__((aligned(SSE_LEN_BYTES))) int32_t accumulate[SSE_LEN_INT32] = {0, 0, 0, 0}; int32_t tmp_acc = 0; v4si vec_acc1 = _mm_setzero_si128(); // initialize the vector accumulator v4si vec_acc2 = _mm_setzero_si128(); // initialize the vector accumulator if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), SSE_LEN_BYTES)) { for (int i = 0; i < stop_len; i += 2 * SSE_LEN_INT32) { v4si vec_tmp1 = _mm_load_si128((__m128i *) (src + i)); vec_acc1 = _mm_add_epi32(vec_acc1, vec_tmp1); v4si vec_tmp2 = _mm_load_si128((__m128i *) (src + i + SSE_LEN_INT32)); vec_acc2 = _mm_add_epi32(vec_acc2, vec_tmp2); } } else { for (int i = 0; i < stop_len; i += 2 * SSE_LEN_INT32) { v4si vec_tmp1 = _mm_loadu_si128((__m128i *) (src + i)); vec_acc1 = _mm_add_epi32(vec_acc1, vec_tmp1); v4si vec_tmp2 = _mm_load_si128((__m128i *) (src + i + SSE_LEN_INT32)); vec_acc2 = _mm_add_epi32(vec_acc2, vec_tmp2); } } vec_acc1 = _mm_add_epi32(vec_acc1, vec_acc2); _mm_store_si128((__m128i *) accumulate, vec_acc1); for (int i = stop_len; i < len; i++) { tmp_acc += src[i]; } tmp_acc = tmp_acc + accumulate[0] + accumulate[1] + accumulate[2] + accumulate[3]; *dst = tmp_acc; } // Experimental static inline void copy128s(int32_t *src, int32_t *dst, int len) { int stop_len = len / SSE_LEN_INT32; stop_len *= SSE_LEN_INT32; #ifdef OMP #pragma omp parallel for schedule(auto) #endif for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { 
_mm_store_si128((__m128i *) (dst + i), _mm_load_si128((__m128i *) (src + i))); } for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } static inline void copy128s_2(int32_t *src, int32_t *dst, int len) { int stop_len = len / (2 * SSE_LEN_INT32); stop_len *= (2 * SSE_LEN_INT32); #ifdef OMP #pragma omp parallel for schedule(auto) #endif for (int i = 0; i < stop_len; i += 2 * SSE_LEN_INT32) { __m128i tmp1 = _mm_load_si128((__m128i *) (src + i)); __m128i tmp2 = _mm_load_si128((__m128i *) (src + i + SSE_LEN_INT32)); _mm_store_si128((__m128i *) (dst + i), tmp1); _mm_store_si128((__m128i *) (dst + i + SSE_LEN_INT32), tmp2); } for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } static inline void fast_copy128s(int32_t *src, int32_t *dst, int len) { int stop_len = len / SSE_LEN_INT32; stop_len *= SSE_LEN_INT32; #ifdef OMP #pragma omp parallel for schedule(auto) #endif for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { _mm_stream_si128((__m128i *) (dst + i), _mm_stream_load_si128((__m128i *) (src + i))); } _mm_mfence(); for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } static inline void fast_copy128s_2(int32_t *src, int32_t *dst, int len) { int stop_len = len / (2 * SSE_LEN_INT32); stop_len *= (2 * SSE_LEN_INT32); #ifdef OMP #pragma omp parallel for schedule(auto) #endif for (int i = 0; i < stop_len; i += 2 * SSE_LEN_INT32) { __m128i tmp1 = _mm_stream_load_si128((__m128i *) (src + i)); __m128i tmp2 = _mm_stream_load_si128((__m128i *) (src + i + SSE_LEN_INT32)); _mm_stream_si128((__m128i *) (dst + i), tmp1); _mm_stream_si128((__m128i *) (dst + i + SSE_LEN_INT32), tmp2); } _mm_mfence(); for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } static inline void fast_copy128s_4(int32_t *src, int32_t *dst, int len) { int stop_len = len / (4 * SSE_LEN_INT32); stop_len *= (4 * SSE_LEN_INT32); #ifdef OMP #pragma omp parallel for schedule(auto) #endif for (int i = 0; i < stop_len; i += 4 * SSE_LEN_INT32) { __m128i tmp1 = _mm_stream_load_si128((__m128i *) 
(src + i)); __m128i tmp2 = _mm_stream_load_si128((__m128i *) (src + i + SSE_LEN_INT32)); __m128i tmp3 = _mm_stream_load_si128((__m128i *) (src + i + 2 * SSE_LEN_INT32)); __m128i tmp4 = _mm_stream_load_si128((__m128i *) (src + i + 3 * SSE_LEN_INT32)); _mm_stream_si128((__m128i *) (dst + i), tmp1); _mm_stream_si128((__m128i *) (dst + i + SSE_LEN_INT32), tmp2); _mm_stream_si128((__m128i *) (dst + i + 2 * SSE_LEN_INT32), tmp3); _mm_stream_si128((__m128i *) (dst + i + 3 * SSE_LEN_INT32), tmp4); } _mm_mfence(); for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } // Adapted from NEON2SSE (does not exists for X86) static inline __m128i _mm_absdiff_epi16(__m128i a, __m128i b) { #ifndef ARM __m128i cmp, difab, difba; cmp = _mm_cmpgt_epi16(a, b); difab = _mm_sub_epi16(a, b); difba = _mm_sub_epi16(b, a); difab = _mm_and_si128(cmp, difab); difba = _mm_andnot_si128(cmp, difba); return _mm_or_si128(difab, difba); #else return vreinterpretq_m128i_s16(vabdq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); #endif } // Adapted from NEON2SSE (does not exists for X86) static inline __m128i _mm_absdiff_epi32(__m128i a, __m128i b) { #ifndef ARM __m128i cmp, difab, difba; cmp = _mm_cmpgt_epi32(a, b); difab = _mm_sub_epi32(a, b); difba = _mm_sub_epi32(b, a); difab = _mm_and_si128(cmp, difab); difba = _mm_andnot_si128(cmp, difba); return _mm_or_si128(difab, difba); #else return vreinterpretq_m128i_s32(vabdq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); #endif } static inline __m128i _mm_absdiff_epi8(__m128i a, __m128i b) { #ifndef ARM __m128i cmp, difab, difba; cmp = _mm_cmpgt_epi8(a, b); difab = _mm_sub_epi8(a, b); difba = _mm_sub_epi8(b, a); difab = _mm_and_si128(cmp, difab); difba = _mm_andnot_si128(cmp, difba); return _mm_or_si128(difab, difba); #else return vreinterpretq_m128i_s8(vabdq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); #endif } static inline void absdiff16s_128s(int16_t *src1, int16_t *src2, int16_t *dst, int len) { int 
stop_len = len / SSE_LEN_INT16; stop_len *= SSE_LEN_INT16; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), SSE_LEN_BYTES)) { for (int i = 0; i < stop_len; i += SSE_LEN_INT16) { __m128i a = _mm_load_si128((__m128i *) (src1 + i)); __m128i b = _mm_load_si128((__m128i *) (src2 + i)); _mm_store_si128((__m128i *) (dst + i), _mm_absdiff_epi16(a, b)); } } else { for (int i = 0; i < stop_len; i += SSE_LEN_INT16) { __m128i a = _mm_loadu_si128((__m128i *) (src1 + i)); __m128i b = _mm_loadu_si128((__m128i *) (src2 + i)); _mm_storeu_si128((__m128i *) (dst + i), _mm_absdiff_epi16(a, b)); } } for (int i = stop_len; i < len; i++) { dst[i] = abs(src1[i] - src2[i]); } } /* static inline void print8i(__m128i v) { int16_t *p = (int16_t *) &v; #ifndef __SSE2__ _mm_empty(); #endif printf("[%d, %d, %d, %d,%d, %d, %d, %d]", p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); }*/ static inline void powerspect16s_128s_interleaved(complex16s_t *src, int32_t *dst, int len) { int stop_len = len / SSE_LEN_INT32; stop_len *= SSE_LEN_INT32; int j = 0; if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), SSE_LEN_BYTES)) { for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { __m128i reim = _mm_load_si128((__m128i *) ((const int16_t *) src + j)); // print8i(reim); printf("\n"); _mm_store_si128((__m128i *) (dst + i), _mm_madd_epi16(reim, reim)); j += SSE_LEN_INT16; } } else { for (int i = 0; i < stop_len; i += SSE_LEN_INT32) { __m128i reim = _mm_loadu_si128((__m128i *) ((const int16_t *) src + j)); _mm_storeu_si128((__m128i *) (dst + i), _mm_madd_epi16(reim, reim)); j += SSE_LEN_INT16; } } for (int i = stop_len; i < len; i++) { dst[i] = (int32_t) src[i].re * (int32_t) src[i].re + (int32_t) src[i].im * (int32_t) src[i].im; } }
munit.c
/* Copyright (c) 2013-2018 Evan Nemerson <evan@nemerson.com> * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /*** Configuration ***/ /* This is just where the output from the test goes. It's really just * meant to let you choose stdout or stderr, but if anyone really want * to direct it to a file let me know, it would be fairly easy to * support. */ #if !defined(MUNIT_OUTPUT_FILE) # define MUNIT_OUTPUT_FILE stdout #endif /* This is a bit more useful; it tells µnit how to format the seconds in * timed tests. If your tests run for longer you might want to reduce * it, and if your computer is really fast and your tests are tiny you * can increase it. */ #if !defined(MUNIT_TEST_TIME_FORMAT) # define MUNIT_TEST_TIME_FORMAT "0.8f" #endif /* If you have long test names you might want to consider bumping * this. The result information takes 43 characters. 
*/ #if !defined(MUNIT_TEST_NAME_LEN) # define MUNIT_TEST_NAME_LEN 37 #endif /* If you don't like the timing information, you can disable it by * defining MUNIT_DISABLE_TIMING. */ #if !defined(MUNIT_DISABLE_TIMING) # define MUNIT_ENABLE_TIMING #endif /*** End configuration ***/ #if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L) # undef _POSIX_C_SOURCE #endif #if !defined(_POSIX_C_SOURCE) # define _POSIX_C_SOURCE 200809L #endif /* Solaris freaks out if you try to use a POSIX or SUS standard without * the "right" C standard. */ #if defined(_XOPEN_SOURCE) # undef _XOPEN_SOURCE #endif #if defined(__STDC_VERSION__) # if __STDC_VERSION__ >= 201112L # define _XOPEN_SOURCE 700 # elif __STDC_VERSION__ >= 199901L # define _XOPEN_SOURCE 600 # endif #endif /* Because, according to Microsoft, POSIX is deprecated. You've got * to appreciate the chutzpah. */ #if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE) # define _CRT_NONSTDC_NO_DEPRECATE #endif #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) # include <stdbool.h> #elif defined(_WIN32) /* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */ #endif #include <limits.h> #include <time.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <setjmp.h> #if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32) #define MUNIT_NL_LANGINFO #include <locale.h> #include <langinfo.h> #include <strings.h> #endif #if !defined(_WIN32) # include <unistd.h> # include <sys/types.h> # include <sys/wait.h> #else # include <windows.h> # include <io.h> # include <fcntl.h> # if !defined(STDERR_FILENO) # define STDERR_FILENO _fileno(stderr) # endif #endif #include "munit.h" #define MUNIT_STRINGIFY(x) #x #define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x) #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__) # define MUNIT_THREAD_LOCAL __thread #elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || 
defined(_Thread_local) # define MUNIT_THREAD_LOCAL _Thread_local #elif defined(_WIN32) # define MUNIT_THREAD_LOCAL __declspec(thread) #endif /* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... } * while (0)', or 'do { ... } while (1)'. I'm pretty sure nobody * at Microsoft compiles with /W4. */ #if defined(_MSC_VER) && (_MSC_VER <= 1800) #pragma warning(disable: 4127) #endif #if defined(_WIN32) || defined(__EMSCRIPTEN__) # define MUNIT_NO_FORK #endif #if defined(__EMSCRIPTEN__) # define MUNIT_NO_BUFFER #endif /*** Logging ***/ static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO; static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR; #if defined(MUNIT_THREAD_LOCAL) static MUNIT_THREAD_LOCAL munit_bool munit_error_jmp_buf_valid = 0; static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf; #endif /* At certain warning levels, mingw will trigger warnings about * suggesting the format attribute, which we've explicity *not* set * because it will then choke on our attempts to use the MS-specific * I64 modifier for size_t (which we have to use since MSVC doesn't * support the C99 z modifier). 
*/ #if defined(__MINGW32__) || defined(__MINGW64__) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wsuggest-attribute=format" #endif MUNIT_PRINTF(5,0) static void munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) { if (level < munit_log_level_visible) return; switch (level) { case MUNIT_LOG_DEBUG: fputs("Debug", fp); break; case MUNIT_LOG_INFO: fputs("Info", fp); break; case MUNIT_LOG_WARNING: fputs("Warning", fp); break; case MUNIT_LOG_ERROR: fputs("Error", fp); break; default: munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level); return; } fputs(": ", fp); if (filename != NULL) fprintf(fp, "%s:%d: ", filename, line); vfprintf(fp, format, ap); fputc('\n', fp); } MUNIT_PRINTF(3,4) static void munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) { va_list ap; va_start(ap, format); munit_logf_exv(level, fp, NULL, 0, format, ap); va_end(ap); } static void munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) { munit_logf_internal(level, fp, "%s", message); } void munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) { va_list ap; va_start(ap, format); munit_logf_exv(level, stderr, filename, line, format, ap); va_end(ap); if (level >= munit_log_level_fatal) { #if defined(MUNIT_THREAD_LOCAL) if (munit_error_jmp_buf_valid) longjmp(munit_error_jmp_buf, 1); #endif abort(); } } void munit_errorf_ex(const char* filename, int line, const char* format, ...) 
{ va_list ap; va_start(ap, format); munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap); va_end(ap); #if defined(MUNIT_THREAD_LOCAL) if (munit_error_jmp_buf_valid) longjmp(munit_error_jmp_buf, 1); #endif abort(); } #if defined(__MINGW32__) || defined(__MINGW64__) #pragma GCC diagnostic pop #endif #if !defined(MUNIT_STRERROR_LEN) # define MUNIT_STRERROR_LEN 80 #endif static void munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) { #if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API)) munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno); #else char munit_error_str[MUNIT_STRERROR_LEN]; munit_error_str[0] = '\0'; #if !defined(_WIN32) strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN); #else strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno); #endif munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno); #endif } /*** Memory allocation ***/ void* munit_malloc_ex(const char* filename, int line, size_t size) { void* ptr; if (size == 0) return NULL; ptr = calloc(1, size); if (MUNIT_UNLIKELY(ptr == NULL)) { munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size); } return ptr; } /*** Timer code ***/ #if defined(MUNIT_ENABLE_TIMING) #define psnip_uint64_t munit_uint64_t #define psnip_uint32_t munit_uint32_t /* Code copied from portable-snippets * <https://github.com/nemequ/portable-snippets/>. If you need to * change something, please do it there so we can keep the code in * sync. */ /* Clocks (v1) * Portable Snippets - https://gitub.com/nemequ/portable-snippets * Created by Evan Nemerson <evan@nemerson.com> * * To the extent possible under law, the authors have waived all * copyright and related or neighboring rights to this code. 
For * details, see the Creative Commons Zero 1.0 Universal license at * https://creativecommons.org/publicdomain/zero/1.0/ */ #if !defined(PSNIP_CLOCK_H) #define PSNIP_CLOCK_H #if !defined(psnip_uint64_t) # include "../exact-int/exact-int.h" #endif #if !defined(PSNIP_CLOCK_STATIC_INLINE) # if defined(__GNUC__) # define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__)) # else # define PSNIP_CLOCK__COMPILER_ATTRIBUTES # endif # define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static #endif enum PsnipClockType { /* This clock provides the current time, in units since 1970-01-01 * 00:00:00 UTC not including leap seconds. In other words, UNIX * time. Keep in mind that this clock doesn't account for leap * seconds, and can go backwards (think NTP adjustments). */ PSNIP_CLOCK_TYPE_WALL = 1, /* The CPU time is a clock which increases only when the current * process is active (i.e., it doesn't increment while blocking on * I/O). */ PSNIP_CLOCK_TYPE_CPU = 2, /* Monotonic time is always running (unlike CPU time), but it only ever moves forward unless you reboot the system. Things like NTP adjustments have no effect on this clock. 
*/ PSNIP_CLOCK_TYPE_MONOTONIC = 3 }; struct PsnipClockTimespec { psnip_uint64_t seconds; psnip_uint64_t nanoseconds; }; /* Methods we support: */ #define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1 #define PSNIP_CLOCK_METHOD_TIME 2 #define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3 #define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4 #define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5 #define PSNIP_CLOCK_METHOD_CLOCK 6 #define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7 #define PSNIP_CLOCK_METHOD_GETRUSAGE 8 #define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9 #define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10 #include <assert.h> #if defined(HEDLEY_UNREACHABLE) # define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE() #else # define PSNIP_CLOCK_UNREACHABLE() assert(0) #endif /* Choose an implementation */ /* #undef PSNIP_CLOCK_WALL_METHOD */ /* #undef PSNIP_CLOCK_CPU_METHOD */ /* #undef PSNIP_CLOCK_MONOTONIC_METHOD */ /* We want to be able to detect the libc implementation, so we include <limits.h> (<features.h> isn't available everywhere). */ #if defined(__unix__) || defined(__unix) || defined(__linux__) # include <limits.h> # include <unistd.h> #endif #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) /* These are known to work without librt. If you know of others * please let us know so we can add them. 
*/ # if \ (defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \ (defined(__FreeBSD__)) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # elif !defined(PSNIP_CLOCK_NO_LIBRT) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # endif #endif #if defined(_WIN32) # if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER # endif #endif #if defined(__MACH__) && !defined(__gnu_hurd__) # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME # endif #endif #if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME) # include <time.h> # if !defined(PSNIP_CLOCK_WALL_METHOD) # if defined(CLOCK_REALTIME_PRECISE) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE # elif !defined(__sun) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME # endif # endif # if !defined(PSNIP_CLOCK_CPU_METHOD) # if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID # elif defined(CLOCK_VIRTUAL) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL # endif # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # if defined(CLOCK_MONOTONIC_RAW) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # elif defined(CLOCK_MONOTONIC_PRECISE) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE # elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC) # 
define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # endif # endif #endif #if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L) # if !defined(PSNIP_CLOCK_WALL_METHOD) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY # endif #endif #if !defined(PSNIP_CLOCK_WALL_METHOD) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME #endif #if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK #endif /* Primarily here for testing. */ #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC) # error No monotonic clock found. #endif /* Implementations */ #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME)) # include <time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) # include 
<sys/time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) # include <windows.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) # include <sys/time.h> # include <sys/resource.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) # include <CoreServices/CoreServices.h> # include <mach/mach.h> # include <mach/mach_time.h> #endif /*** Implementations ***/ #define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL)) #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock__clock_getres 
(clockid_t clk_id) { struct timespec res; int r; r = clock_getres(clk_id, &res); if (r != 0) return 0; return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec); } PSNIP_CLOCK__FUNCTION int psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) { struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) return -10; res->seconds = (psnip_uint64_t) (ts.tv_sec); res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec); return 0; } #endif PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_wall_get_precision (void) { #if !defined(PSNIP_CLOCK_WALL_METHOD) return 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY return 1000000; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME return 1; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_wall_get_time (struct PsnipClockTimespec* res) { (void) res; #if !defined(PSNIP_CLOCK_WALL_METHOD) return -2; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME res->seconds = time(NULL); res->nanoseconds = 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY struct timeval tv; if (gettimeofday(&tv, NULL) != 0) return -6; res->seconds = tv.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_cpu_get_precision (void) { #if !defined(PSNIP_CLOCK_CPU_METHOD) return 0; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return 
psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK return CLOCKS_PER_SEC; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES return PSNIP_CLOCK_NSEC_PER_SEC / 100; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_CPU_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK clock_t t = clock(); if (t == ((clock_t) -1)) return -5; res->seconds = t / CLOCKS_PER_SEC; res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES FILETIME CreationTime, ExitTime, KernelTime, UserTime; LARGE_INTEGER date, adjust; if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime)) return -7; /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */ date.HighPart = UserTime.dwHighDateTime; date.LowPart = UserTime.dwLowDateTime; adjust.QuadPart = 11644473600000 * 10000; date.QuadPart -= adjust.QuadPart; res->seconds = date.QuadPart / 10000000; res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100); #elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE struct rusage usage; if (getrusage(RUSAGE_SELF, &usage) != 0) return -8; res->seconds = usage.ru_utime.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else (void) res; return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_monotonic_get_precision (void) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) return 0; #elif 
defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); return (psnip_uint32_t) (tbi.numer / tbi.denom); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 return 1000; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER Frequency; QueryPerformanceFrequency(&Frequency); return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart); #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME psnip_uint64_t nsec = mach_absolute_time(); static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom); res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC; res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER t, f; if (QueryPerformanceCounter(&t) == 0) return -12; QueryPerformanceFrequency(&f); res->seconds = t.QuadPart / f.QuadPart; res->nanoseconds = t.QuadPart % f.QuadPart; if (f.QuadPart > 
PSNIP_CLOCK_NSEC_PER_SEC) res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC; else res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 const ULONGLONG msec = GetTickCount64(); res->seconds = msec / 1000; res->nanoseconds = sec % 1000; #else return -2; #endif return 0; } /* Returns the number of ticks per second for the specified clock. * For example, a clock with millisecond precision would return 1000, * and a clock with 1 second (such as the time() function) would * return 1. * * If the requested clock isn't available, it will return 0. * Hopefully this will be rare, but if it happens to you please let us * know so we can work on finding a way to support your system. * * Note that different clocks on the same system often have a * different precisions. */ PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_get_precision (enum PsnipClockType clock_type) { switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_precision (); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_precision (); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_precision (); } PSNIP_CLOCK_UNREACHABLE(); return 0; } /* Set the provided timespec to the requested time. Returns 0 on * success, or a negative value on failure. 
*/
/* Dispatch to the per-clock getter for the requested clock type.
 * res must be non-NULL; returns -1 for an unknown clock type. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) {
  assert(res != NULL);

  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_time (res);
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_time (res);
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_time (res);
  }

  return -1;
}

#endif /* !defined(PSNIP_CLOCK_H) */

/* Nanoseconds elapsed between two timespecs.  Assumes end >= start
 * overall; the branch handles the sub-second borrow when end's
 * nanosecond field is smaller than start's. */
static psnip_uint64_t
munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) {
  psnip_uint64_t r = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC;
  if (end->nanoseconds < start->nanoseconds) {
    r -= (start->nanoseconds - end->nanoseconds);
  } else {
    r += (end->nanoseconds - start->nanoseconds);
  }
  return r;
}

#else
# include <time.h>
#endif /* defined(MUNIT_ENABLE_TIMING) */

/*** PRNG stuff ***/

/* This is (unless I screwed up, which is entirely possible) the
 * version of PCG with 32-bit state.  It was chosen because it has a
 * small enough state that we should reliably be able to use CAS
 * instead of requiring a lock for thread-safety.
 *
 * If I did screw up, I probably will not bother changing it unless
 * there is a significant bias.  It's really not important this be
 * particularly strong, as long as it is fairly random it's much more
 * important that it be reproducible, so bug reports have a better
 * chance of being reproducible.
*/

/* Pick an atomics implementation for the PRNG state, in preference
 * order: OpenMP critical sections, C11 <stdatomic.h>, Clang builtin
 * atomics, GCC builtins, Win32 Interlocked*, and finally a plain
 * (non-thread-safe) fallback. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) && (!defined(__GNUC_MINOR__) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8))
#  define HAVE_STDATOMIC
#elif defined(__clang__)
#  if __has_extension(c_atomic)
#    define HAVE_CLANG_ATOMICS
#  endif
#endif

/* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */
#if defined(__clang__) && defined(_WIN32)
#  undef HAVE_STDATOMIC
#  if defined(__c2__)
#    undef HAVE_CLANG_ATOMICS
#  endif
#endif

#if defined(_OPENMP)
#  define ATOMIC_UINT32_T uint32_t
#  define ATOMIC_UINT32_INIT(x) (x)
#elif defined(HAVE_STDATOMIC)
#  include <stdatomic.h>
#  define ATOMIC_UINT32_T _Atomic uint32_t
#  define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x)
#elif defined(HAVE_CLANG_ATOMICS)
#  define ATOMIC_UINT32_T _Atomic uint32_t
#  define ATOMIC_UINT32_INIT(x) (x)
#elif defined(_WIN32)
#  define ATOMIC_UINT32_T volatile LONG
#  define ATOMIC_UINT32_INIT(x) (x)
#else
#  define ATOMIC_UINT32_T volatile uint32_t
#  define ATOMIC_UINT32_INIT(x) (x)
#endif

/* Global PRNG state shared by all threads; seeded to a fixed default
 * so runs are reproducible unless --seed overrides it. */
static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42);

#if defined(_OPENMP)
/* OpenMP shims: emulate atomic store/load/CAS with a named critical
 * section, since OpenMP has no portable CAS primitive. */
static inline void
munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) {
  #pragma omp critical (munit_atomics)
  *dest = value;
}

static inline uint32_t
munit_atomic_load(ATOMIC_UINT32_T* src) {
  int ret;
  #pragma omp critical (munit_atomics)
  ret = *src;
  return ret;
}

/* Compare-and-swap: returns non-zero and stores `desired` if *dest
 * still equals *expected. */
static inline uint32_t
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  munit_bool ret;

  #pragma omp critical (munit_atomics)
  {
    if (*dest == *expected) {
      *dest = desired;
      ret = 1;
    } else {
      ret = 0;
    }
  }

  return ret;
}
#elif defined(HAVE_STDATOMIC)
#  define munit_atomic_store(dest, value) atomic_store(dest, value)
#  define munit_atomic_load(src) atomic_load(src)
#  define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value)
#elif defined(HAVE_CLANG_ATOMICS)
#  define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST)
#  define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST)
#  define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
/* NOTE(review): precedence makes this `(defined && >4) || (==4 && >=7)`;
 * with __GNUC__ undefined both sides are 0, so the outcome matches the
 * intended `defined && (>4 || (==4 && >=7))` — confirm upstream. */
#elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
#  define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST)
#  define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST)
#  define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#elif defined(__GNUC__) && (__GNUC__ >= 4)
#  define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
#  define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value)
#elif defined(_WIN32) /* Untested */
#  define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
#  define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected))
#else
#  warning No atomic implementation, PRNG will not be thread-safe
#  define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
/* Non-atomic fallback CAS; only safe in single-threaded programs. */
static inline munit_bool
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  if (*dest == *expected) {
    *dest = desired;
    return 1;
  } else {
    return 0;
  }
}
#endif

#define MUNIT_PRNG_MULTIPLIER (747796405U)
#define MUNIT_PRNG_INCREMENT (1729U)

/* One step of the PCG LCG: advance the 32-bit state. */
static munit_uint32_t
munit_rand_next_state(munit_uint32_t state) {
  return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT;
}

/* PCG output permutation: turn a state value into a random number. */
static munit_uint32_t
munit_rand_from_state(munit_uint32_t state) {
  munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U);
  res ^= res >> 22;
  return res;
}

void
munit_rand_seed(munit_uint32_t seed) {
  /* Mix the user-supplied seed once so seed 0 doesn't map to state 0. */
  munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  munit_atomic_store(&munit_rand_state, state);
}

/* Derive a fresh seed from the wall clock (or time() when timing
 * support is disabled). */
static munit_uint32_t
munit_rand_generate_seed(void) {
  munit_uint32_t seed, state;

#if defined(MUNIT_ENABLE_TIMING)
  struct PsnipClockTimespec wc = { 0, };
  psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc);
  seed = (munit_uint32_t) wc.nanoseconds;
#else
  seed = (munit_uint32_t) time(NULL);
#endif

  state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  return munit_rand_from_state(state);
}

/* Advance a caller-owned state and return the next random number
 * (non-atomic; used when the state is thread-local). */
static munit_uint32_t
munit_rand_state_uint32(munit_uint32_t* state) {
  const munit_uint32_t old = *state;
  *state = munit_rand_next_state(old);
  return munit_rand_from_state(old);
}

/* Thread-safe random number from the global state: retry the CAS
 * until we advance the state without racing another thread. */
munit_uint32_t
munit_rand_uint32(void) {
  munit_uint32_t old, state;

  do {
    old = munit_atomic_load(&munit_rand_state);
    state = munit_rand_next_state(old);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return munit_rand_from_state(old);
}

/* Fill `data` with `size` random bytes, consuming 32 bits of PRNG
 * output at a time and a final partial word for the remainder. */
static void
munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  size_t members_remaining = size / sizeof(munit_uint32_t);
  size_t bytes_remaining = size % sizeof(munit_uint32_t);
  munit_uint8_t* b = data;
  munit_uint32_t rv;

  while (members_remaining-- > 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, sizeof(munit_uint32_t));
    b += sizeof(munit_uint32_t);
  }

  if (bytes_remaining != 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, bytes_remaining);
  }
}

/* Thread-safe variant: work on a local copy of the global state and
 * CAS it back, regenerating the buffer if another thread interfered. */
void
munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  munit_uint32_t old, state;

  do {
    state = old = munit_atomic_load(&munit_rand_state);
    munit_rand_state_memory(&state, size, data);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));
}

/* Uniform random value in [0, max] using rejection sampling to avoid
 * modulo bias.  `salt` perturbs the stream without affecting
 * reproducibility for a given seed. */
static munit_uint32_t
munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) {
  /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same
   * as (UINT32_MAX + 1 - max) % max = -max % max. We compute -max using not
   * to avoid compiler warnings.
   */
  const munit_uint32_t min = (~max + 1U) % max;
  munit_uint32_t x;

  if (max == (~((munit_uint32_t) 0U)))
    return munit_rand_state_uint32(state) ^ salt;

  max++;

  do {
    x = munit_rand_state_uint32(state) ^ salt;
  } while (x < min);

  return x % max;
}

/* Thread-safe wrapper around munit_rand_state_at_most (CAS-retry on
 * the global state, same pattern as munit_rand_memory). */
static munit_uint32_t
munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) {
  munit_uint32_t old, state;
  munit_uint32_t retval;

  do {
    state = old = munit_atomic_load(&munit_rand_state);
    retval = munit_rand_state_at_most(&state, salt, max);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return retval;
}

/* Uniform random int in [min, max]; arguments may be given in either
 * order.  The 64-bit range computation avoids signed overflow, and a
 * range wider than 32 bits is clamped. */
int
munit_rand_int_range(int min, int max) {
  munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;

  if (min > max)
    return munit_rand_int_range(max, min);

  if (range > (~((munit_uint32_t) 0U)))
    range = (~((munit_uint32_t) 0U));

  return min + munit_rand_at_most(0, (munit_uint32_t) range);
}

/* Uniform random double in [0, 1). */
double
munit_rand_double(void) {
  munit_uint32_t old, state;
  double retval = 0.0;

  do {
    state = old = munit_atomic_load(&munit_rand_state);

    /* See http://mumble.net/~campbell/tmp/random_real.c for how to do
     * this right. Patches welcome if you feel that this is too
     * biased.
*/ retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return retval; } /*** Test suite handling ***/ typedef struct { unsigned int successful; unsigned int skipped; unsigned int failed; unsigned int errored; #if defined(MUNIT_ENABLE_TIMING) munit_uint64_t cpu_clock; munit_uint64_t wall_clock; #endif } MunitReport; typedef struct { const char* prefix; const MunitSuite* suite; const char** tests; munit_uint32_t seed; unsigned int iterations; MunitParameter* parameters; munit_bool single_parameter_mode; void* user_data; MunitReport report; munit_bool colorize; munit_bool fork; munit_bool show_stderr; munit_bool fatal_failures; } MunitTestRunner; const char* munit_parameters_get(const MunitParameter params[], const char* key) { const MunitParameter* param; for (param = params ; param != NULL && param->name != NULL ; param++) if (strcmp(param->name, key) == 0) return param->value; return NULL; } #if defined(MUNIT_ENABLE_TIMING) static void munit_print_time(FILE* fp, munit_uint64_t nanoseconds) { fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC)); } #endif /* Add a paramter to an array of parameters. */ static MunitResult munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) { *params = (MunitParameter*)realloc(*params, sizeof(MunitParameter) * (*params_size + 2)); if (*params == NULL) return MUNIT_ERROR; (*params)[*params_size].name = name; (*params)[*params_size].value = value; (*params_size)++; (*params)[*params_size].name = NULL; (*params)[*params_size].value = NULL; return MUNIT_OK; } /* Concatenate two strings, but just return one of the components * unaltered if the other is NULL or "". */ static char* munit_maybe_concat(size_t* len, char* prefix, char* suffix) { char* res; size_t res_l; const size_t prefix_l = prefix != NULL ? 
strlen(prefix) : 0; const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0; if (prefix_l == 0 && suffix_l == 0) { res = NULL; res_l = 0; } else if (prefix_l == 0 && suffix_l != 0) { res = suffix; res_l = suffix_l; } else if (prefix_l != 0 && suffix_l == 0) { res = prefix; res_l = prefix_l; } else { res_l = prefix_l + suffix_l; res = (char*)malloc(res_l + 1); memcpy(res, prefix, prefix_l); memcpy(res + prefix_l, suffix, suffix_l); res[res_l] = 0; } if (len != NULL) *len = res_l; return res; } /* Possbily free a string returned by munit_maybe_concat. */ static void munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) { if (prefix != s && suffix != s) free(s); } /* Cheap string hash function, just used to salt the PRNG. */ static munit_uint32_t munit_str_hash(const char* name) { const char *p; munit_uint32_t h = 5381U; for (p = name; *p != '\0'; p++) h = (h << 5) + h + *p; return h; } static void munit_splice(int from, int to) { munit_uint8_t buf[1024]; #if !defined(_WIN32) ssize_t len; ssize_t bytes_written; ssize_t write_res; #else int len; int bytes_written; int write_res; #endif do { len = read(from, buf, sizeof(buf)); if (len > 0) { bytes_written = 0; do { write_res = write(to, buf + bytes_written, len - bytes_written); if (write_res < 0) break; bytes_written += write_res; } while (bytes_written < len); } else break; } while (1); } /* This is the part that should be handled in the child process */ static MunitResult munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) { unsigned int iterations = runner->iterations; MunitResult result = MUNIT_FAIL; #if defined(MUNIT_ENABLE_TIMING) struct PsnipClockTimespec wall_clock_begin = { 0, }, wall_clock_end = { 0, }; struct PsnipClockTimespec cpu_clock_begin = { 0, }, cpu_clock_end = { 0, }; #endif unsigned int i = 0; if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION) iterations = 1; 
else if (iterations == 0) iterations = runner->suite->iterations; munit_rand_seed(runner->seed); do { void* data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin); #endif result = test->test(params, data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end); #endif if (test->tear_down != NULL) test->tear_down(data); if (MUNIT_LIKELY(result == MUNIT_OK)) { report->successful++; #if defined(MUNIT_ENABLE_TIMING) report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end); report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end); #endif } else { switch ((int) result) { case MUNIT_SKIP: report->skipped++; break; case MUNIT_FAIL: report->failed++; break; case MUNIT_ERROR: report->errored++; break; default: break; } break; } } while (++i < iterations); return result; } #if defined(MUNIT_EMOTICON) # define MUNIT_RESULT_STRING_OK ":)" # define MUNIT_RESULT_STRING_SKIP ":|" # define MUNIT_RESULT_STRING_FAIL ":(" # define MUNIT_RESULT_STRING_ERROR ":o" # define MUNIT_RESULT_STRING_TODO ":/" #else # define MUNIT_RESULT_STRING_OK "OK " # define MUNIT_RESULT_STRING_SKIP "SKIP " # define MUNIT_RESULT_STRING_FAIL "FAIL " # define MUNIT_RESULT_STRING_ERROR "ERROR" # define MUNIT_RESULT_STRING_TODO "TODO " #endif static void munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) { if (runner->colorize) fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string); else fputs(string, MUNIT_OUTPUT_FILE); } #if !defined(MUNIT_NO_BUFFER) static int munit_replace_stderr(FILE* stderr_buf) { if (stderr_buf != NULL) { const int orig_stderr = dup(STDERR_FILENO); int errfd = fileno(stderr_buf); if (MUNIT_UNLIKELY(errfd 
== -1)) { exit(EXIT_FAILURE); } dup2(errfd, STDERR_FILENO); return orig_stderr; } return -1; } static void munit_restore_stderr(int orig_stderr) { if (orig_stderr != -1) { dup2(orig_stderr, STDERR_FILENO); close(orig_stderr); } } #endif /* !defined(MUNIT_NO_BUFFER) */ /* Run a test with the specified parameters. */ static void munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) { MunitResult result = MUNIT_OK; MunitReport report = { 0, 0, 0, 0, #if defined(MUNIT_ENABLE_TIMING) 0, 0 #endif }; unsigned int output_l; munit_bool first; const MunitParameter* param; FILE* stderr_buf; #if !defined(MUNIT_NO_FORK) int pipefd[2]; pid_t fork_pid; int orig_stderr; ssize_t bytes_written = 0; ssize_t write_res; ssize_t bytes_read = 0; ssize_t read_res; int status = 0; pid_t changed_pid; #endif if (params != NULL) { output_l = 2; fputs(" ", MUNIT_OUTPUT_FILE); first = 1; for (param = params ; param != NULL && param->name != NULL ; param++) { if (!first) { fputs(", ", MUNIT_OUTPUT_FILE); output_l += 2; } else { first = 0; } output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value); } while (output_l++ < MUNIT_TEST_NAME_LEN) { fputc(' ', MUNIT_OUTPUT_FILE); } } fflush(MUNIT_OUTPUT_FILE); stderr_buf = NULL; #if !defined(_WIN32) || defined(__MINGW32__) stderr_buf = tmpfile(); #else tmpfile_s(&stderr_buf); #endif if (stderr_buf == NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr"); result = MUNIT_ERROR; goto print_result; } #if !defined(MUNIT_NO_FORK) if (runner->fork) { pipefd[0] = -1; pipefd[1] = -1; if (pipe(pipefd) != 0) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe"); result = MUNIT_ERROR; goto print_result; } fork_pid = fork(); if (fork_pid == 0) { close(pipefd[0]); orig_stderr = munit_replace_stderr(stderr_buf); munit_test_runner_exec(runner, test, params, &report); /* Note that we don't restore stderr. 
This is so we can buffer * things written to stderr later on (such as by * asan/tsan/ubsan, valgrind, etc.) */ close(orig_stderr); do { write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written); if (write_res < 0) { if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe"); } exit(EXIT_FAILURE); } bytes_written += write_res; } while ((size_t) bytes_written < sizeof(report)); if (stderr_buf != NULL) fclose(stderr_buf); close(pipefd[1]); exit(EXIT_SUCCESS); } else if (fork_pid == -1) { close(pipefd[0]); close(pipefd[1]); if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork"); } report.errored++; result = MUNIT_ERROR; } else { close(pipefd[1]); do { read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read); if (read_res < 1) break; bytes_read += read_res; } while (bytes_read < (ssize_t) sizeof(report)); changed_pid = waitpid(fork_pid, &status, 0); if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) { if (bytes_read != sizeof(report)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status)); report.errored++; } else if (WEXITSTATUS(status) != EXIT_SUCCESS) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status)); report.errored++; } } else { if (WIFSIGNALED(status)) { #if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700) munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status))); #else munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status)); #endif } else if (WIFSTOPPED(status)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status)); } report.errored++; } close(pipefd[0]); waitpid(fork_pid, NULL, 0); } } else #endif { #if 
!defined(MUNIT_NO_BUFFER) const volatile int orig_stderr = munit_replace_stderr(stderr_buf); #endif #if defined(MUNIT_THREAD_LOCAL) if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) { result = MUNIT_FAIL; report.failed++; } else { munit_error_jmp_buf_valid = 1; result = munit_test_runner_exec(runner, test, params, &report); } #else result = munit_test_runner_exec(runner, test, params, &report); #endif #if !defined(MUNIT_NO_BUFFER) munit_restore_stderr(orig_stderr); #endif /* Here just so that the label is used on Windows and we don't get * a warning */ goto print_result; } print_result: fputs("[ ", MUNIT_OUTPUT_FILE); if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) { if (report.failed != 0 || report.errored != 0 || report.skipped != 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3'); result = MUNIT_OK; } else { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); if (MUNIT_LIKELY(stderr_buf != NULL)) munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful."); runner->report.failed++; result = MUNIT_ERROR; } } else if (report.failed > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1'); runner->report.failed++; result = MUNIT_FAIL; } else if (report.errored > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); runner->report.errored++; result = MUNIT_ERROR; } else if (report.skipped > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3'); runner->report.skipped++; result = MUNIT_SKIP; } else if (report.successful > 1) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful); fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" 
MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", ""); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } else if (report.successful > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } fputs(" ]\n", MUNIT_OUTPUT_FILE); if (stderr_buf != NULL) { if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) { fflush(MUNIT_OUTPUT_FILE); rewind(stderr_buf); munit_splice(fileno(stderr_buf), STDERR_FILENO); fflush(stderr); } fclose(stderr_buf); } } static void munit_test_runner_run_test_wild(MunitTestRunner* runner, const MunitTest* test, const char* test_name, MunitParameter* params, MunitParameter* p) { const MunitParameterEnum* pe; char** values; MunitParameter* next; for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { if (p->name == pe->name) break; } if (pe == NULL) return; for (values = pe->values ; *values != NULL ; values++) { next = p + 1; p->value = *values; if (next->name == NULL) { munit_test_runner_run_test_with_params(runner, test, params); } else { munit_test_runner_run_test_wild(runner, test, test_name, params, next); } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) break; } } /* Run a single test, with every combination of parameters * requested. 
*/ static void munit_test_runner_run_test(MunitTestRunner* runner, const MunitTest* test, const char* prefix) { char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name); /* The array of parameters to pass to * munit_test_runner_run_test_with_params */ MunitParameter* params = NULL; size_t params_l = 0; /* Wildcard parameters are parameters which have possible values * specified in the test, but no specific value was passed to the * CLI. That means we want to run the test once for every * possible combination of parameter values or, if --single was * passed to the CLI, a single time with a random set of * parameters. */ MunitParameter* wild_params = NULL; size_t wild_params_l = 0; const MunitParameterEnum* pe; const MunitParameter* cli_p; munit_bool filled; unsigned int possible; char** vals; size_t first_wild; const MunitParameter* wp; int pidx; munit_rand_seed(runner->seed); fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name); if (test->parameters == NULL) { /* No parameters. Simple, nice. */ munit_test_runner_run_test_with_params(runner, test, NULL); } else { fputc('\n', MUNIT_OUTPUT_FILE); for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { /* Did we received a value for this parameter from the CLI? */ filled = 0; for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) { if (strcmp(cli_p->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK)) goto cleanup; filled = 1; break; } } if (filled) continue; /* Nothing from CLI, is the enum NULL/empty? We're not a * fuzzer… */ if (pe->values == NULL || pe->values[0] == NULL) continue; /* If --single was passed to the CLI, choose a value from the * list of possibilities randomly. 
*/ if (runner->single_parameter_mode) { possible = 0; for (vals = pe->values ; *vals != NULL ; vals++) possible++; /* We want the tests to be reproducible, even if you're only * running a single test, but we don't want every test with * the same number of parameters to choose the same parameter * number, so use the test name as a primitive salt. */ pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1); if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK)) goto cleanup; } else { /* We want to try every permutation. Put in a placeholder * entry, we'll iterate through them later. */ if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK)) goto cleanup; } } if (wild_params_l != 0) { first_wild = params_l; for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) { for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) { if (strcmp(wp->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK)) goto cleanup; } } } munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild); } else { munit_test_runner_run_test_with_params(runner, test, params); } cleanup: free(params); free(wild_params); } munit_maybe_free_concat(test_name, prefix, test->name); } /* Recurse through the suite and run all the tests. If a list of * tests to run was provied on the command line, run only those * tests. */ static void munit_test_runner_run_suite(MunitTestRunner* runner, const MunitSuite* suite, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const char** test_name; const MunitSuite* child_suite; /* Run the tests. 
*/ for (test = suite->tests ; test != NULL && test->test != NULL ; test++) { if (runner->tests != NULL) { /* Specific tests were requested on the CLI */ for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) { if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) && strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) { munit_test_runner_run_test(runner, test, pre); if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; } } } else { /* Run all tests */ munit_test_runner_run_test(runner, test, pre); } } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; /* Run any child suites. */ for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_test_runner_run_suite(runner, child_suite, pre); } cleanup: munit_maybe_free_concat(pre, prefix, suite->prefix); } static void munit_test_runner_run(MunitTestRunner* runner) { munit_test_runner_run_suite(runner, runner->suite, NULL); } static void munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) { const MunitArgument* arg; (void) argc; printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]); puts(" --seed SEED\n" " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n" " notation with no separators (commas, decimals, spaces, etc.), or\n" " hexidecimal prefixed by \"0x\".\n" " --iterations N\n" " Run each test N times. 0 means the default number.\n" " --param name value\n" " A parameter key/value pair which will be passed to any test with\n" " takes a parameter of that name. 
If not provided, the test will be\n" " run once for each possible parameter value.\n" " --list Write a list of all available tests.\n" " --list-params\n" " Write a list of all available tests and their possible parameters.\n" " --single Run each parameterized test in a single configuration instead of\n" " every possible combination\n" " --log-visible debug|info|warning|error\n" " --log-fatal debug|info|warning|error\n" " Set the level at which messages of different severities are visible,\n" " or cause the test to terminate.\n" #if !defined(MUNIT_NO_FORK) " --no-fork Do not execute tests in a child process. If this option is supplied\n" " and a test crashes (including by failing an assertion), no further\n" " tests will be performed.\n" #endif " --fatal-failures\n" " Stop executing tests as soon as a failure is found.\n" " --show-stderr\n" " Show data written to stderr by the tests, even if the test succeeds.\n" " --color auto|always|never\n" " Colorize (or don't) the output.\n" /* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */ " --help Print this help message and exit.\n"); #if defined(MUNIT_NL_LANGINFO) setlocale(LC_ALL, ""); fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? 
"µnit" : "munit", stdout); #else puts("munit"); #endif printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff); for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) arg->write_help(arg, user_data); } static const MunitArgument* munit_arguments_find(const MunitArgument arguments[], const char* name) { const MunitArgument* arg; for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) if (strcmp(arg->name, name) == 0) return arg; return NULL; } static void munit_suite_list_tests(const MunitSuite* suite, munit_bool show_params, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const MunitParameterEnum* params; munit_bool first; char** val; const MunitSuite* child_suite; for (test = suite->tests ; test != NULL && test->name != NULL ; test++) { if (pre != NULL) fputs(pre, stdout); puts(test->name); if (show_params) { for (params = test->parameters ; params != NULL && params->name != NULL ; params++) { fprintf(stdout, " - %s: ", params->name); if (params->values == NULL) { puts("Any"); } else { first = 1; for (val = params->values ; *val != NULL ; val++ ) { if(!first) { fputs(", ", stdout); } else { first = 0; } fputs(*val, stdout); } putc('\n', stdout); } } } } for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_suite_list_tests(child_suite, show_params, pre); } munit_maybe_free_concat(pre, prefix, suite->prefix); } static munit_bool munit_stream_supports_ansi(FILE *stream) { #if !defined(_WIN32) return isatty(fileno(stream)); #else #if !defined(__MINGW32__) size_t ansicon_size = 0; #endif if (isatty(fileno(stream))) { #if !defined(__MINGW32__) getenv_s(&ansicon_size, NULL, 0, "ANSICON"); return ansicon_size != 0; #else return getenv("ANSICON") != NULL; #endif } return 0; 
#endif
}

/* Run `suite` as a command-line program: parse argv (seed, iterations,
 * parameters, color, logging, listing, forking, plus any custom `arguments`),
 * then execute the selected tests and print a summary.  Returns EXIT_SUCCESS
 * when no test failed or errored (or when --help/--list short-circuits),
 * EXIT_FAILURE otherwise, including on argument-parse errors. */
int
munit_suite_main_custom(const MunitSuite* suite, void* user_data,
                        int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)],
                        const MunitArgument arguments[]) {
  int result = EXIT_FAILURE;
  MunitTestRunner runner;
  size_t parameters_size = 0;   /* entries currently in runner.parameters */
  size_t tests_size = 0;        /* entries currently in runner.tests */
  int arg;

  char* envptr;
  unsigned long ts;
  char* endptr;
  unsigned long long iterations;
  MunitLogLevel level;
  const MunitArgument* argument;
  const char** runner_tests;
  unsigned int tests_run;
  unsigned int tests_total;

  /* Default runner state; several fields are overwritten below. */
  runner.prefix = NULL;
  runner.suite = NULL;
  runner.tests = NULL;
  runner.seed = 0;
  runner.iterations = 0;
  runner.parameters = NULL;
  runner.single_parameter_mode = 0;
  runner.user_data = NULL;

  runner.report.successful = 0;
  runner.report.skipped = 0;
  runner.report.failed = 0;
  runner.report.errored = 0;
#if defined(MUNIT_ENABLE_TIMING)
  runner.report.cpu_clock = 0;
  runner.report.wall_clock = 0;
#endif

  runner.colorize = 0;
#if !defined(_WIN32)
  runner.fork = 1;   /* fork() available: isolate each test by default */
#else
  runner.fork = 0;
#endif
  runner.show_stderr = 0;
  runner.fatal_failures = 0;
  runner.suite = suite;
  runner.user_data = user_data;

  runner.seed = munit_rand_generate_seed();
  runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);

  /* Command-line parsing: "--name [value]" options, anything else is taken
   * as a test-name filter and appended to runner.tests. */
  for (arg = 1 ; arg < argc ; arg++) {
    if (strncmp("--", argv[arg], 2) == 0) {
      if (strcmp("seed", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        /* Parse the seed and reject trailing garbage or values that do not
         * fit in 32 bits. */
        envptr = argv[arg + 1];
        ts = strtoul(argv[arg + 1], &envptr, 0);
        if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        runner.seed = (munit_uint32_t) ts;

        arg++;
      } else if (strcmp("iterations", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        endptr = argv[arg + 1];
        iterations = strtoul(argv[arg + 1], &endptr, 0);
        if (*endptr != '\0' || iterations > UINT_MAX) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        runner.iterations = (unsigned int) iterations;

        arg++;
      } else if (strcmp("param", argv[arg] + 2) == 0) {
        /* --param takes two values: a parameter name and its fixed value. */
        if (arg + 2 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]);
          goto cleanup;
        }

        /* Grow the NULL-terminated parameter array by one entry. */
        runner.parameters = (MunitParameter*)realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2));
        if (runner.parameters == NULL) {
          munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
          goto cleanup;
        }
        runner.parameters[parameters_size].name = (char*) argv[arg + 1];
        runner.parameters[parameters_size].value = (char*) argv[arg + 2];
        parameters_size++;
        runner.parameters[parameters_size].name = NULL;
        runner.parameters[parameters_size].value = NULL;
        arg += 2;
      } else if (strcmp("color", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        if (strcmp(argv[arg + 1], "always") == 0)
          runner.colorize = 1;
        else if (strcmp(argv[arg + 1], "never") == 0)
          runner.colorize = 0;
        else if (strcmp(argv[arg + 1], "auto") == 0)
          runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
        else {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        arg++;
      } else if (strcmp("help", argv[arg] + 2) == 0) {
        munit_print_help(argc, argv, user_data, arguments);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else if (strcmp("single", argv[arg] + 2) == 0) {
        runner.single_parameter_mode = 1;
      } else if (strcmp("show-stderr", argv[arg] + 2) == 0) {
        runner.show_stderr = 1;
#if !defined(_WIN32)
      } else if (strcmp("no-fork", argv[arg] + 2) == 0) {
        runner.fork = 0;
#endif
      } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) {
        runner.fatal_failures = 1;
      } else if (strcmp("log-visible", argv[arg] + 2) == 0 ||
                 strcmp("log-fatal", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        if (strcmp(argv[arg + 1], "debug") == 0)
          level = MUNIT_LOG_DEBUG;
        else if (strcmp(argv[arg + 1], "info") == 0)
          level = MUNIT_LOG_INFO;
        else if (strcmp(argv[arg + 1], "warning") == 0)
          level = MUNIT_LOG_WARNING;
        else if (strcmp(argv[arg + 1], "error") == 0)
          level = MUNIT_LOG_ERROR;
        else {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        /* Same parsing for both options; only the target global differs. */
        if (strcmp("log-visible", argv[arg] + 2) == 0)
          munit_log_level_visible = level;
        else
          munit_log_level_fatal = level;

        arg++;
      } else if (strcmp("list", argv[arg] + 2) == 0) {
        munit_suite_list_tests(suite, 0, NULL);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else if (strcmp("list-params", argv[arg] + 2) == 0) {
        munit_suite_list_tests(suite, 1, NULL);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else {
        /* Not a built-in option: delegate to the caller's custom arguments. */
        argument = munit_arguments_find(arguments, argv[arg] + 2);
        if (argument == NULL) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]);
          goto cleanup;
        }

        if (!argument->parse_argument(suite, user_data, &arg, argc, argv))
          goto cleanup;
      }
    } else {
      /* Bare argument: treat as a test-name filter (NULL-terminated array). */
      runner_tests = (const char**)realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2));
      if (runner_tests == NULL) {
        munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
        goto cleanup;
      }
      runner.tests = runner_tests;
      runner.tests[tests_size++] = argv[arg];
      runner.tests[tests_size] = NULL;
    }
  }

  fflush(stderr);
  fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed %08x ...\n", runner.seed);

  munit_test_runner_run(&runner);

  tests_run = runner.report.successful + runner.report.failed + runner.report.errored;
  tests_total = tests_run + runner.report.skipped;
  if (tests_run == 0) {
    fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped);
  } else {
    fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) test skipped.\n",
            runner.report.successful,
            tests_run,
            (((double) runner.report.successful) / ((double) tests_run)) * 100.0,
            runner.report.skipped,
            (((double) runner.report.skipped) / ((double) tests_total)) * 100.0);
  }

  if (runner.report.failed == 0 && runner.report.errored == 0) {
    result = EXIT_SUCCESS;
  }

 cleanup:
  free(runner.parameters);
  free((void*) runner.tests);

  return result;
}

/* Convenience wrapper: run the suite with no custom argument handlers. */
int
munit_suite_main(const MunitSuite* suite, void* user_data,
                 int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  return munit_suite_main_custom(suite, user_data, argc, argv, NULL);
}
GB_binop__band_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__band_uint32 // A.*B function (eWiseMult): GB_AemultB__band_uint32 // A*D function (colscale): GB_AxD__band_uint32 // D*A function (rowscale): GB_DxB__band_uint32 // C+=B function (dense accum): GB_Cdense_accumB__band_uint32 // C+=b function (dense accum): GB_Cdense_accumb__band_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__band_uint32 // C=scalar+B GB_bind1st__band_uint32 // C=scalar+B' GB_bind1st_tran__band_uint32 // C=A+scalar GB_bind2nd__band_uint32 // C=A'+scalar GB_bind2nd_tran__band_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij) & (bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax 
[pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x) & (y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BAND || GxB_NO_UINT32 || GxB_NO_BAND_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Apply cij = aij & bij elementwise; C, A, and B are all dense.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_Cdense_ewise3_noaccum__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// C(i,j) = C(i,j) & B(i,j) for entries present in sparse B; C is dense.
// The slice arrays and ntasks describe the precomputed parallel partition of B.
GrB_Info GB_Cdense_accumB__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// C(i,j) = C(i,j) & b for all entries of dense C; b is passed as an untyped
// pointer (p_bwork) and dereferenced as uint32_t.
GrB_Info GB_Cdense_accumb__band_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable duplicate return (auto-generated code; harmless)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// C = A*D where D is diagonal: scales each column j of A by D(j,j) using the
// & operator.  Parameter list continues in the next chunk.
GrB_Info GB_AxD__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix
    A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's numerical values for the colscale template
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// C = D*B where D is diagonal: scales each row i of B by D(i,i) using the
// & operator.  *_is_pattern flags mean only the sparsity pattern is used.
GrB_Info GB_DxB__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Set-union elementwise "add": cij = aij & bij where both present; entries
// present in only one input are copied through.  M is the optional mask
// (structural if Mask_struct); the C_to_* and TaskList/ntasks arguments carry
// the precomputed task schedule.
GrB_Info GB_AaddB__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Set-intersection elementwise "multiply": cij = aij & bij only where both A
// and B have an entry.  Parameter list continues in the next chunk.
GrB_Info GB_AemultB__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct
    *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = x & Bx [p] for all anz entries; the untyped inputs are viewed as
// uint32_t.  Parallelized with a static OpenMP schedule over nthreads.
GrB_Info GB_bind1st__band_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t bij = Bx [p] ;
        Cx [p] = (x) & (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = Ax [p] & y for all anz entries; mirror image of bind1st with the
// scalar bound to the second operand.
GrB_Info GB_bind2nd__band_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij) & (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint32_t aij = Ax [pA] ;            \
    Cx [pC] = (x) & (aij) ;             \
}

// C = x & A' : transpose A and apply the operator with the scalar as the
// first operand.  The transpose machinery (Rowcounts, Iter, A_slice,
// naslice) comes from GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__band_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint32_t aij = Ax [pA] ;            \
    Cx [pC] = (aij) & (y) ;             \
}

// C = A' & y : transpose A and apply the operator with the scalar as the
// second operand.
GrB_Info GB_bind2nd_tran__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ocp_nlp_sqp.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.; */ #include "acados/ocp_nlp/ocp_nlp_sqp.h" // external #include <assert.h> #include <math.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #if defined(ACADOS_WITH_OPENMP) #include <omp.h> #endif // blasfeo #include "blasfeo/include/blasfeo_d_aux.h" #include "blasfeo/include/blasfeo_d_aux_ext_dep.h" #include "blasfeo/include/blasfeo_d_blas.h" // acados #include "acados/ocp_nlp/ocp_nlp_common.h" #include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h" #include "acados/ocp_nlp/ocp_nlp_reg_common.h" #include "acados/ocp_qp/ocp_qp_common.h" #include "acados/utils/mem.h" #include "acados/utils/print.h" #include "acados/utils/timing.h" #include "acados/utils/types.h" #include "acados_c/ocp_qp_interface.h" /************************************************ * options ************************************************/ acados_size_t ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; acados_size_t size = 0; size += sizeof(ocp_nlp_sqp_opts); size += ocp_nlp_opts_calculate_size(config, dims); return size; } void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; char *c_ptr = (char *) raw_memory; ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_opts); opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr); c_ptr += ocp_nlp_opts_calculate_size(config, dims); 
    assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}

/* Fill the SQP options with their defaults (max_iter 20, all tolerances 1e-8,
 * everything else off) and push the tolerance defaults down into the QP
 * solver's own options.  The nested nlp_opts must be defaulted first. */
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    // int ii;

    // this first !!!
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);

    // SQP opts
    opts->max_iter = 20;
    opts->tol_stat = 1e-8;
    opts->tol_eq = 1e-8;
    opts->tol_ineq = 1e-8;
    opts->tol_comp = 1e-8;

    opts->ext_qp_res = 0;

    opts->qp_warm_start = 0;
    opts->warm_start_first_qp = false;
    opts->rti_phase = 0;
    opts->print_level = 0;
    opts->initialize_t_slacks = 0;

    // overwrite default submodules opts

    // qp tolerance
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", &opts->tol_comp);

    return;
}

/* Propagate any option changes into the nested NLP options. */
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_update(config, dims, nlp_opts);

    return;
}

/* Generic string-keyed option setter.  Field names of the form
 * "<module>_<name>" are dispatched on the module prefix (currently only
 * "qp_*" is special-cased and forwarded to the nested NLP/QP options);
 * everything else is matched against the SQP-level fields, falling through
 * to ocp_nlp_opts_set for unknown names. */
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name (text before the first '_' in `field`)
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        ocp_nlp_opts_set(config, nlp_opts, field, value);

        // keep the local copy of the warm-start flag in sync
        if (!strcmp(field, "qp_warm_start"))
        {
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "max_iter"))
        {
            int* max_iter = (int *) value;
            opts->max_iter = *max_iter;
        }
        else if (!strcmp(field, "tol_stat"))
        {
            double* tol_stat = (double *) value;
            opts->tol_stat = *tol_stat;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", value);
        }
        else if (!strcmp(field, "tol_eq"))
        {
            double* tol_eq = (double *) value;
            opts->tol_eq = *tol_eq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", value);
        }
        else if (!strcmp(field, "tol_ineq"))
        {
            double* tol_ineq = (double *) value;
            opts->tol_ineq = *tol_ineq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", value);
        }
        else if (!strcmp(field, "tol_comp"))
        {
            double* tol_comp = (double *) value;
            opts->tol_comp = *tol_comp;
            /* TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified. */
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", value); } else if (!strcmp(field, "ext_qp_res")) { int* ext_qp_res = (int *) value; opts->ext_qp_res = *ext_qp_res; } else if (!strcmp(field, "warm_start_first_qp")) { bool* warm_start_first_qp = (bool *) value; opts->warm_start_first_qp = *warm_start_first_qp; } else if (!strcmp(field, "rti_phase")) { int* rti_phase = (int *) value; if (*rti_phase < 0 || *rti_phase > 0) { printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field."); printf("possible values are: 0\n"); exit(1); } else opts->rti_phase = *rti_phase; } else if (!strcmp(field, "print_level")) { int* print_level = (int *) value; if (*print_level < 0) { printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level); exit(1); } opts->print_level = *print_level; } else if (!strcmp(field, "initialize_t_slacks")) { int* initialize_t_slacks = (int *) value; if (*initialize_t_slacks != 0 && *initialize_t_slacks != 1) { printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for initialize_t_slacks field, need int 0 or 1, got %d.", *initialize_t_slacks); exit(1); } opts->initialize_t_slacks = *initialize_t_slacks; } else { ocp_nlp_opts_set(config, nlp_opts, field, value); } } return; } void ocp_nlp_sqp_opts_set_at_stage(void *config_, void *opts_, size_t stage, const char *field, void* value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value); return; } /************************************************ * memory ************************************************/ acados_size_t ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; // int N = dims->N; // int *nx = 
    // dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    acados_size_t size = 0;

    size += sizeof(ocp_nlp_sqp_memory);

    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat matrix: one row per SQP iteration (+1), 6 base columns,
    // 4 extra columns when extended QP residuals are requested
    int stat_m = opts->max_iter+1;
    int stat_n = 6;
    if (opts->ext_qp_res)
        stat_n += 4;
    size += stat_n*stat_m*sizeof(double);

    size += 3*8; // align

    make_int_multiple_of(8, &size);

    return size;
}

/* Carve the SQP memory struct, the nested NLP memory, and the iteration
 * statistics table out of the caller-provided raw_memory block.  Layout must
 * mirror ocp_nlp_sqp_memory_calculate_size exactly (checked by the assert). */
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;

    char *c_ptr = (char *) raw_memory;

    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_memory);

    align_char_to(8, &c_ptr);

    // nlp mem
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat
    mem->stat = (double *) c_ptr;
    mem->stat_m = opts->max_iter+1;
    mem->stat_n = 6;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);

    mem->status = ACADOS_READY;

    align_char_to(8, &c_ptr);

    assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);

    return mem;
}

/************************************************
 * workspace
 ************************************************/

/* Total bytes of scratch workspace needed by the SQP solver: the workspace
 * struct itself, the nested NLP workspace, temporary QP in/out copies, and
 * (optionally) QP residual storage.  Continues in the next chunk. */
acados_size_t ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    acados_size_t size = 0;

    // sqp
    size +=
    sizeof(ocp_nlp_sqp_workspace);

    // nlp
    size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // tmp qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // tmp qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return size;
}

/* Partition the raw workspace block `work` into its sub-workspaces (nested
 * NLP workspace, temporary QP in/out, optional QP residuals).  Layout must
 * mirror ocp_nlp_sqp_workspace_calculate_size (checked by the assert). */
static void ocp_nlp_sqp_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims,
         ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    // sqp
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_workspace);

    // nlp
    work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
    c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // tmp qp in
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // tmp qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return;
}

/************************************************
 * functions
 ************************************************/

/* Main SQP solve entry point (body continues past this chunk). */
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
                void *opts_, void *mem_, void *work_)
{
    acados_timer timer0, timer1;

    acados_tic(&timer0);

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config =
config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_sqp_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_sqp_workspace *work = work_; ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; // zero timers double total_time = 0.0; double tmp_time; mem->time_qp_sol = 0.0; mem->time_qp_solver_call = 0.0; mem->time_qp_xcond = 0.0; mem->time_lin = 0.0; mem->time_reg = 0.0; mem->time_tot = 0.0; mem->time_glob = 0.0; mem->time_sim = 0.0; mem->time_sim_la = 0.0; mem->time_sim_ad = 0.0; int N = dims->N; int ii; int qp_iter = 0; int qp_status = 0; #if defined(ACADOS_WITH_OPENMP) // backup number of threads int num_threads_bkp = omp_get_num_threads(); // set number of threads omp_set_num_threads(opts->nlp_opts->num_threads); #pragma omp parallel { // beginning of parallel region #endif // alias to dynamics_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for #endif for (ii = 0; ii < N; ii++) { config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]); 
config->dynamics[ii]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]); } // alias to cost_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for #endif for (ii = 0; ii <= N; ii++) { config->cost[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_Z_ptr(nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]); } // alias to constraints_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for #endif for (ii = 0; ii <= N; ii++) { config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxs_rev_ptr(nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxe_ptr(nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]); } // 
alias to regularize memory config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem); config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem); config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem); config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem); config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem); config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem); config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem); config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem); config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem); // copy sampling times into dynamics model #if defined(ACADOS_WITH_OPENMP) #pragma omp for #endif // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute; // -> remove here and make sure precompute is called everywhere. 
for (ii = 0; ii < N; ii++) { config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); } #if defined(ACADOS_WITH_OPENMP) } // end of parallel region #endif // if (opts->initialize_t_slacks > 0) ocp_nlp_initialize_t_slacks(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // initialize QP ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // main sqp loop int sqp_iter = 0; nlp_mem->sqp_iter = &sqp_iter; for (; sqp_iter < opts->max_iter; sqp_iter++) { // linearizate NLP and update QP matrices acados_tic(&timer1); ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); mem->time_lin += acados_toc(&timer1); #ifdef MEASURE_TIMINGS // get timings from integrator for (ii=0; ii<N; ii++) { config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], "time_sim", &tmp_time); mem->time_sim += tmp_time; config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], "time_sim_la", &tmp_time); mem->time_sim_la += tmp_time; config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], "time_sim_ad", &tmp_time); mem->time_sim_ad += tmp_time; } #endif // MEASURE_TIMINGS // update QP rhs for SQP (step prim var, abs dual var) ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // compute nlp residuals ocp_nlp_res_compute(dims, nlp_in, nlp_out, nlp_mem->nlp_res, nlp_mem); nlp_out->inf_norm_res = nlp_mem->nlp_res->inf_norm_res_stat; nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_eq > nlp_out->inf_norm_res) ? nlp_mem->nlp_res->inf_norm_res_eq : nlp_out->inf_norm_res; nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_ineq > nlp_out->inf_norm_res) ? 
nlp_mem->nlp_res->inf_norm_res_ineq : nlp_out->inf_norm_res; nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_comp > nlp_out->inf_norm_res) ? nlp_mem->nlp_res->inf_norm_res_comp : nlp_out->inf_norm_res; if (opts->print_level > sqp_iter + 1) print_ocp_qp_in(nlp_mem->qp_in); // save statistics if (sqp_iter < mem->stat_m) { mem->stat[mem->stat_n*sqp_iter+0] = nlp_mem->nlp_res->inf_norm_res_stat; mem->stat[mem->stat_n*sqp_iter+1] = nlp_mem->nlp_res->inf_norm_res_eq; mem->stat[mem->stat_n*sqp_iter+2] = nlp_mem->nlp_res->inf_norm_res_ineq; mem->stat[mem->stat_n*sqp_iter+3] = nlp_mem->nlp_res->inf_norm_res_comp; } // exit conditions on residuals if ((nlp_mem->nlp_res->inf_norm_res_stat < opts->tol_stat) & (nlp_mem->nlp_res->inf_norm_res_eq < opts->tol_eq) & (nlp_mem->nlp_res->inf_norm_res_ineq < opts->tol_ineq) & (nlp_mem->nlp_res->inf_norm_res_comp < opts->tol_comp)) { // save sqp iterations number mem->sqp_iter = sqp_iter; nlp_out->sqp_iter = sqp_iter; // stop timer total_time += acados_toc(&timer0); // save time nlp_out->total_time = total_time; mem->time_tot = total_time; #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif mem->status = ACADOS_SUCCESS; if (opts->print_level > 0) { printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat, nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq, nlp_mem->nlp_res->inf_norm_res_comp ); printf("\n\n"); } return mem->status; } // regularize Hessian acados_tic(&timer1); config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); mem->time_reg += acados_toc(&timer1); // (typically) no warm start at first iteration if (sqp_iter == 0 && !opts->warm_start_first_qp) { int tmp_int = 0; config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int); } // solve qp acados_tic(&timer1); qp_status = qp_solver->evaluate(qp_solver, 
dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); mem->time_qp_sol += acados_toc(&timer1); qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time); mem->time_qp_solver_call += tmp_time; qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time); mem->time_qp_xcond += tmp_time; // compute correct dual solution in case of Hessian regularization acados_tic(&timer1); config->regularize->correct_dual_sol(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); mem->time_reg += acados_toc(&timer1); // restore default warm start if (sqp_iter==0) { config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &opts->qp_warm_start); } // TODO move into QP solver memory ??? qp_info *qp_info_; ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_); nlp_out->qp_iter = qp_info_->num_iter; // printf("\nqp_iter = %d, sqp_iter = %d, max_sqp_iter = %d\n", nlp_out->qp_iter, sqp_iter, opts->max_iter); qp_iter = qp_info_->num_iter; // save statistics of last qp solver call if (sqp_iter+1 < mem->stat_m) { mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status; mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter; } // compute external QP residuals (for debugging) if (opts->ext_qp_res) { ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws); if (sqp_iter+1 < mem->stat_m) ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6)); } if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER)) { // print_ocp_qp_in(nlp_mem->qp_in); if (opts->print_level > 0) { printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat, nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq, nlp_mem->nlp_res->inf_norm_res_comp ); printf("\n\n"); } // increment sqp_iter to return full statistics and improve output below. 
sqp_iter++; // save sqp iterations number mem->sqp_iter = sqp_iter; nlp_out->sqp_iter = sqp_iter; // stop timer total_time += acados_toc(&timer0); // save time mem->time_tot = total_time; nlp_out->total_time = total_time; #ifndef ACADOS_SILENT printf("\nQP solver returned error status %d in SQP iteration %d, QP iteration %d.\n", qp_status, sqp_iter, qp_iter); #endif #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif if (opts->print_level > 1) { printf("\n Failed to solve the following QP:\n"); if (opts->print_level > sqp_iter + 1) print_ocp_qp_in(nlp_mem->qp_in); } mem->status = ACADOS_QP_FAILURE; return mem->status; } // globalization acados_tic(&timer1); double alpha = ocp_nlp_line_search(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); mem->time_glob += acados_toc(&timer1); // update variables ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, alpha); // ocp_nlp_dims_print(nlp_out->dims); // ocp_nlp_out_print(nlp_out); // exit(1); // ??? @rien // for (int_t i = 0; i < N; i++) // { // ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i]; // sim_opts *opts = dynamics_opts->sim_solver; // if (opts->scheme == NULL) // continue; // opts->sens_adj = (opts->scheme->type != exact); // if (nlp_in->freezeSens) { // // freeze inexact sensitivities after first SQP iteration !! 
// opts->scheme->freeze = true; // } // } if (opts->print_level > 0) { if (sqp_iter%10 == 0) { printf("# it\tstat\t\teq\t\tineq\t\tcomp\n"); } printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat, nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq, nlp_mem->nlp_res->inf_norm_res_comp ); } } // stop timer total_time += acados_toc(&timer0); if (opts->print_level > 0) printf("\n\n"); // ocp_nlp_out_print(nlp_out); // save sqp iterations number mem->sqp_iter = sqp_iter; nlp_out->sqp_iter = sqp_iter; // save time mem->time_tot = total_time; nlp_out->total_time = total_time; // maximum number of iterations reached #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif mem->status = ACADOS_MAXITER; #ifndef ACADOS_SILENT printf("\n ocp_nlp_sqp: maximum iterations reached\n"); #endif return mem->status; } int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_sqp_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; // ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_nlp_sqp_workspace *work = work_; ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; int N = dims->N; int status = ACADOS_SUCCESS; int ii; // TODO(all) add flag to enable/disable checks for (ii = 0; ii <= N; ii++) { int module_val; config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val); if (dims->ns[ii] != module_val) { printf("ocp_nlp_sqp_precompute: inconsistent dimension ns for stage %d with constraint module, got %d, module: %d.", ii, dims->ns[ii], module_val); exit(1); } } // precompute for (ii = 0; ii < N; ii++) { // set T config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], 
"T", nlp_in->Ts+ii); // dynamics precompute status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii], nlp_mem->dynamics[ii], nlp_work->dynamics[ii]); if (status != ACADOS_SUCCESS) return status; } return status; } void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_, char *field, int stage, int index, void *sens_nlp_out_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_sqp_memory *mem = mem_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_nlp_out *sens_nlp_out = sens_nlp_out_; ocp_nlp_sqp_workspace *work = work_; ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in); d_ocp_qp_set_rhs_zero(work->tmp_qp_in); double one = 1.0; if ((!strcmp("ex", field)) & (stage==0)) { d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in); d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in); // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in); config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out); // exit(1); /* copy tmp_qp_out into sens_nlp_out */ int i; int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; // int *nz = dims->nz; for (i = 0; i <= N; i++) { blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0); if (i < N) blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0); blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0); blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0); } } else { printf("\nerror: field %s at stage %d not available in 
ocp_nlp_sqp_eval_param_sens\n", field, stage); exit(1); } return; } // TODO rename memory_get ??? void ocp_nlp_sqp_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_) { ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; ocp_nlp_sqp_memory *mem = mem_; if (!strcmp("sqp_iter", field)) { int *value = return_value_; *value = mem->sqp_iter; } else if (!strcmp("status", field)) { int *value = return_value_; *value = mem->status; } else if (!strcmp("time_tot", field) || !strcmp("tot_time", field)) { double *value = return_value_; *value = mem->time_tot; } else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field)) { double *value = return_value_; *value = mem->time_qp_sol; } else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field)) { double *value = return_value_; *value = mem->time_qp_solver_call; } else if (!strcmp("time_qp_xcond", field)) { double *value = return_value_; *value = mem->time_qp_xcond; } else if (!strcmp("time_lin", field)) { double *value = return_value_; *value = mem->time_lin; } else if (!strcmp("time_reg", field)) { double *value = return_value_; *value = mem->time_reg; } else if (!strcmp("time_glob", field)) { double *value = return_value_; *value = mem->time_glob; } else if (!strcmp("time_sim", field)) { double *value = return_value_; *value = mem->time_sim; } else if (!strcmp("time_sim_la", field)) { double *value = return_value_; *value = mem->time_sim_la; } else if (!strcmp("time_sim_ad", field)) { double *value = return_value_; *value = mem->time_sim_ad; } else if (!strcmp("stat", field)) { double **value = return_value_; *value = mem->stat; } else if (!strcmp("statistics", field)) { int n_row = mem->stat_m<mem->sqp_iter+1 ? 
mem->stat_m : mem->sqp_iter+1; double *value = return_value_; for (int ii=0; ii<n_row; ii++) { value[ii+0] = ii; for (int jj=0; jj<mem->stat_n; jj++) value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n]; } } else if (!strcmp("stat_m", field)) { int *value = return_value_; *value = mem->stat_m; } else if (!strcmp("stat_n", field)) { int *value = return_value_; *value = mem->stat_n; } else if (!strcmp("nlp_mem", field)) { void **value = return_value_; *value = mem->nlp_mem; } else if (!strcmp("qp_xcond_dims", field)) { void **value = return_value_; *value = dims->qp_solver->xcond_dims; } else if (!strcmp("nlp_res", field)) { ocp_nlp_res **value = return_value_; *value = mem->nlp_mem->nlp_res; } else if (!strcmp("qp_xcond_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in; } else if (!strcmp("qp_xcond_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out; } else if (!strcmp("qp_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_in; } else if (!strcmp("qp_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_out; } else if (!strcmp("qp_iter", field)) { config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "iter", return_value_); } else if (!strcmp("res_stat", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_stat; } else if (!strcmp("res_eq", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_eq; } else if (!strcmp("res_ineq", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_ineq; } else if (!strcmp("res_comp", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_comp; } else if (!strcmp("cost_value", field)) { double *value = return_value_; *value = mem->nlp_mem->cost_value; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field); exit(1); } } void 
ocp_nlp_sqp_opts_get(void *config_, void *dims_, void *opts_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; if (!strcmp("nlp_opts", field)) { void **value = return_value_; *value = opts->nlp_opts; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_opts_get\n", field); exit(1); } } void ocp_nlp_sqp_work_get(void *config_, void *dims_, void *work_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_workspace *work = work_; if (!strcmp("nlp_work", field)) { void **value = return_value_; *value = work->nlp_work; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_work_get\n", field); exit(1); } } void ocp_nlp_sqp_config_initialize_default(void *config_) { ocp_nlp_config *config = (ocp_nlp_config *) config_; config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size; config->opts_assign = &ocp_nlp_sqp_opts_assign; config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default; config->opts_update = &ocp_nlp_sqp_opts_update; config->opts_set = &ocp_nlp_sqp_opts_set; config->opts_set_at_stage = &ocp_nlp_sqp_opts_set_at_stage; config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size; config->memory_assign = &ocp_nlp_sqp_memory_assign; config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size; config->evaluate = &ocp_nlp_sqp; config->eval_param_sens = &ocp_nlp_sqp_eval_param_sens; config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default; config->precompute = &ocp_nlp_sqp_precompute; config->get = &ocp_nlp_sqp_get; config->opts_get = &ocp_nlp_sqp_opts_get; config->work_get = &ocp_nlp_sqp_work_get; return; }
fill_int2e.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

/* Fill dense 2-electron AO-integral tensors (eri) from a libcint integral
 * driver (intor), in the four standard permutational-symmetry storage
 * layouts: s1 (no symmetry), s2ij (i>=j packed), s2kl (k>=l packed) and
 * s4 (both pairs packed). Each fill_* routine handles one fixed (ish,jsh)
 * shell pair and loops over all (ksh,lsh) pairs; GTOnr2e_fill_drv
 * parallelizes over the (i,j) shell-pair index with OpenMP. */

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "config.h"
#include "cint.h"

#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define MIN(I,J) ((I) < (J) ? (I) : (J))

/* Largest AO dimension (number of contracted functions) of any shell
 * referenced by the ncenter ranges in shls_slice.
 * ao_loc[i] is the AO offset of shell i, so ao_loc[i+1]-ao_loc[i] is the
 * size of shell i. Used to size the per-thread integral scratch buffer. */
int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter)
{
        int i;
        int i0 = shls_slice[0];
        int i1 = shls_slice[1];
        int di = 0;
        /* Widen [i0,i1) to cover every (start,stop) pair in shls_slice. */
        for (i = 1; i < ncenter; i++) {
                i0 = MIN(i0, shls_slice[i*2  ]);
                i1 = MAX(i1, shls_slice[i*2+1]);
        }
        for (i = i0; i < i1; i++) {
                di = MAX(di, ao_loc[i+1]-ao_loc[i]);
        }
        return di;
}

/* Maximum libcint cache size (in doubles) needed by intor over all shells
 * in shls_slice. Calling intor with a NULL output buffer is the libcint
 * convention for querying the required cache size -- TODO confirm against
 * the libcint version in use. */
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        int i, n;
        int i0 = shls_slice[0];
        int i1 = shls_slice[1];
        /* Widen [i0,i1) to cover every (start,stop) pair in shls_slice. */
        for (i = 1; i < ncenter; i++) {
                i0 = MIN(i0, shls_slice[i*2  ]);
                i1 = MAX(i1, shls_slice[i*2+1]);
        }
        int shls[4];
        int cache_size = 0;
        for (i = i0; i < i1; i++) {
                /* Query with all four indices on the same shell; the probe
                 * is taken as an upper bound per shell. */
                shls[0] = i;
                shls[1] = i;
                shls[2] = i;
                shls[3] = i;
                n = (*intor)(NULL, NULL, shls, atm, natm, bas, nbas, env, NULL, NULL);
                cache_size = MAX(cache_size, n);
        }
        return cache_size;
}

/*
 * *************************************************
 * 2e AO integrals in s4, s2ij, s2kl, s1
 */

/* s1 layout: eri[i,j,k,l] dense, row-major in (i*nj+j) x (k*nl+l).
 * Fills the (ishp,jshp) slab for all (ksh,lsh). buf receives the raw
 * libcint block in Fortran-like order (l slowest via stride dijk), which
 * is transposed into eri here; the tail of buf (past dijkl*comp) is the
 * libcint cache area. Blocks rejected by fprescreen, or for which intor
 * reports no integrals (returns 0), are explicitly zeroed. */
void GTOnr2e_fill_s1(int (*intor)(), int (*fprescreen)(),
                     double *eri, double *buf, int comp, int ishp, int jshp,
                     int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        int ish0 = shls_slice[0];
        int ish1 = shls_slice[1];
        int jsh0 = shls_slice[2];
        int jsh1 = shls_slice[3];
        int ksh0 = shls_slice[4];
        int ksh1 = shls_slice[5];
        int lsh0 = shls_slice[6];
        int lsh1 = shls_slice[7];
        /* Total AO counts of each index range. */
        int ni = ao_loc[ish1] - ao_loc[ish0];
        int nj = ao_loc[jsh1] - ao_loc[jsh0];
        int nk = ao_loc[ksh1] - ao_loc[ksh0];
        int nl = ao_loc[lsh1] - ao_loc[lsh0];
        size_t nij = ni * nj;
        size_t nkl = nk * nl;
        size_t neri = nij * nkl;        /* stride between components of eri */
        int ish = ishp + ish0;
        int jsh = jshp + jsh0;
        int i0 = ao_loc[ish] - ao_loc[ish0];
        int j0 = ao_loc[jsh] - ao_loc[jsh0];
        /* Advance eri to the (i0,j0) row block of this shell pair. */
        eri += nkl * (i0 * nj + j0);

        int di = ao_loc[ish+1] - ao_loc[ish];
        int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int dij = di * dj;
        int k0, l0, dk, dl, dijk, dijkl;
        int i, j, k, l, icomp;
        int ksh, lsh;
        int shls[4];
        double *eri0, *peri, *buf0, *pbuf, *cache;

        shls[0] = ish;
        shls[1] = jsh;

        for (ksh = ksh0; ksh < ksh1; ksh++) {
        for (lsh = lsh0; lsh < lsh1; lsh++) {
                shls[2] = ksh;
                shls[3] = lsh;
                k0 = ao_loc[ksh] - ao_loc[ksh0];
                l0 = ao_loc[lsh] - ao_loc[lsh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                dijk = dij * dk;
                dijkl = dijk * dl;
                /* libcint cache lives after the comp integral blocks. */
                cache = buf + dijkl * comp;
                if ((*fprescreen)(shls, atm, bas, env)
                    && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                                cintopt, cache)) {
                        eri0 = eri + k0*nl+l0;
                        buf0 = buf;
                        for (icomp = 0; icomp < comp; icomp++) {
                                for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        peri = eri0 + nkl*(i*nj+j);
                                        for (k = 0; k < dk; k++) {
                                        /* buf index (i,j,k,l) -> offset
                                         * l*dijk + k*dij + j*di + i */
                                        for (pbuf = buf0 + k*dij + j*di + i,
                                             l = 0; l < dl; l++) {
                                                peri[k*nl+l] = pbuf[l*dijk];
                                        } }
                                } }
                                buf0 += dijkl;
                                eri0 += neri;
                        }
                } else {
                        /* Screened-out block: write explicit zeros. */
                        eri0 = eri + k0*nl+l0;
                        for (icomp = 0; icomp < comp; icomp++) {
                                for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        peri = eri0 + nkl*(i*nj+j);
                                        for (k = 0; k < dk; k++) {
                                        for (l = 0; l < dl; l++) {
                                                peri[k*nl+l] = 0;
                                        } }
                                } }
                                eri0 += neri;
                        }
                }
        } }
}

/* s2ij layout: (i,j) pair packed lower-triangular (i >= j), so the row
 * index is i*(i+1)/2 + j and there are ni*(ni+1)/2 rows; (k,l) is dense.
 * Shell pairs with ishp < jshp are skipped entirely (covered by the
 * transposed pair). For the diagonal shell pair (ishp == jshp) only the
 * j <= i triangle of the block is written. */
void GTOnr2e_fill_s2ij(int (*intor)(), int (*fprescreen)(),
                       double *eri, double *buf, int comp, int ishp, int jshp,
                       int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        if (ishp < jshp) {
                return;
        }

        int ish0 = shls_slice[0];
        int ish1 = shls_slice[1];
        int jsh0 = shls_slice[2];
        //int jsh1 = shls_slice[3];
        int ksh0 = shls_slice[4];
        int ksh1 = shls_slice[5];
        int lsh0 = shls_slice[6];
        int lsh1 = shls_slice[7];
        int ni = ao_loc[ish1] - ao_loc[ish0];
        //int nj = ao_loc[jsh1] - ao_loc[jsh0];
        int nk = ao_loc[ksh1] - ao_loc[ksh0];
        int nl = ao_loc[lsh1] - ao_loc[lsh0];
        size_t nij = ni * (ni+1) / 2;   /* packed (i>=j) pair count */
        size_t nkl = nk * nl;
        size_t neri = nij * nkl;        /* stride between components */
        int ish = ishp + ish0;
        int jsh = jshp + jsh0;
        int i0 = ao_loc[ish] - ao_loc[ish0];
        int j0 = ao_loc[jsh] - ao_loc[jsh0];
        /* Start of row i0, column j0 in triangular-packed row-space. */
        eri += nkl * (i0*(i0+1)/2 + j0);

        int di = ao_loc[ish+1] - ao_loc[ish];
        int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int dij = di * dj;
        int k0, l0, dk, dl, dijk, dijkl;
        int i, j, k, l, icomp;
        int ksh, lsh;
        int shls[4];
        double *eri0, *peri0, *peri, *buf0, *pbuf, *cache;

        shls[0] = ish;
        shls[1] = jsh;

        for (ksh = ksh0; ksh < ksh1; ksh++) {
        for (lsh = lsh0; lsh < lsh1; lsh++) {
                shls[2] = ksh;
                shls[3] = lsh;
                k0 = ao_loc[ksh] - ao_loc[ksh0];
                l0 = ao_loc[lsh] - ao_loc[lsh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                dijk = dij * dk;
                dijkl = dijk * dl;
                cache = buf + dijkl * comp;
                if ((*fprescreen)(shls, atm, bas, env)
                    && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                                cintopt, cache)) {
                        eri0 = eri + k0*nl+l0;
                        buf0 = buf;
                        for (icomp = 0; icomp < comp; icomp++) {
                                peri0 = eri0;
                                if (ishp > jshp) {
                                        /* Off-diagonal shell pair: full
                                         * di x dj block. Row i of the
                                         * triangle has length i0+i+1, hence
                                         * the peri0 += nkl*(i0+i) stride. */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l < dl; l++) {
                                                        peri[k*nl+l] = pbuf[l*dijk];
                                                } }
                                        } }
                                } else {
                                        /* Diagonal shell pair: only j <= i. */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l < dl; l++) {
                                                        peri[k*nl+l] = pbuf[l*dijk];
                                                } }
                                        } }
                                }
                                buf0 += dijkl;
                                eri0 += neri;
                        }
                } else {
                        /* Screened-out block: zero-fill, same triangles. */
                        eri0 = eri + k0*nl+l0;
                        for (icomp = 0; icomp < comp; icomp++) {
                                peri0 = eri0;
                                if (ishp > jshp) {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++) {
                                                for (l = 0; l < dl; l++) {
                                                        peri[k*nl+l] = 0;
                                                } }
                                        } }
                                } else {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++) {
                                                for (l = 0; l < dl; l++) {
                                                        peri[k*nl+l] = 0;
                                                } }
                                        } }
                                }
                                eri0 += neri;
                        }
                }
        } }
}

/* s2kl layout: (i,j) dense, (k,l) pair packed lower-triangular (k >= l);
 * the inner index is k*(k+1)/2 + l with nk*(nk+1)/2 entries. The (ksh,lsh)
 * loops themselves only visit lshp <= kshp; for the diagonal shell pair
 * only l <= k within the block is written. */
void GTOnr2e_fill_s2kl(int (*intor)(), int (*fprescreen)(),
                       double *eri, double *buf, int comp, int ishp, int jshp,
                       int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int ish0 = shls_slice[0];
        int ish1 = shls_slice[1];
        int jsh0 = shls_slice[2];
        int jsh1 = shls_slice[3];
        int ksh0 = shls_slice[4];
        int ksh1 = shls_slice[5];
        int lsh0 = shls_slice[6];
        //int lsh1 = shls_slice[7];
        int ni = ao_loc[ish1] - ao_loc[ish0];
        int nj = ao_loc[jsh1] - ao_loc[jsh0];
        int nk = ao_loc[ksh1] - ao_loc[ksh0];
        //int nl = ao_loc[lsh1] - ao_loc[lsh0];
        size_t nij = ni * nj;
        size_t nkl = nk * (nk+1) / 2;   /* packed (k>=l) pair count */
        size_t neri = nij * nkl;        /* stride between components */
        int ish = ishp + ish0;
        int jsh = jshp + jsh0;
        int i0 = ao_loc[ish] - ao_loc[ish0];
        int j0 = ao_loc[jsh] - ao_loc[jsh0];
        eri += nkl * (i0 * nj + j0);

        int di = ao_loc[ish+1] - ao_loc[ish];
        int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int dij = di * dj;
        int k0, l0, dk, dl, dijk, dijkl;
        int i, j, k, l, icomp;
        int ksh, lsh, kshp, lshp;
        int shls[4];
        double *eri0, *peri, *buf0, *pbuf, *cache;

        shls[0] = ish;
        shls[1] = jsh;

        /* Only the lower triangle of (ksh,lsh) shell pairs is computed. */
        for (kshp = 0; kshp < ksh1-ksh0; kshp++) {
        for (lshp = 0; lshp <= kshp; lshp++) {
                ksh = kshp + ksh0;
                lsh = lshp + lsh0;
                shls[2] = ksh;
                shls[3] = lsh;
                k0 = ao_loc[ksh] - ao_loc[ksh0];
                l0 = ao_loc[lsh] - ao_loc[lsh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                dijk = dij * dk;
                dijkl = dijk * dl;
                cache = buf + dijkl * comp;
                if ((*fprescreen)(shls, atm, bas, env)
                    && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                                cintopt, cache)) {
                        eri0 = eri + k0*(k0+1)/2+l0;
                        buf0 = buf;
                        for (icomp = 0; icomp < comp; icomp++) {
                                if (kshp > lshp) {
                                        /* Off-diagonal (k,l) shell pair:
                                         * full dl columns. Row k of the
                                         * triangle grows by k0+k+1, hence
                                         * peri += k0+k after the k++ step. */
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri = eri0 + nkl*(i*nj+j);
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l < dl; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                } else {
                                        /* Diagonal (k,l) shell pair: l <= k. */
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri = eri0 + nkl*(i*nj+j);
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l <= k; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                }
                                buf0 += dijkl;
                                eri0 += neri;
                        }
                } else {
                        /* Screened-out block: zero-fill, same triangles. */
                        eri0 = eri + k0*(k0+1)/2+l0;
                        for (icomp = 0; icomp < comp; icomp++) {
                                if (kshp > lshp) {
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri = eri0 + nkl*(i*nj+j);
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l < dl; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                } else {
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri = eri0 + nkl*(i*nj+j);
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l <= k; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                }
                                eri0 += neri;
                        }
                }
        } }
}

/* s4 layout: both (i,j) and (k,l) packed lower-triangular -- the
 * combination of s2ij and s2kl. Four cases per block, depending on
 * whether the (i,j) and/or (k,l) shell pairs are diagonal.
 * NOTE(review): the 2nd/3rd branch conditions compare absolute shell ids
 * (ish > jsh, ksh > lsh) while the 1st uses relative ones (ishp > jshp,
 * kshp > lshp). These agree only when ish0 == jsh0 and ksh0 == lsh0,
 * which the s4 symmetry presumably requires -- TODO confirm with callers. */
void GTOnr2e_fill_s4(int (*intor)(), int (*fprescreen)(),
                     double *eri, double *buf, int comp, int ishp, int jshp,
                     int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        if (ishp < jshp) {
                return;
        }

        int ish0 = shls_slice[0];
        int ish1 = shls_slice[1];
        int jsh0 = shls_slice[2];
        //int jsh1 = shls_slice[3];
        int ksh0 = shls_slice[4];
        int ksh1 = shls_slice[5];
        int lsh0 = shls_slice[6];
        //int lsh1 = shls_slice[7];
        int ni = ao_loc[ish1] - ao_loc[ish0];
        //int nj = ao_loc[jsh1] - ao_loc[jsh0];
        int nk = ao_loc[ksh1] - ao_loc[ksh0];
        //int nl = ao_loc[lsh1] - ao_loc[lsh0];
        size_t nij = ni * (ni+1) / 2;   /* packed (i>=j) pair count */
        size_t nkl = nk * (nk+1) / 2;   /* packed (k>=l) pair count */
        size_t neri = nij * nkl;        /* stride between components */
        int ish = ishp + ish0;
        int jsh = jshp + jsh0;
        int i0 = ao_loc[ish] - ao_loc[ish0];
        int j0 = ao_loc[jsh] - ao_loc[jsh0];
        eri += nkl * (i0*(i0+1)/2 + j0);

        int di = ao_loc[ish+1] - ao_loc[ish];
        int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int dij = di * dj;
        int k0, l0, dk, dl, dijk, dijkl;
        int i, j, k, l, icomp;
        int ksh, lsh, kshp, lshp;
        int shls[4];
        double *eri0, *peri0, *peri, *buf0, *pbuf, *cache;

        shls[0] = ish;
        shls[1] = jsh;

        /* Only the lower triangle of (ksh,lsh) shell pairs is computed. */
        for (kshp = 0; kshp < ksh1-ksh0; kshp++) {
        for (lshp = 0; lshp <= kshp; lshp++) {
                ksh = kshp + ksh0;
                lsh = lshp + lsh0;
                shls[2] = ksh;
                shls[3] = lsh;
                k0 = ao_loc[ksh] - ao_loc[ksh0];
                l0 = ao_loc[lsh] - ao_loc[lsh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                dijk = dij * dk;
                dijkl = dijk * dl;
                cache = buf + dijkl * comp;
                if ((*fprescreen)(shls, atm, bas, env)
                    && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                                cintopt, cache)) {
                        eri0 = eri + k0*(k0+1)/2+l0;
                        buf0 = buf;
                        for (icomp = 0; icomp < comp; icomp++) {
                                peri0 = eri0;
                                if (kshp > lshp && ishp > jshp) {
                                        /* Both pairs off-diagonal: full block. */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l < dl; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                } else if (ish > jsh) {
                                        /* (k,l) diagonal: restrict l <= k. */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l <= k; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                } else if (ksh > lsh) {
                                        /* (i,j) diagonal: restrict j <= i. */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l < dl; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                } else {
                                        /* Both diagonal: j <= i and l <= k. */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l <= k; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                }
                                buf0 += dijkl;
                                eri0 += neri;
                        }
                } else {
                        /* Screened-out block: zero-fill, same four cases. */
                        eri0 = eri + k0*(k0+1)/2+l0;
                        buf0 = buf;
                        for (icomp = 0; icomp < comp; icomp++) {
                                peri0 = eri0;
                                if (kshp > lshp && ishp > jshp) {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l < dl; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                } else if (ish > jsh) {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l <= k; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                } else if (ksh > lsh) {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l < dl; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                } else {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l <= k; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                }
                                eri0 += neri;
                        }
                }
        } }
}

/* Default prescreen: accept every shell quartet (K&R-style declaration;
 * it is invoked through the unprototyped pointer int (*fprescreen)()). */
static int no_prescreen() { return 1; }

/* Driver: dispatch one of the fill_* kernels over all (ish,jsh) shell
 * pairs, OpenMP-parallel over the flattened pair index. Each thread
 * allocates its own buf holding comp integral blocks of up to di^4
 * doubles plus the libcint cache area.
 * NOTE(review): the malloc result is not checked before use. */
void GTOnr2e_fill_drv(int (*intor)(), void (*fill)(), int (*fprescreen)(),
                      double *eri, int comp,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        if (fprescreen == NULL) {
                fprescreen = no_prescreen;
        }

        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        /* Worst-case shell dimension and libcint cache requirement. */
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 4);
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 4,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel
{
        int ij, i, j;
        /* Per-thread scratch: comp blocks of di^4 doubles + cache. */
        double *buf = malloc(sizeof(double) * (di*di*di*di*comp + cache_size));
#pragma omp for nowait schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                i = ij / njsh;
                j = ij % njsh;
                (*fill)(intor, fprescreen, eri, buf, comp, i, j,
                        shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
GB_apply_op.c
//------------------------------------------------------------------------------ // GB_apply_op: typecast and apply a unary operator to an array //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Cx = op ((xtype) Ax) // Cx and Ax may be aliased. // Compare with GB_transpose_op.c #include "GB_apply.h" #include "GB_unused.h" #ifndef GBCOMPACT #include "GB_iterator.h" #include "GB_unaryop__include.h" #endif void GB_apply_op // apply a unary operator, Cx = op ((xtype) Ax) ( GB_void *GB_RESTRICT Cx, // output array, of type op->ztype const GrB_UnaryOp op, // operator to apply const GB_void *GB_RESTRICT Ax, // input array, of type Atype const GrB_Type Atype, // type of Ax const int64_t anz, // size of Ax and Cx GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (Cx != NULL) ; ASSERT (Ax != NULL) ; ASSERT (anz >= 0) ; ASSERT (Atype != NULL) ; ASSERT (op != NULL) ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (anz, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // define the worker for the switch factory //-------------------------------------------------------------------------- // FUTURE:: these operators could be renamed: // GrB_AINV_BOOL and GxB_ABS_BOOL to GrB_IDENTITY_BOOL. // GrB_MINV_BOOL to GxB_ONE_BOOL. // GxB_ABS_UINT* to GrB_IDENTITY_UINT*. 
// and then these workers would not need to be created. #define GB_unop(op,zname,aname) GB_unop_ ## op ## zname ## aname #define GB_WORKER(op,zname,ztype,aname,atype) \ { \ GrB_Info info = GB_unop (op,zname,aname) ((ztype *) Cx, \ (atype *) Ax, anz, nthreads) ; \ if (info == GrB_SUCCESS) return ; \ } \ break ; //-------------------------------------------------------------------------- // launch the switch factory //-------------------------------------------------------------------------- #ifndef GBCOMPACT #include "GB_unaryop_factory.c" #endif //-------------------------------------------------------------------------- // generic worker: typecast and apply an operator //-------------------------------------------------------------------------- GB_BURBLE_N (anz, "generic ") ; size_t asize = Atype->size ; size_t zsize = op->ztype->size ; size_t xsize = op->xtype->size ; GB_cast_function cast_A_to_X = GB_cast_factory (op->xtype->code, Atype->code) ; GxB_unary_function fop = op->function ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { // xwork = (xtype) Ax [p] GB_void xwork [GB_VLA(xsize)] ; cast_A_to_X (xwork, Ax +(p*asize), asize) ; // Cx [p] = fop (xwork) fop (Cx +(p*zsize), xwork) ; } }
GB_unaryop__lnot_int16_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int16_int8 // op(A') function: GB_tran__lnot_int16_int8 // C type: int16_t // A type: int8_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int16_int8 ( int16_t *Cx, // Cx and Ax may be aliased int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP 
(p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int16_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
data.h
/*! * Copyright (c) 2015 by Contributors * \file data.h * \brief The input data structure of xgboost. * \author Tianqi Chen */ #ifndef XGBOOST_DATA_H_ #define XGBOOST_DATA_H_ #include <dmlc/base.h> #include <dmlc/data.h> #include <dmlc/serializer.h> #include <xgboost/base.h> #include <xgboost/span.h> #include <xgboost/host_device_vector.h> #include <memory> #include <numeric> #include <algorithm> #include <string> #include <utility> #include <vector> namespace xgboost { // forward declare dmatrix. class DMatrix; /*! \brief data type accepted by xgboost interface */ enum class DataType : uint8_t { kFloat32 = 1, kDouble = 2, kUInt32 = 3, kUInt64 = 4, kStr = 5 }; enum class FeatureType : uint8_t { kNumerical, kCategorical }; /*! * \brief Meta information about dataset, always sit in memory. */ class MetaInfo { public: /*! \brief number of data fields in MetaInfo */ static constexpr uint64_t kNumField = 11; /*! \brief number of rows in the data */ uint64_t num_row_{0}; // NOLINT /*! \brief number of columns in the data */ uint64_t num_col_{0}; // NOLINT /*! \brief number of nonzero entries in the data */ uint64_t num_nonzero_{0}; // NOLINT /*! \brief label of each instance */ HostDeviceVector<bst_float> labels_; // NOLINT /*! * \brief the index of begin and end of a group * needed when the learning task is ranking. */ std::vector<bst_group_t> group_ptr_; // NOLINT /*! \brief weights of each instance, optional */ HostDeviceVector<bst_float> weights_; // NOLINT /*! * \brief initialized margins, * if specified, xgboost will start from this init margin * can be used to specify initial prediction to boost from. */ HostDeviceVector<bst_float> base_margin_; // NOLINT /*! * \brief lower bound of the label, to be used for survival analysis (censored regression) */ HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT /*! 
* \brief upper bound of the label, to be used for survival analysis (censored regression) */ HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT /*! * \brief Name of type for each feature provided by users. Eg. "int"/"float"/"i"/"q" */ std::vector<std::string> feature_type_names; /*! * \brief Name for each feature. */ std::vector<std::string> feature_names; /* * \brief Type of each feature. Automatically set when feature_type_names is specifed. */ HostDeviceVector<FeatureType> feature_types; /* * \brief Weight of each feature, used to define the probability of each feature being * selected when using column sampling. */ HostDeviceVector<float> feature_weigths; /*! \brief default constructor */ MetaInfo() = default; MetaInfo(MetaInfo&& that) = default; MetaInfo& operator=(MetaInfo&& that) = default; MetaInfo& operator=(MetaInfo const& that) = delete; /*! * \brief Validate all metainfo. */ void Validate(int32_t device) const; MetaInfo Slice(common::Span<int32_t const> ridxs) const; /*! * \brief Get weight of each instances. * \param i Instance index. * \return The weight. */ inline bst_float GetWeight(size_t i) const { return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f; } /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */ inline const std::vector<size_t>& LabelAbsSort() const { if (label_order_cache_.size() == labels_.Size()) { return label_order_cache_; } label_order_cache_.resize(labels_.Size()); std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0); const auto& l = labels_.HostVector(); XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(), [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);}); return label_order_cache_; } /*! \brief clear all the information */ void Clear(); /*! * \brief Load the Meta info from binary stream. * \param fi The input stream */ void LoadBinary(dmlc::Stream* fi); /*! 
* \brief Save the Meta info to binary stream * \param fo The output stream. */ void SaveBinary(dmlc::Stream* fo) const; /*! * \brief Set information in the meta info. * \param key The key of the information. * \param dptr The data pointer of the source array. * \param dtype The type of the source data. * \param num Number of elements in the source array. */ void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num); /*! * \brief Set information in the meta info with array interface. * \param key The key of the information. * \param interface_str String representation of json format array interface. * * [ column_0, column_1, ... column_n ] * * Right now only 1 column is permitted. */ void SetInfo(const char* key, std::string const& interface_str); void GetInfo(char const* key, bst_ulong* out_len, DataType dtype, const void** out_dptr) const; void SetFeatureInfo(const char *key, const char **info, const bst_ulong size); void GetFeatureInfo(const char *field, std::vector<std::string>* out_str_vecs) const; /* * \brief Extend with other MetaInfo. * * \param that The other MetaInfo object. * * \param accumulate_rows Whether rows need to be accumulated in this function. If * client code knows number of rows in advance, set this parameter to false. */ void Extend(MetaInfo const& that, bool accumulate_rows); private: /*! \brief argsort of labels */ mutable std::vector<size_t> label_order_cache_; }; /*! \brief Element from a sparse vector */ struct Entry { /*! \brief feature index */ bst_feature_t index; /*! \brief feature value */ bst_float fvalue; /*! \brief default constructor */ Entry() = default; /*! * \brief constructor with index and value * \param index The feature or row index. * \param fvalue The feature value. */ XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {} /*! 
\brief reversely compare feature values */ inline static bool CmpValue(const Entry& a, const Entry& b) { return a.fvalue < b.fvalue; } inline bool operator==(const Entry& other) const { return (this->index == other.index && this->fvalue == other.fvalue); } }; /*! * \brief Parameters for constructing batches. */ struct BatchParam { /*! \brief The GPU device to use. */ int gpu_id; /*! \brief Maximum number of bins per feature for histograms. */ int max_bin{0}; /*! \brief Page size for external memory mode. */ size_t gpu_page_size; BatchParam() = default; BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0) : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {} inline bool operator!=(const BatchParam& other) const { return gpu_id != other.gpu_id || max_bin != other.max_bin || gpu_page_size != other.gpu_page_size; } }; struct HostSparsePageView { using Inst = common::Span<Entry const>; common::Span<bst_row_t const> offset; common::Span<Entry const> data; Inst operator[](size_t i) const { auto size = *(offset.data() + i + 1) - *(offset.data() + i); return {data.data() + *(offset.data() + i), static_cast<Inst::index_type>(size)}; } size_t Size() const { return offset.size() == 0 ? 0 : offset.size() - 1; } }; /*! * \brief In-memory storage unit of sparse batch, stored in CSR format. */ class SparsePage { public: // Offset for each row. HostDeviceVector<bst_row_t> offset; /*! \brief the data of the segments */ HostDeviceVector<Entry> data; size_t base_rowid {0}; /*! \brief an instance of sparse vector in the batch */ using Inst = common::Span<Entry const>; /*! 
\brief get i-th row from the batch */ inline Inst operator[](size_t i) const { const auto& data_vec = data.HostVector(); const auto& offset_vec = offset.HostVector(); size_t size = offset_vec[i + 1] - offset_vec[i]; return {data_vec.data() + offset_vec[i], static_cast<Inst::index_type>(size)}; } HostSparsePageView GetView() const { return {offset.ConstHostSpan(), data.ConstHostSpan()}; } /*! \brief constructor */ SparsePage() { this->Clear(); } /*! \return Number of instances in the page. */ inline size_t Size() const { return offset.Size() == 0 ? 0 : offset.Size() - 1; } /*! \return estimation of memory cost of this page */ inline size_t MemCostBytes() const { return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry); } /*! \brief clear the page */ inline void Clear() { base_rowid = 0; auto& offset_vec = offset.HostVector(); offset_vec.clear(); offset_vec.push_back(0); data.HostVector().clear(); } /*! \brief Set the base row id for this page. */ inline void SetBaseRowId(size_t row_id) { base_rowid = row_id; } SparsePage GetTranspose(int num_columns) const; void SortRows() { auto ncol = static_cast<bst_omp_uint>(this->Size()); #pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1) for (bst_omp_uint i = 0; i < ncol; ++i) { if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) { std::sort( this->data.HostVector().begin() + this->offset.HostVector()[i], this->data.HostVector().begin() + this->offset.HostVector()[i + 1], Entry::CmpValue); } } } /** * \brief Pushes external data batch onto this page * * \tparam AdapterBatchT * \param batch * \param missing * \param nthread * * \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns. */ template <typename AdapterBatchT> uint64_t Push(const AdapterBatchT& batch, float missing, int nthread); /*! 
* \brief Push a sparse page * \param batch the row page */ void Push(const SparsePage &batch); /*! * \brief Push a SparsePage stored in CSC format * \param batch The row batch to be pushed */ void PushCSC(const SparsePage& batch); }; class CSCPage: public SparsePage { public: CSCPage() : SparsePage() {} explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class SortedCSCPage : public SparsePage { public: SortedCSCPage() : SparsePage() {} explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class EllpackPageImpl; /*! * \brief A page stored in ELLPACK format. * * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid * including CUDA-specific implementation details in the header. */ class EllpackPage { public: /*! * \brief Default constructor. * * This is used in the external memory case. An empty ELLPACK page is constructed with its content * set later by the reader. */ EllpackPage(); /*! * \brief Constructor from an existing DMatrix. * * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix * in CSR format. */ explicit EllpackPage(DMatrix* dmat, const BatchParam& param); /*! \brief Destructor. */ ~EllpackPage(); EllpackPage(EllpackPage&& that); /*! \return Number of instances in the page. */ size_t Size() const; /*! \brief Set the base row id for this page. 
*/ void SetBaseRowId(size_t row_id); const EllpackPageImpl* Impl() const { return impl_.get(); } EllpackPageImpl* Impl() { return impl_.get(); } private: std::unique_ptr<EllpackPageImpl> impl_; }; template<typename T> class BatchIteratorImpl { public: virtual ~BatchIteratorImpl() = default; virtual T& operator*() = 0; virtual const T& operator*() const = 0; virtual void operator++() = 0; virtual bool AtEnd() const = 0; }; template<typename T> class BatchIterator { public: using iterator_category = std::forward_iterator_tag; // NOLINT explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); } void operator++() { CHECK(impl_ != nullptr); ++(*impl_); } T& operator*() { CHECK(impl_ != nullptr); return *(*impl_); } const T& operator*() const { CHECK(impl_ != nullptr); return *(*impl_); } bool operator!=(const BatchIterator&) const { CHECK(impl_ != nullptr); return !impl_->AtEnd(); } bool AtEnd() const { CHECK(impl_ != nullptr); return impl_->AtEnd(); } private: std::shared_ptr<BatchIteratorImpl<T>> impl_; }; template<typename T> class BatchSet { public: explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {} BatchIterator<T> begin() { return begin_iter_; } // NOLINT BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT private: BatchIterator<T> begin_iter_; }; struct XGBAPIThreadLocalEntry; /*! * \brief Internal data structured used by XGBoost during training. */ class DMatrix { public: /*! \brief default constructor */ DMatrix() = default; /*! \brief meta information of the dataset */ virtual MetaInfo& Info() = 0; virtual void SetInfo(const char *key, const void *dptr, DataType dtype, size_t num) { this->Info().SetInfo(key, dptr, dtype, num); } virtual void SetInfo(const char* key, std::string const& interface_str) { this->Info().SetInfo(key, interface_str); } /*! \brief meta information of the dataset */ virtual const MetaInfo& Info() const = 0; /*! 
\brief Get thread local memory for returning data from DMatrix. */ XGBAPIThreadLocalEntry& GetThreadLocal() const; /** * \brief Gets batches. Use range based for loop over BatchSet to access individual batches. */ template<typename T> BatchSet<T> GetBatches(const BatchParam& param = {}); template <typename T> bool PageExists() const; // the following are column meta data, should be able to answer them fast. /*! \return Whether the data columns single column block. */ virtual bool SingleColBlock() const = 0; /*! \brief virtual destructor */ virtual ~DMatrix(); /*! \brief Whether the matrix is dense. */ bool IsDense() const { return Info().num_nonzero_ == Info().num_row_ * Info().num_col_; } /*! * \brief Load DMatrix from URI. * \param uri The URI of input. * \param silent Whether print information during loading. * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode. * \param file_format The format type of the file, used for dmlc::Parser::Create. * By default "auto" will be able to load in both local binary file. * \param page_size Page size for external memory. * \return The created DMatrix. */ static DMatrix* Load(const std::string& uri, bool silent, bool load_row_split, const std::string& file_format = "auto", size_t page_size = kPageSize); /** * \brief Creates a new DMatrix from an external data adapter. * * \tparam AdapterT Type of the adapter. * \param [in,out] adapter View onto an external data. * \param missing Values to count as missing. * \param nthread Number of threads for construction. * \param cache_prefix (Optional) The cache prefix for external memory. * \param page_size (Optional) Size of the page. * * \return a Created DMatrix. */ template <typename AdapterT> static DMatrix* Create(AdapterT* adapter, float missing, int nthread, const std::string& cache_prefix = "", size_t page_size = kPageSize); /** * \brief Create a new Quantile based DMatrix used for histogram based algorithm. 
* * \tparam DataIterHandle External iterator type, defined in C API. * \tparam DMatrixHandle DMatrix handle, defined in C API. * \tparam DataIterResetCallback Callback for reset, prototype defined in C API. * \tparam XGDMatrixCallbackNext Callback for next, prototype defined in C API. * * \param iter External data iterator * \param proxy A hanlde to ProxyDMatrix * \param reset Callback for reset * \param next Callback for next * \param missing Value that should be treated as missing. * \param nthread number of threads used for initialization. * \param max_bin Maximum number of bins. * * \return A created quantile based DMatrix. */ template <typename DataIterHandle, typename DMatrixHandle, typename DataIterResetCallback, typename XGDMatrixCallbackNext> static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy, DataIterResetCallback *reset, XGDMatrixCallbackNext *next, float missing, int nthread, int max_bin); virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0; /*! \brief page size 32 MB */ static const size_t kPageSize = 32UL << 20UL; protected: virtual BatchSet<SparsePage> GetRowBatches() = 0; virtual BatchSet<CSCPage> GetColumnBatches() = 0; virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0; virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0; virtual bool EllpackExists() const = 0; virtual bool SparsePageExists() const = 0; }; template<> inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) { return GetRowBatches(); } template<> inline bool DMatrix::PageExists<EllpackPage>() const { return this->EllpackExists(); } template<> inline bool DMatrix::PageExists<SparsePage>() const { return this->SparsePageExists(); } template<> inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) { return GetColumnBatches(); } template<> inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) { return GetSortedColumnBatches(); } template<> inline BatchSet<EllpackPage> DMatrix::GetBatches(const 
BatchParam& param) { return GetEllpackBatches(param); } } // namespace xgboost namespace dmlc { DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true); namespace serializer { template <> struct Handler<xgboost::Entry> { inline static void Write(Stream* strm, const xgboost::Entry& data) { strm->Write(data.index); strm->Write(data.fvalue); } inline static bool Read(Stream* strm, xgboost::Entry* data) { return strm->Read(&data->index) && strm->Read(&data->fvalue); } }; } // namespace serializer } // namespace dmlc #endif // XGBOOST_DATA_H_
sample_task_multiple_producer.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>   /* atoi, malloc, free -- was missing: both were implicitly declared */
#include <sys/time.h>

/*
 * Micro-benchmark for OpenMP task overhead with multiple producers:
 * each iteration of a worksharing loop spawns one task that scales a[i]
 * by 0.9.  Prints "<nthreads> <elapsed seconds>" and verifies the result.
 *
 * argv[1] (optional): number of array elements (default 100).
 * Returns 0 on success, 1 on allocation failure or verification mismatch.
 */
int main(int argc, char * argv[])
{
  int i, num = (argc > 1) ? atoi(argv[1]) : 100;
  int nthreads = 1;
  struct timeval t_start, t_end;
  double time;

  double *a = (double *)malloc(sizeof(double) * num);
  if (a == NULL) {                       /* was unchecked */
    fprintf(stderr, "out of memory\n");
    return 1;
  }

  #pragma omp parallel
  {
    /* single writer: the original let every thread store nthreads, which
     * is a data race under the OpenMP memory model even though all threads
     * write the same value */
    #pragma omp single
    nthreads = omp_get_num_threads();
  }

  for (i = 0; i < num; i++) {
    a[i] = i;
  }

  gettimeofday(&t_start, NULL);
  #pragma omp parallel
  {
    #pragma omp for
    for (i = 0; i < num; i++) {
      /* i is private in the worksharing loop, hence implicitly
       * firstprivate in the task: each task sees its own iteration */
      #pragma omp task
      {
        a[i] *= 0.9;
      }
    }
  }
  gettimeofday(&t_end, NULL);

  time = (t_end.tv_sec * 1000000 + t_end.tv_usec) -
         (t_start.tv_sec * 1000000 + t_start.tv_usec);
  printf("%d %f\n", nthreads, time / 1000000.0);

  /* verify: i*0.9 is computed in double both here and in the task, so an
   * exact comparison is deterministic */
  for (i = 0; i < num; i++) {
    if (a[i] != i * 0.9) {
      printf("a[%d]=%f != %f\n", i, a[i], i * 0.9);
      free(a);
      return 1;
    }
  }

  free(a);      /* was leaked */
  return 0;     /* explicit success status (was missing) */
}
csf.c
/****************************************************************************** * INCLUDES *****************************************************************************/ #include <omp.h> #include "csf.h" #include "sort.h" #include "tile.h" #include "util.h" #include "mttkrp.h" #include "io.h" #include <omp.h> /****************************************************************************** * API FUNCTIONS *****************************************************************************/ int splatt_csf_load( char const * const fname, splatt_idx_t * nmodes, splatt_csf ** tensors, double const * const options) { sptensor_t * tt = tt_read(fname); if(tt == NULL) { return SPLATT_ERROR_BADINPUT; } tt_remove_empty(tt); *tensors = csf_alloc(tt, options); *nmodes = tt->nmodes; tt_free(tt); return SPLATT_SUCCESS; } int splatt_csf_convert( splatt_idx_t const nmodes, splatt_idx_t const nnz, splatt_fidx_t ** const inds, splatt_storage_val_t * const vals, splatt_csf ** tensors, double const * const options) { sptensor_t tt; tt_fill(&tt, nnz, nmodes, inds, vals); tt_remove_empty(&tt); *tensors = csf_alloc(&tt, options); return SPLATT_SUCCESS; } void splatt_free_csf( splatt_csf * tensors, double const * const options) { csf_free(tensors, options); } /****************************************************************************** * MEMORY ALLOCATION FUNCTIONS *****************************************************************************/ /** * @brief Allocate an fptr array, respecting data size and HBW allocation. * * @param n_elems The size of the fptr array, in # elements. * * @return A (void *) pointer to the array. */ void * p_alloc_fptr( idx_t n_elems) { /* This lets us auto-size based on struct definition. */ csf_sparsity sparsity; #if SPLATT_CSF_FPTR_HBW return splatt_hbw_malloc(n_elems * sizeof(**sparsity.fptr)); #else return splatt_malloc(n_elems * sizeof(**sparsity.fptr)); #endif } /** * @brief Free an fptr array allocated by p_alloc_fptr(). * * @param fptr The array to free. 
*/
void p_free_fptr(
    void * fptr)
{
#if SPLATT_CSF_FPTR_HBW
  splatt_hbw_free(fptr);
#else
  splatt_free(fptr);
#endif
}


/**
* @brief Allocate an fids array, respecting data size and HBW
*        (high-bandwidth memory) allocation.
*
* @param n_elems The size of the fids array, in # elements.
*
* @return A (void *) pointer to the array.
*/
void * p_alloc_fids(
    idx_t n_elems)
{
  /* This lets us auto-size based on struct definition: 'sparsity' is only
   * used inside sizeof, so no storage is actually read. */
  csf_sparsity sparsity;

#if SPLATT_CSF_FIDS_HBW
  return splatt_hbw_malloc(n_elems * sizeof(**sparsity.fids));
#else
  return splatt_malloc(n_elems * sizeof(**sparsity.fids));
#endif
}


/**
* @brief Free an fids array allocated by p_alloc_fids().
*
* @param fids The array to free.
*/
void p_free_fids(
    void * fids)
{
#if SPLATT_CSF_FIDS_HBW
  splatt_hbw_free(fids);
#else
  splatt_free(fids);
#endif
}


/**
* @brief Allocate a vals array, respecting data size and HBW allocation.
*
* @param n_elems The size of the vals array, in # elements.
*
* @return A (void *) pointer to the array.
*/
void * p_alloc_vals(
    idx_t n_elems)
{
  /* This lets us auto-size based on struct definition. */
  csf_sparsity sparsity;

#if SPLATT_CSF_VALS_HBW
  return splatt_hbw_malloc(n_elems * sizeof(*sparsity.vals));
#else
  return splatt_malloc(n_elems * sizeof(*sparsity.vals));
#endif
}


/**
* @brief Free a vals array allocated by p_alloc_vals().
*
* @param vals The array to free.
*/
void p_free_vals(
    void * vals)
{
#if SPLATT_CSF_VALS_HBW
  splatt_hbw_free(vals);
#else
  splatt_free(vals);
#endif
}



/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/


/**
* @brief Find a permutation of modes that results in non-decreasing mode
*        size (smallest mode first).
*        NOTE(review): the original comment said "non-increasing", but the
*        sort below is ascending and is not reversed afterwards (contrast
*        p_order_dims_large, which reverses) -- confirm against
*        quicksort()'s ordering in sort.c.
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param perm_dims The resulting permutation.
*/ static void p_order_dims_small( idx_t const * const dims, idx_t const nmodes, idx_t * const perm_dims) { idx_t sorted[MAX_NMODES]; idx_t matched[MAX_NMODES]; for(idx_t m=0; m < nmodes; ++m) { sorted[m] = dims[m]; matched[m] = 0; } quicksort(sorted, nmodes); /* silly n^2 comparison to grab modes from sorted dimensions. * TODO: make a key/val sort...*/ for(idx_t mfind=0; mfind < nmodes; ++mfind) { for(idx_t mcheck=0; mcheck < nmodes; ++mcheck) { if(sorted[mfind] == dims[mcheck] && !matched[mcheck]) { perm_dims[mfind] = mcheck; matched[mcheck] = 1; break; } } } } /** * Same as p_order_dims_small but don't put dims that can be privated * as the root */ static void p_order_dims_small_no_privatization( idx_t const * const dims, idx_t const nmodes, idx_t * const perm_dims, idx_t nnz, double const * const opts) { p_order_dims_small(dims, nmodes, perm_dims); /* find where custom_mode was placed and adjust from there */ for(idx_t m=0; m < nmodes; ++m) { idx_t perm_dim = perm_dims[m]; if(dims[perm_dims[m]] >= 32*1024) { // don't make it root mode if it's too short to avoid load imbalance memmove(perm_dims + 1, perm_dims, (m) * sizeof(m)); perm_dims[0] = perm_dim; break; } } } /** * @brief Find a permutation of modes such that the first mode is 'custom-mode' * and the remaining are naturally ordered (0, 1, ...). * * @param dims The tensor dimensions. * @param nmodes The number of modes. * @param custom_mode The mode to place first. * @param perm_dims The resulting permutation. 
*/ static void p_order_dims_inorder( idx_t const * const dims, idx_t const nmodes, idx_t const custom_mode, idx_t * const perm_dims) { /* initialize to natural ordering */ for(idx_t m=0; m < nmodes; ++m) { perm_dims[m] = m; } /* find where custom_mode was placed and adjust from there */ for(idx_t m=0; m < nmodes; ++m) { if(perm_dims[m] == custom_mode) { memmove(perm_dims + 1, perm_dims, (m) * sizeof(m)); perm_dims[0] = custom_mode; break; } } } static void p_order_dims_round_robin( idx_t const * const dims, idx_t const nmodes, idx_t const custom_mode, idx_t * const perm_dims) { for(idx_t m=0; m < nmodes; ++m) { perm_dims[m] = (custom_mode + m)%nmodes; } } static void p_order_dims_all_permute( idx_t const * const dims, idx_t const nmodes, idx_t const custom_mode, idx_t * const perm_dims) { if (custom_mode == 0) { perm_dims[0] = 0; perm_dims[1] = 1; perm_dims[2] = 2; } else if (custom_mode == 1) { perm_dims[0] = 0; perm_dims[1] = 2; perm_dims[2] = 1; } else if (custom_mode == 2) { perm_dims[0] = 1; perm_dims[1] = 0; perm_dims[2] = 2; } else if (custom_mode == 3) { perm_dims[0] = 1; perm_dims[1] = 2; perm_dims[2] = 0; } else if (custom_mode == 4) { perm_dims[0] = 2; perm_dims[1] = 0; perm_dims[2] = 1; } else if (custom_mode == 5) { perm_dims[0] = 2; perm_dims[1] = 1; perm_dims[2] = 0; } else { assert(0); } } /** * @brief Find a permutation of modes such that the first mode is 'custom-mode' * and the remaining are sorted in non-increasing order. * * @param dims The tensor dimensions. * @param nmodes The number of modes. * @param custom_mode The mode to place first. * @param perm_dims The resulting permutation. 
*/ static void p_order_dims_minusone( idx_t const * const dims, idx_t const nmodes, idx_t const custom_mode, idx_t * const perm_dims) { p_order_dims_small(dims, nmodes, perm_dims); /* find where custom_mode was placed and adjust from there */ for(idx_t m=0; m < nmodes; ++m) { if(perm_dims[m] == custom_mode) { memmove(perm_dims + 1, perm_dims, (m) * sizeof(m)); perm_dims[0] = custom_mode; break; } } } /** * @brief Find a permutation of modes that results in non-decreasing mode size. * * @param dims The tensor dimensions. * @param nmodes The number of modes. * @param perm_dims The resulting permutation. */ static void p_order_dims_large( idx_t const * const dims, idx_t const nmodes, idx_t * const perm_dims) { idx_t sorted[MAX_NMODES]; idx_t matched[MAX_NMODES]; for(idx_t m=0; m < nmodes; ++m) { sorted[m] = dims[m]; matched[m] = 0; } /* sort small -> large */ quicksort(sorted, nmodes); /* reverse list */ for(idx_t m=0; m < nmodes/2; ++m) { idx_t tmp = sorted[nmodes-m-1]; sorted[nmodes-m-1] = sorted[m]; sorted[m] = tmp; } /* silly n^2 comparison to grab modes from sorted dimensions. 
* TODO: make a key/val sort...*/ for(idx_t mfind=0; mfind < nmodes; ++mfind) { for(idx_t mcheck=0; mcheck < nmodes; ++mcheck) { if(sorted[mfind] == dims[mcheck] && !matched[mcheck]) { perm_dims[mfind] = mcheck; matched[mcheck] = 1; break; } } } } void splatt_csf_write_file( splatt_csf const * const ct, FILE * fout) { fwrite(&ct->nnz, sizeof(ct->nnz), 1, fout); fwrite(&ct->nmodes, sizeof(ct->nmodes), 1, fout); fwrite(ct->dims, sizeof(*ct->dims), ct->nmodes, fout); fwrite(ct->dim_perm, sizeof(*ct->dim_perm), ct->nmodes, fout); fwrite(&ct->which_tile, sizeof(ct->which_tile), 1, fout); fwrite(&ct->ntiles, sizeof(ct->ntiles), 1, fout); fwrite(ct->tile_dims, sizeof(*ct->tile_dims), ct->nmodes, fout); for(idx_t t=0; t < ct->ntiles; ++t) { csf_sparsity const * const ft = ct->pt + t; fwrite(ft->nfibs, sizeof(*ft->nfibs), ct->nmodes, fout); for(idx_t m=0; m < ct->nmodes-1; ++m) { fwrite(ft->fptr[m], sizeof(**ft->fptr), ft->nfibs[m] + 1, fout); if (m != 0) { // FIXME fwrite(ft->fids[m], sizeof(**ft->fids), ft->nfibs[m], fout); } } fwrite(ft->fids[ct->nmodes - 1], sizeof(*ft->fids[ct->nmodes - 1]), ft->nfibs[ct->nmodes-1], fout); fwrite(ft->vals, sizeof(*ft->vals), ft->nfibs[ct->nmodes-1], fout); } } void splatt_csf_write( splatt_csf const * const ct, char const * const ofname, int ncopies) { FILE * fout = fopen(ofname,"w"); if (fout == NULL) { fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", ofname); return; } timer_start(&timers[TIMER_IO]); fwrite(&ncopies, sizeof(ncopies), 1, fout); for (int i = 0; i < ncopies; ++i) { splatt_csf_write_file(ct + i, fout); } timer_stop(&timers[TIMER_IO]); fclose(fout); } void splatt_csf_read_file( splatt_csf *ct, FILE * fin) { fread(&ct->nnz, sizeof(ct->nnz), 1, fin); fread(&ct->nmodes, sizeof(ct->nmodes), 1, fin); fread(ct->dims, sizeof(*ct->dims), ct->nmodes, fin); fread(ct->dim_perm, sizeof(*ct->dim_perm), ct->nmodes, fin); fread(&ct->which_tile, sizeof(ct->which_tile), 1, fin); fread(&ct->ntiles, sizeof(ct->ntiles), 1, fin); 
fread(&ct->tile_dims, sizeof(*ct->tile_dims), ct->nmodes, fin); ct->pt = splatt_malloc(sizeof(*(ct->pt))*ct->ntiles); for(idx_t t=0; t < ct->ntiles; ++t) { csf_sparsity * ft = ct->pt + t; fread(ft->nfibs, sizeof(*ft->nfibs), ct->nmodes, fin); for(idx_t m=0; m < ct->nmodes-1; ++m) { ft->fptr[m] = p_alloc_fptr(ft->nfibs[m] + 1); fread(ft->fptr[m], sizeof(*ft->fptr[m]), ft->nfibs[m]+1, fin); if (m != 0) { // FIXME ft->fids[m] = p_alloc_fids(ft->nfibs[m]); fread(ft->fids[m], sizeof(*ft->fids[m]), ft->nfibs[m], fin); } else { ft->fids[m] = NULL; } } ft->fids[ct->nmodes-1] = p_alloc_fids(ft->nfibs[ct->nmodes-1]); ft->vals = p_alloc_vals(ft->nfibs[ct->nmodes-1]); fread(ft->fids[ct->nmodes-1], sizeof(*ft->fids[ct->nmodes-1]), ft->nfibs[ct->nmodes-1], fin); fread(ft->vals, sizeof(*ft->vals), ft->nfibs[ct->nmodes-1], fin); } } int splatt_csf_equals(splatt_csf *ct1, splatt_csf *ct2) { if (ct1->nnz != ct2->nnz) return 0; if (ct1->nmodes != ct2->nmodes) return 0; if (memcmp(ct1->dims, ct2->dims, sizeof(*ct1->dims)*ct1->nmodes)) return 0; if (memcmp(ct1->dim_perm, ct2->dim_perm, sizeof(*ct1->dim_perm)*ct1->nmodes)) return 0; if (ct1->which_tile != ct2->which_tile) return 0; if (ct1->ntiles != ct2->ntiles) return 0; if (memcmp(ct1->tile_dims, ct2->tile_dims, sizeof(*ct1->tile_dims)*ct1->nmodes)) return 0; for(idx_t t=0; t < ct1->ntiles; ++t) { csf_sparsity const * const ft1 = ct1->pt + t; csf_sparsity const * const ft2 = ct2->pt + t; if (memcmp(ft1->nfibs, ft2->nfibs, sizeof(*ft1->nfibs)*ct1->nmodes)) return 0; for(idx_t m=0; m < ct1->nmodes-1; ++m) { if (memcmp(ft1->fptr[m], ft2->fptr[m], sizeof(*ft1->fptr[m])*(ft1->nfibs[m] + 1))) return 0; if (m != 0 && memcmp(ft1->fids[m], ft2->fids[m], sizeof(*ft1->fids[m])*ft1->nfibs[m])) return 0; } if (memcmp(ft1->fids[ct1->nmodes - 1], ft2->fids[ct2->nmodes - 1], sizeof(*ft1->fids[ct1->nmodes - 1])*ft1->nfibs[ct1->nmodes-1])) return 0; if (memcmp(ft1->vals, ft2->vals, sizeof(*ft1->vals)*ft1->nfibs[ct1->nmodes-1])) return 0; } return 1; } 
void splatt_csf_read( splatt_csf *ct, char const * const ifname, int ncopies) { FILE * fin = fopen(ifname,"r"); if (fin == NULL) { fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", ifname); return; } timer_start(&timers[TIMER_IO]); int file_ncopies = 2; fread(&file_ncopies, sizeof(file_ncopies), 1, fin); if (ncopies == -1) { splatt_csf_read_file(ct, fin); ncopies = ct->nmodes; for (int i = 1; i < ncopies; ++i) { splatt_csf_read_file(ct + i, fin); } } else { if (file_ncopies < ncopies) { fprintf(stderr, "SPLATT ERROR: %d copies are required but %s has only %d\n", ncopies, ifname, file_ncopies); } for (int i = 0; i < ncopies; ++i) { splatt_csf_read_file(ct + i, fin); } } timer_stop(&timers[TIMER_IO]); fclose(fin); } /** * @brief Print a CSF tensor in human-readable format. * * @param ct The tensor to print. */ static void p_print_csf( splatt_csf const * const ct) { printf("-----------\n"); printf("nmodes: %"SPLATT_PF_IDX" nnz: %"SPLATT_PF_IDX" ntiles: " "%"SPLATT_PF_IDX"\n", ct->nmodes, ct->nnz, ct->ntiles); printf("dims: %"SPLATT_PF_IDX"", ct->dims[0]); for(idx_t m=1; m < ct->nmodes; ++m) { printf("x%"SPLATT_PF_IDX"", ct->dims[m]); } printf(" (%"SPLATT_PF_IDX"", ct->dim_perm[0]); for(idx_t m=1; m < ct->nmodes; ++m) { printf("->%"SPLATT_PF_IDX"", ct->dim_perm[m]); } printf(") "); printf("tile dims: %"SPLATT_PF_IDX"", ct->tile_dims[0]); for(idx_t m=1; m < ct->nmodes; ++m) { printf("x%"SPLATT_PF_IDX"", ct->tile_dims[m]); } printf("\n"); for(idx_t t=0; t < ct->ntiles; ++t) { csf_sparsity const * const ft = ct->pt + t; /* skip empty tiles */ if(ft->vals == NULL) { continue; } /* write slices */ printf("tile: %"SPLATT_PF_IDX" fptr:\n", t); printf("[%"SPLATT_PF_IDX"] ", ft->nfibs[0]); for(idx_t f=0; f < ft->nfibs[0]; ++f) { if(ft->fids[0] == NULL) { printf(" %"SPLATT_PF_IDX"", ft->fptr[0][f]); } else { printf(" (%"SPLATT_PF_IDX", %"SPLATT_PF_FIDX")", ft->fptr[0][f], ft->fids[0][f]); } } printf(" %"SPLATT_PF_IDX"\n", ft->fptr[0][ft->nfibs[0]]); /* inner nodes */ 
for(idx_t m=1; m < ct->nmodes-1; ++m) { printf("[%"SPLATT_PF_IDX"] ", ft->nfibs[m]); for(idx_t f=0; f < ft->nfibs[m]; ++f) { printf(" (%"SPLATT_PF_IDX", %"SPLATT_PF_FIDX")", ft->fptr[m][f], ft->fids[m][f]); } printf(" %"SPLATT_PF_IDX"\n", ft->fptr[m][ft->nfibs[m]]); } /* vals/inds */ printf("[%"SPLATT_PF_IDX"] ", ft->nfibs[ct->nmodes-1]); for(idx_t f=0; f < ft->nfibs[ct->nmodes-1]; ++f) { printf(" %3"SPLATT_PF_FIDX"", ft->fids[ct->nmodes-1][f]); } printf("\n"); for(idx_t n=0; n < ft->nfibs[ct->nmodes-1]; ++n) { printf(" %0.1f", ft->vals[n]); } printf("\n"); } printf("-----------\n\n"); } static void p_set_nfibs_root( splatt_csf * const ct, sptensor_t const * const tt, idx_t const tile_id, idx_t const * const nnztile_ptr) { idx_t const nnzstart = nnztile_ptr[tile_id]; idx_t const nnzend = nnztile_ptr[tile_id+1]; idx_t const nnz = nnzend - nnzstart; assert(nnzstart < nnzend); /* the mode after accounting for dim_perm */ fidx_t const * const restrict ttind = tt->ind[ct->dim_perm[0]] + nnzstart; /* grab sparsity pattern */ csf_sparsity * const pt = ct->pt + tile_id; /* count fibers */ idx_t nfibs = 1; for(idx_t x=1; x < nnz; ++x) { assert(ttind[x-1] <= ttind[x]); if(ttind[x] != ttind[x-1]) { ++nfibs; } } ct->pt[tile_id].nfibs[0] = nfibs; assert(nfibs <= ct->dims[ct->dim_perm[0]]); } /** * @brief Construct the sparsity structure of the outer-mode of a CSF tensor. * * @param ct The CSF tensor to construct. * @param tt The coordinate tensor to construct from. Assumed to be already * sorted. * @param tile_id The ID of the tile to construct. * @param nnztile_ptr A pointer into 'tt' that marks the start of each tile. 
*/ static void p_mk_outerptr_hub( splatt_csf * const ct, sptensor_t const * const tt, idx_t const tile_id, idx_t const * const nnztile_ptr) { idx_t const nnzstart = nnztile_ptr[tile_id]; idx_t const nnzend = nnztile_ptr[tile_id+1]; idx_t const nnz = nnzend - nnzstart; assert(nnzstart < nnzend); /* the mode after accounting for dim_perm */ fidx_t const * const restrict ttind = tt->ind[ct->dim_perm[0]] + nnzstart; /* grab sparsity pattern */ csf_sparsity * const pt = ct->pt + tile_id; idx_t *nfibs = malloc(sizeof(idx_t)*(omp_get_max_threads() + 1)); nfibs[0] = 1; #pragma omp parallel { int nthreads = omp_get_num_threads(); int tid = omp_get_thread_num(); idx_t x_per_thread = (nnz + nthreads - 1)/nthreads; idx_t x_begin = SS_MAX(SS_MIN(x_per_thread*tid, nnz), 1); idx_t x_end = SS_MIN(x_per_thread*(tid + 1), nnz); idx_t nfibs_private = 0; for(idx_t x = x_begin; x < x_end; ++x) { assert(ttind[x-1] <= ttind[x]); if(ttind[x] != ttind[x-1]) { ++nfibs_private; } } nfibs[tid + 1] = nfibs_private; #pragma omp barrier #pragma omp master { /* prefix sum */ for(int t = 0; t < nthreads; ++t) { nfibs[t + 1] += nfibs[t]; } ct->pt[tile_id].nfibs[0] = nfibs[nthreads]; assert(nfibs[nthreads] <= ct->dims[ct->dim_perm[0]]); pt->fptr[0] = p_alloc_fptr(nfibs[nthreads] + 1); if(ct->ntiles > 1) { pt->fids[0] = p_alloc_fids(nfibs[nthreads]); } else { pt->fids[0] = NULL; } } #pragma omp barrier idx_t * const restrict fp = pt->fptr[0]; fidx_t * const restrict fi = pt->fids[0]; #pragma omp master { fp[0] = 0; if(fi != NULL) { fi[0] = ttind[0]; } fp[nfibs[nthreads]] = nnz; } idx_t nfound = nfibs[tid]; if(fi != NULL) { for(idx_t n=x_begin; n < x_end; ++n) { /* check for end of outer index */ if(ttind[n] != ttind[n-1]) { fi[nfound] = ttind[n]; fp[nfound++] = n; } } } else { for(idx_t n=x_begin; n < x_end; ++n) { /* check for end of outer index */ if(ttind[n] != ttind[n-1]) { assert(nfound == ttind[n]); fp[nfound++] = n; } } } } /* omp parallel */ } /** * @brief Construct the sparsity structure of 
the outer-mode of a CSF tensor. * * @param ct The CSF tensor to construct. * @param tt The coordinate tensor to construct from. Assumed to be already * sorted. * @param tile_id The ID of the tile to construct. * @param nnztile_ptr A pointer into 'tt' that marks the start of each tile. */ static void p_mk_outerptr( splatt_csf * const ct, sptensor_t const * const tt, idx_t const tile_id, idx_t const * const nnztile_ptr) { idx_t const nnzstart = nnztile_ptr[tile_id]; idx_t const nnzend = nnztile_ptr[tile_id+1]; idx_t const nnz = nnzend - nnzstart; assert(nnzstart < nnzend); /* the mode after accounting for dim_perm */ fidx_t const * const restrict ttind = tt->ind[ct->dim_perm[0]] + nnzstart; /* grab sparsity pattern */ csf_sparsity * const pt = ct->pt + tile_id; if(omp_in_parallel()) { /* nfibs already counted in p_set_nfibs_root */ idx_t nfibs = ct->pt[tile_id].nfibs[0]; idx_t * const restrict fp = pt->fptr[0]; fidx_t * const restrict fi = pt->fids[0]; fp[0] = 0; if(fi != NULL) { fi[0] = ttind[0]; } idx_t nfound = 1; if(fi != NULL) { for(idx_t n=1; n < nnz; ++n) { /* check for end of outer index */ if(ttind[n] != ttind[n-1]) { fi[nfound] = ttind[n]; fp[nfound++] = n; } } } else { for(idx_t n=1; n < nnz; ++n) { /* check for end of outer index */ if(ttind[n] != ttind[n-1]) { assert(nfound == ttind[n]); fp[nfound++] = n; } } } fp[nfibs] = nnz; } /* omp_in_parallel */ else { idx_t *nfibs = malloc(sizeof(*nfibs)*(omp_get_max_threads() + 1)); nfibs[0] = 1; #pragma omp parallel { int nthreads = omp_get_num_threads(); int tid = omp_get_thread_num(); idx_t x_per_thread = (nnz + nthreads - 1)/nthreads; idx_t x_begin = SS_MAX(SS_MIN(x_per_thread*tid, nnz), 1); idx_t x_end = SS_MIN(x_per_thread*(tid + 1), nnz); idx_t nfibs_private = 0; for(idx_t x = x_begin; x < x_end; ++x) { assert(ct->nslice_hubs > 0 || ttind[x-1] <= ttind[x]); if(ttind[x] != ttind[x-1]) { ++nfibs_private; } } nfibs[tid + 1] = nfibs_private; #pragma omp barrier #pragma omp master { /* prefix sum */ for(int t = 
0; t < nthreads; ++t) { nfibs[t + 1] += nfibs[t]; } ct->pt[tile_id].nfibs[0] = nfibs[nthreads]; assert(nfibs[nthreads] <= ct->dims[ct->dim_perm[0]]); pt->fptr[0] = p_alloc_fptr(nfibs[nthreads] + 1); if(ct->ntiles > 1 || ct->nslice_hubs > 0) { pt->fids[0] = p_alloc_fids(nfibs[nthreads]); } else { pt->fids[0] = NULL; } } #pragma omp barrier idx_t * const restrict fp = pt->fptr[0]; fidx_t * const restrict fi = pt->fids[0]; #pragma omp master { fp[0] = 0; if(fi != NULL) { fi[0] = ttind[0]; } fp[nfibs[nthreads]] = nnz; } idx_t nfound = nfibs[tid]; if(fi != NULL) { for(idx_t n=x_begin; n < x_end; ++n) { /* check for end of outer index */ if(ttind[n] != ttind[n-1]) { fi[nfound] = ttind[n]; fp[nfound++] = n; } } } else { for(idx_t n=x_begin; n < x_end; ++n) { /* check for end of outer index */ if(ttind[n] != ttind[n-1]) { assert(nfound == ttind[n]); fp[nfound++] = n; } } } } /* omp parallel */ } /* !omp_in_parallel */ } static void p_set_nfibs( splatt_csf * const ct, sptensor_t const * const tt, idx_t const tile_id, idx_t const * const nnztile_ptr, idx_t const mode) { assert(mode < ct->nmodes); idx_t const nnzstart = nnztile_ptr[tile_id]; idx_t const nnzend = nnztile_ptr[tile_id+1]; idx_t const nnz = nnzend - nnzstart; /* outer mode is easy; just look at outer indices */ if(mode == 0) { p_set_nfibs_root(ct, tt, tile_id, nnztile_ptr); return; } /* the mode after accounting for dim_perm */ fidx_t const * const restrict ttind = tt->ind[ct->dim_perm[mode]] + nnzstart; csf_sparsity * const pt = ct->pt + tile_id; /* we will edit this to point to the new fiber idxs instead of nnz */ idx_t * const restrict fprev = pt->fptr[mode-1]; /* first count nfibers */ double t = omp_get_wtime(); idx_t nfibs = 0; /* foreach 'slice' in the previous dimension */ for(idx_t s=0; s < pt->nfibs[mode-1]; ++s) { ++nfibs; /* one by default per 'slice' */ /* count fibers in current hyperplane*/ for(idx_t f=fprev[s]+1; f < fprev[s+1]; ++f) { if(ttind[f] != ttind[f-1]) { ++nfibs; } } } pt->nfibs[mode] = 
nfibs; } /** * @brief Construct the sparsity structure of any mode but the last. The first * (root) mode is handled by p_mk_outerptr and the first is simply a copy * of the nonzeros. * * @param ct The CSF tensor to construct. * @param tt The coordinate tensor to construct from. Assumed to be already * sorted. * @param tile_id The ID of the tile to construct. * @param nnztile_ptr A pointer into 'tt' that marks the start of each tile. * @param mode Which mode we are constructing. */ static void p_mk_fptr( splatt_csf * const ct, sptensor_t const * const tt, idx_t const tile_id, idx_t const * const nnztile_ptr, idx_t const mode) { assert(mode < ct->nmodes); idx_t const nnzstart = nnztile_ptr[tile_id]; idx_t const nnzend = nnztile_ptr[tile_id+1]; idx_t const nnz = nnzend - nnzstart; /* outer mode is easy; just look at outer indices */ if(mode == 0) { p_mk_outerptr(ct, tt, tile_id, nnztile_ptr); return; } /* the mode after accounting for dim_perm */ fidx_t const * const restrict ttind = tt->ind[ct->dim_perm[mode]] + nnzstart; csf_sparsity * const pt = ct->pt + tile_id; /* we will edit this to point to the new fiber idxs instead of nnz */ idx_t * const restrict fprev = pt->fptr[mode-1]; if(omp_in_parallel()) { /* nfibers already counted in p_set_nfibs */ idx_t nfibs = pt->nfibs[mode]; idx_t * const restrict fp = pt->fptr[mode]; fidx_t * const restrict fi = pt->fids[mode]; fp[0] = 0; /* now fill in fiber info */ idx_t nfound = 0; for(idx_t s=0; s < pt->nfibs[mode-1]; ++s) { idx_t const start = fprev[s]+1; idx_t const end = fprev[s+1]; /* mark start of subtree */ fprev[s] = nfound; fi[nfound] = ttind[start-1]; fp[nfound++] = start-1; /* mark fibers in current hyperplane */ for(idx_t f=start; f < end; ++f) { if(ttind[f] != ttind[f-1]) { fi[nfound] = ttind[f]; fp[nfound++] = f; } } } /* mark end of last hyperplane */ fprev[pt->nfibs[mode-1]] = nfibs; fp[nfibs] = nnz; } /* omp_in_parallel */ else { idx_t nfibs[omp_get_max_threads() + 1]; nfibs[0] = 0; #pragma omp parallel { 
int nthreads = omp_get_num_threads(); int tid = omp_get_thread_num(); idx_t s_per_thread = (pt->nfibs[mode-1] + nthreads - 1)/nthreads; idx_t s_begin = SS_MIN(s_per_thread*tid, pt->nfibs[mode-1]); idx_t s_end = SS_MIN(s_begin + s_per_thread, pt->nfibs[mode-1]); /* first count nfibers */ idx_t nfibs_private = 0; /* foreach 'slice' in the previous dimension */ for(idx_t s=s_begin; s < s_end; ++s) { ++nfibs_private; /* one by default per 'slice' */ /* count fibers in current hyperplane*/ for(idx_t f=fprev[s]+1; f < fprev[s+1]; ++f) { if(ttind[f] != ttind[f-1]) { ++nfibs_private; } } } nfibs[tid + 1] = nfibs_private; idx_t fprev_end = fprev[s_end]; #pragma omp barrier #pragma omp master { /* prefix sum */ for(int t = 0; t < nthreads; ++t) { nfibs[t + 1] += nfibs[t]; } pt->nfibs[mode] = nfibs[nthreads]; pt->fptr[mode] = p_alloc_fptr(nfibs[nthreads] + 1); pt->fids[mode] = p_alloc_fids(nfibs[nthreads]); } #pragma omp barrier idx_t * const restrict fp = pt->fptr[mode]; fidx_t * const restrict fi = pt->fids[mode]; #pragma omp master { fp[0] = 0; } /* now fill in fiber info */ idx_t nfound = nfibs[tid]; for(idx_t s=s_begin; s < s_end; ++s) { idx_t const start = fprev[s]+1; idx_t const end = s == s_end - 1 ? fprev_end : fprev[s+1]; /* mark start of subtree */ fprev[s] = nfound; fi[nfound] = ttind[start-1]; fp[nfound++] = start-1; /* mark fibers in current hyperplane */ for(idx_t f=start; f < end; ++f) { if(ttind[f] != ttind[f-1]) { fi[nfound] = ttind[f]; fp[nfound++] = f; } } } if(tid == nthreads - 1) { /* mark end of last hyperplane */ fprev[pt->nfibs[mode-1]] = nfibs[nthreads]; fp[nfibs[nthreads]] = nnz; } } /* omp parallel */ } /* !omp_in_parallel */ } /** * @brief Allocate and fill a CSF tensor from a coordinate tensor without * tiling. * * @param ct The CSF tensor to fill out. * @param tt The sparse tensor to start from. 
*/ static void p_csf_alloc_untiled( splatt_csf * const ct, sptensor_t * const tt) { idx_t const nmodes = tt->nmodes; tt_sort(tt, ct->dim_perm[0], ct->dim_perm); ct->ntiles = 1; for(idx_t m=0; m < nmodes; ++m) { ct->tile_dims[m] = 1; } ct->pt = splatt_malloc(sizeof(*(ct->pt))); csf_sparsity * const pt = ct->pt; /* check hub slices in root mode */ idx_t last_fp = 0; ct->hub_slices = (splatt_fidx_t *)splatt_malloc(4 * omp_get_max_threads() * sizeof(int)); ct->nslice_hubs = 0; idx_t nnz_hubs = 0; #define FINE_GRAIN_PARTITION_OF_HUBS #ifdef FINE_GRAIN_PARTITION_OF_HUBS for(idx_t i=1; i < ct->nnz; ++i) { if(tt->ind[ct->dim_perm[0]][i] != tt->ind[ct->dim_perm[0]][i-1]) { if(i - last_fp >= 0.5*ct->nnz/omp_get_max_threads()) { ct->hub_slices[ct->nslice_hubs++] = tt->ind[ct->dim_perm[0]][i-1]; nnz_hubs += i - last_fp; assert(ct->nslice_hubs < 4 * omp_get_max_threads()); printf("%d hub slice\n", ct->hub_slices[ct->nslice_hubs - 1]); } last_fp = i; } } if(ct->nnz - last_fp >= 0.5*ct->nnz/omp_get_max_threads()) { ct->hub_slices[ct->nslice_hubs++] = tt->ind[ct->dim_perm[0]][ct->nnz - 1]; nnz_hubs += ct->nnz - last_fp; printf("%d hub slice\n", ct->hub_slices[ct->nslice_hubs - 1]); } printf("nslice_hubs = %d nnz_hubs = %ld\n", ct->nslice_hubs, nnz_hubs); #endif if(ct->nslice_hubs > 0) { fidx_t ** new_ind = splatt_malloc(nmodes*sizeof(*new_ind)); for(idx_t i=0; i < nmodes; ++i) { #if SPLATT_SPTENSOR_HBW new_ind[i] = splatt_hbw_malloc(tt->nnz * sizeof(**new_ind)); #else new_ind[i] = splatt_malloc(tt->nnz * sizeof(**new_ind)); #endif } #if SPLATT_SPTENSOR_HBW storage_val_t * new_vals = splatt_hbw_malloc(tt->nnz * sizeof(*new_vals)); #else storage_val_t * new_vals = splatt_malloc(tt->nnz * sizeof(*new_vals)); #endif idx_t non_hub_idx = 0, hub_idx = tt->nnz - nnz_hubs; int hub_slice_idx = 0; for (idx_t i=0; i < ct->nnz; ++i) { while(ct->hub_slices[hub_slice_idx] < tt->ind[ct->dim_perm[0]][i] && hub_slice_idx < ct->nslice_hubs) { ++hub_slice_idx; } if(ct->hub_slices[hub_slice_idx] == 
tt->ind[ct->dim_perm[0]][i]) { if(hub_idx >= tt->nnz) { printf("i=%ld hub_slice_idx=%d\n", i, hub_slice_idx); } assert(hub_idx < tt->nnz); for(int m=0; m < nmodes; ++m) { new_ind[m][hub_idx] = tt->ind[m][i]; } new_vals[hub_idx] = tt->vals[i]; ++hub_idx; } else { assert(non_hub_idx < tt->nnz - nnz_hubs); for(int m=0; m < nmodes; ++m) { new_ind[m][non_hub_idx] = tt->ind[m][i]; } new_vals[non_hub_idx] = tt->vals[i]; ++non_hub_idx; } } assert(non_hub_idx == tt->nnz - nnz_hubs); assert(hub_idx == tt->nnz); for(int m=0; m < nmodes; ++m) { #if SPLATT_SPTENSOR_HBW splatt_hbw_free(tt->ind[m]); #else splatt_free(tt->ind[m]); #endif tt->ind[m] = new_ind[m]; } splatt_free(new_ind); #if SPLATT_SPTENSOR_HBW splatt_hbw_free(tt->vals); #else splatt_free(tt->vals); #endif tt->vals = new_vals; } else { ct->hub_slices = NULL; ct->nslice_hubs = 0; } /* last row of fptr is just nonzero inds */ pt->nfibs[nmodes-1] = ct->nnz; pt->fids[nmodes-1] = p_alloc_fids(ct->nnz); pt->vals = p_alloc_vals(ct->nnz); #pragma omp parallel for for (idx_t i=0; i < ct->nnz; ++i) { pt->fids[nmodes-1][i] = tt->ind[ct->dim_perm[nmodes-1]][i]; } #pragma omp parallel for for(idx_t i=0; i < ct->nnz; ++i) { pt->vals[i] = tt->vals[i]; } /* setup a basic tile ptr for one tile */ idx_t nnz_ptr[2]; nnz_ptr[0] = 0; nnz_ptr[1] = tt->nnz; /* create fptr entries for the rest of the modes, working down from roots. * Skip the bottom level (nnz) */ for(idx_t m=0; m < tt->nmodes-1; ++m) { p_mk_fptr(ct, tt, 0, nnz_ptr, m); } } /** * @brief Reorder the nonzeros in a sparse tensor using dense tiling and fill * a CSF tensor with the data. * * @param ct The CSF tensor to fill. * @param tt The sparse tensor to start from. * @param splatt_opts Options array for SPLATT - used for tile dimensions. 
*/
static void p_csf_alloc_densetile(
  splatt_csf * const ct,
  sptensor_t * const tt,
  double const * const splatt_opts)
{
  idx_t const nmodes = tt->nmodes;

  /* tile every mode at or below TILEDEPTH into NTHREADS chunks */
  idx_t ntiles = 1;
  for(idx_t m=0; m < ct->nmodes; ++m) {
    idx_t const depth = csf_mode_depth(m, ct->dim_perm, ct->nmodes);
    if(depth >= splatt_opts[SPLATT_OPTION_TILEDEPTH]) {
      ct->tile_dims[m] = (idx_t) splatt_opts[SPLATT_OPTION_NTHREADS];
    } else {
      ct->tile_dims[m] = 1;
    }
    ntiles *= ct->tile_dims[m];
  }

  /* perform tensor tiling */
  tt_sort(tt, ct->dim_perm[0], ct->dim_perm);
  idx_t * nnz_ptr = tt_densetile(tt, ct->tile_dims);

  ct->ntiles = ntiles;
  ct->pt = splatt_malloc(ntiles * sizeof(*(ct->pt)));
  /* no hub slices in the tiled layout */
  ct->hub_slices = NULL;
  ct->nslice_hubs = 0;

  /* one contiguous fids/vals buffer shared by all tiles; each tile points
   * into it at its startnnz offset (csf_free_mode frees only pt[0]) */
  fidx_t * fids_buf = NULL;
  val_t * vals_buf = NULL;
  fids_buf = p_alloc_fids(ct->nnz);
  vals_buf = p_alloc_vals(ct->nnz);

  for(idx_t m=0; m < nmodes-1; ++m) {
    #pragma omp parallel for if (ntiles > 1) schedule(dynamic, 1)
    for(idx_t t=0; t < ntiles; ++t) {
      idx_t const startnnz = nnz_ptr[t];
      idx_t const endnnz = nnz_ptr[t+1];
      assert(endnnz >= startnnz);
      idx_t const ptnnz = endnnz - startnnz;

      csf_sparsity * const pt = ct->pt + t;

      /* empty tile */
      if(ptnnz == 0) {
        if(0 == m) {
          for(idx_t i=0; i < ct->nmodes; ++i) {
            pt->fptr[i] = NULL;
            pt->fids[i] = NULL;
            pt->nfibs[i] = 0;
          }
          /* first fptr may be accessed anyway */
          if(!omp_in_parallel()) {
            pt->fptr[0] = p_alloc_fptr(2);
            pt->fptr[0][0] = 0;
            pt->fptr[0][1] = 0;
          }
          pt->vals = NULL;
        }
      }
      else {
        /* last row of fptr is just nonzero inds */
        pt->nfibs[nmodes-1] = ptnnz;
        pt->fids[nmodes-1] = fids_buf + startnnz;
        if (omp_in_parallel()) {
          for (idx_t i = 0; i < ptnnz; ++i) {
            pt->fids[nmodes-1][i] = tt->ind[ct->dim_perm[nmodes-1]][startnnz + i];
          }
        }
        else {
          #pragma omp parallel for
          for (idx_t i = 0; i < ptnnz; ++i) {
            pt->fids[nmodes-1][i] = tt->ind[ct->dim_perm[nmodes-1]][startnnz + i];
          }
        }

        pt->vals = vals_buf + startnnz;
        if (omp_in_parallel()) {
          for(idx_t j=0; j < ptnnz; ++j) {
            pt->vals[j] = tt->vals[startnnz + j];
          }
        }
        else {
          #pragma omp parallel for
          for(idx_t j=0; j < ptnnz; ++j) {
            pt->vals[j] = tt->vals[startnnz + j];
          }
        }

        /* create fptr entries for the rest of the modes.
         * NOTE(review): inside a parallel region we only COUNT fibers here
         * (p_set_nfibs); the allocation + fill happens after the shared
         * buffers are carved up below (p_mk_fptr). */
        if(omp_in_parallel()) {
          p_set_nfibs(ct, tt, t, nnz_ptr, m);
        }
        else {
          p_mk_fptr(ct, tt, t, nnz_ptr, m);
        }
      }
    } /* for each tile */

    if(ntiles > 1) {
      /* carve one shared fptr/fids buffer (owned by tile 0) into per-tile
       * windows; empty root tiles get a 2-entry {0,0} fptr */
      idx_t nfibs_acc = 0;
      idx_t nempty = 0;
      for(idx_t t=0; t < ntiles; ++t) {
        if(nnz_ptr[t+1] - nnz_ptr[t] == 0) {
          if(0 == m) ++nempty;
        }
        else nfibs_acc += ct->pt[t].nfibs[m];
      }
      ct->pt[0].fptr[m] = p_alloc_fptr(nfibs_acc + ntiles + nempty);
      ct->pt[0].fids[m] = p_alloc_fids(nfibs_acc);
      nfibs_acc = 0;
      nempty = 0;
      for(idx_t t=0; t < ntiles; ++t) {
        ct->pt[t].fptr[m] = ct->pt[0].fptr[m] + nfibs_acc + t + nempty;
        ct->pt[t].fids[m] = ct->pt[0].fids[m] + nfibs_acc;
        if(nnz_ptr[t+1] - nnz_ptr[t] == 0) {
          if(0 == m) {
            ct->pt[t].fptr[0][0] = 0;
            ct->pt[t].fptr[0][1] = 0;
            ++nempty;
          }
        }
        else nfibs_acc += ct->pt[t].nfibs[m];
      }

      /* now fill the carved windows in parallel, one tile per task */
      #pragma omp parallel for schedule(dynamic, 1)
      for(idx_t t=0; t < ntiles; ++t) {
        if(nnz_ptr[t+1] > nnz_ptr[t]) {
          p_mk_fptr(ct, tt, t, nnz_ptr, m);
        }
      } /* for each tile */
    }
  } /* for each mode */

  splatt_free(nnz_ptr);
}


/**
* @brief Allocate and fill a CSF tensor.
*
* @param ct The CSF tensor to fill.
* @param tt The coordinate tensor to work from.
* @param mode_type The allocation scheme for the CSF tensor.
* @param mode Which mode we are converting for (if applicable).
* @param splatt_opts Used to determine tiling scheme.
*/ static void p_mk_csf( splatt_csf * const ct, sptensor_t * const tt, csf_mode_type mode_type, idx_t const mode, double const * const splatt_opts) { ct->nnz = tt->nnz; ct->nmodes = tt->nmodes; for(idx_t m=0; m < tt->nmodes; ++m) { ct->dims[m] = tt->dims[m]; } /* get the indices in order */ csf_find_mode_order(tt->dims, tt->nmodes, mode_type, mode, ct->dim_perm, ct->nnz, splatt_opts); ct->which_tile = (splatt_tile_type)splatt_opts[SPLATT_OPTION_TILE]; switch(ct->which_tile) { case SPLATT_NOTILE: p_csf_alloc_untiled(ct, tt); break; case SPLATT_DENSETILE: p_csf_alloc_densetile(ct, tt, splatt_opts); break; default: fprintf(stderr, "SPLATT: tiling '%d' unsupported for CSF tensors.\n", ct->which_tile); break; } } /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ void csf_free( splatt_csf * const csf, double const * const opts) { idx_t ntensors = 0; splatt_csf_type which = (splatt_csf_type)opts[SPLATT_OPTION_CSF_ALLOC]; switch(which) { case SPLATT_CSF_ONEMODE: ntensors = 1; break; case SPLATT_CSF_TWOMODE: ntensors = 2; break; case SPLATT_CSF_ALLMODE: ntensors = csf[0].nmodes; break; } for(idx_t i=0; i < ntensors; ++i) { csf_free_mode(csf + i); } splatt_free(csf); } void csf_free_mode( splatt_csf * const csf) { /* * Free each tile of sparsity pattern. All tiles work on the same contiguous * buffer, so only free once. 
*/ p_free_vals(csf->pt[0].vals); p_free_fids(csf->pt[0].fids[csf->nmodes-1]); for(idx_t m=0; m < csf->nmodes-1; ++m) { p_free_fptr(csf->pt[0].fptr[m]); p_free_fids(csf->pt[0].fids[m]); } splatt_free(csf->hub_slices); splatt_free(csf->pt); } void csf_find_mode_order( idx_t const * const dims, idx_t const nmodes, csf_mode_type which, idx_t const mode, idx_t * const perm_dims, idx_t nnz, double const * const opts) { switch(which) { case CSF_SORTED_SMALLFIRST: if(nmodes >= 6) { // FIXME: temporaily using this for outpatient6 p_order_dims_small(dims, nmodes, perm_dims); } else { p_order_dims_small_no_privatization(dims, nmodes, perm_dims, nnz, opts); } break; case CSF_SORTED_BIGFIRST: p_order_dims_large(dims, nmodes, perm_dims); break; case CSF_INORDER_MINUSONE: p_order_dims_inorder(dims, nmodes, mode, perm_dims); break; case CSF_SORTED_MINUSONE: p_order_dims_minusone(dims, nmodes, mode, perm_dims); break; case CSF_ROUND_ROBIN: p_order_dims_round_robin(dims, nmodes, mode, perm_dims); break; case CSF_ALLPERMUTE: p_order_dims_all_permute(dims, nmodes, mode, perm_dims); break; default: fprintf(stderr, "SPLATT: csf_mode_type '%d' not recognized.\n", which); break; } } size_t csf_storage( splatt_csf const * const tensors, double const * const opts) { idx_t ntensors = 0; splatt_csf_type which_alloc = (splatt_csf_type)opts[SPLATT_OPTION_CSF_ALLOC]; switch(which_alloc) { case SPLATT_CSF_ONEMODE: ntensors = 1; break; case SPLATT_CSF_TWOMODE: ntensors = 2; break; case SPLATT_CSF_ALLMODE: ntensors = tensors[0].nmodes; break; } size_t total_bytes = 0; for(idx_t m=0; m < ntensors; ++m) { size_t bytes = 0; splatt_csf * const ct = (splatt_csf *)(tensors + m); bytes += ct->nnz * sizeof(*(ct->pt->vals)); /* vals */ bytes += ct->nnz * sizeof(**(ct->pt->fids)); /* fids[nmodes] */ bytes += ct->ntiles * sizeof(*(ct->pt)); /* pt */ for(idx_t t=0; t < ct->ntiles; ++t) { csf_sparsity const * const pt = ct->pt + t; for(idx_t m=0; m < ct->nmodes-1; ++m) { bytes += (pt->nfibs[m]+1) * 
sizeof(**(pt->fptr)); /* fptr */ if(pt->fids[m] != NULL) { bytes += pt->nfibs[m] * sizeof(**(pt->fids)); /* fids */ } } } ct->storage = bytes; total_bytes += bytes; } return total_bytes; } splatt_csf * csf_alloc( sptensor_t * const tt, double const * const opts) { splatt_csf * ret = NULL; double * tmp_opts = NULL; idx_t last_mode = 0; int tmp = 0; switch((splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC]) { case SPLATT_CSF_ONEMODE: ret = splatt_malloc(sizeof(*ret)); p_mk_csf(ret, tt, CSF_SORTED_SMALLFIRST, 0, opts); break; case SPLATT_CSF_TWOMODE: ret = splatt_malloc(2 * sizeof(*ret)); /* regular CSF allocation */ p_mk_csf(ret + 0, tt, CSF_SORTED_SMALLFIRST, 0, opts); /* make a copy of opts and don't tile the last mode * TODO make this configurable? */ tmp_opts = splatt_default_opts(); memcpy(tmp_opts, opts, SPLATT_OPTION_NOPTIONS * sizeof(*opts)); tmp_opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE; /* allocate with no tiling for the last mode */ last_mode = ret[0].dim_perm[tt->nmodes-1]; p_mk_csf(ret + 1, tt, CSF_SORTED_MINUSONE, last_mode, tmp_opts); splatt_free_opts(tmp_opts); break; case SPLATT_CSF_ALLMODE: ret = splatt_malloc(tt->nmodes * sizeof(*ret)); for(idx_t m=0; m < tt->nmodes; ++m) { p_mk_csf(ret + m, tt, CSF_SORTED_MINUSONE, m, opts); } break; case SPLATT_CSF_ALLMODE_ROUND_ROBIN: ret = splatt_malloc(tt->nmodes * sizeof(*ret)); for(idx_t m=0; m < tt->nmodes; ++m) { p_mk_csf(ret + m, tt, CSF_ROUND_ROBIN, m, opts); } break; case SPLATT_CSF_ALLPERMUTE: assert(tt->nmodes == 3); ret = splatt_malloc(6 * sizeof(*ret)); for(idx_t m=0; m < 6; ++m) { p_mk_csf(ret + m, tt, CSF_ALLPERMUTE, m, opts); } break; } return ret; } void csf_alloc_mode( sptensor_t * const tt, csf_mode_type which_ordering, idx_t const mode_special, splatt_csf * const csf, double const * const opts) { p_mk_csf(csf, tt, which_ordering, mode_special, opts); } val_t csf_frobsq( splatt_csf const * const tensor) { idx_t const nmodes = tensor->nmodes; val_t norm = 0; #pragma omp parallel reduction(+:norm) { 
for(idx_t t=0; t < tensor->ntiles; ++t) { val_t const * const vals = tensor->pt[t].vals; if(vals == NULL) { continue; } idx_t const nnz = tensor->pt[t].nfibs[nmodes-1]; #pragma omp for nowait for(idx_t n=0; n < nnz; ++n) { norm += vals[n] * vals[n]; } } } return norm; } int csf_get_ncopies(double *opts, int nmodes) { int ncopies = -1; splatt_csf_type which_csf = (splatt_csf_type)opts[SPLATT_OPTION_CSF_ALLOC]; switch(which_csf) { case SPLATT_CSF_ONEMODE: ncopies = 1; break; case SPLATT_CSF_TWOMODE: ncopies = 2; break; case SPLATT_CSF_ALLMODE: ncopies = nmodes; break; } return ncopies; }
FullyDistSpVec.h
/****************************************************************/ /* Parallel Combinatorial BLAS Library (for Graph Computations) */ /* version 1.2 -------------------------------------------------*/ /* date: 10/06/2011 --------------------------------------------*/ /* authors: Aydin Buluc (abuluc@lbl.gov), Adam Lugowski --------*/ /****************************************************************/ /* Copyright (c) 2011, Aydin Buluc Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

#ifndef _FULLY_DIST_SP_VEC_H_
#define _FULLY_DIST_SP_VEC_H_

#include <iostream>
#include <vector>
#include <utility>
#include <unordered_set>
#include "CommGrid.h"
#include "promote.h"
#include "SpParMat.h"
#include "FullyDist.h"
#include "Exception.h"
#include "OptBuf.h"
#include "CombBLAS.h"

// Forward declarations (full definitions live in their own headers)
template <class IT, class NT, class DER> class SpParMat;
template <class IT> class DistEdgeList;
template <class IU, class NU> class FullyDistVec;
template <class IU, class NU> class SparseVectorLocalIterator;

/**
 * A sparse vector of length n (with nnz <= n of them being nonzeros) is distributed to
 * "all the processors" in a way that "respects ordering" of the nonzero indices
 * Example: x = [5,1,6,2,9] for nnz(x)=5 and length(x)=12
 * we use 4 processors P_00, P_01, P_10, P_11
 * Then P_00 owns [1,2] (in the range [0,...,2]), P_01 owns [5] (in the range [3,...,5]), and so on.
 * In the case of A(v,w) type sparse matrix indexing, this doesn't matter because n = nnz
 * After all, A(v,w) will have dimensions length(v) x length (w)
 * v and w will be of numerical type (NT) "int" and their indices (IT) will be consecutive integers
 * It is possible that nonzero counts are distributed unevenly
 * Example: x=[1,2,3,4,5] and length(x) = 20, then P_00 would own all the nonzeros and the rest will hold empty vectors
 * Just like in SpParMat case, indices are local to processors (they belong to range [0,...,length-1] on each processor)
 * \warning Always create vectors with the right length, setting elements won't increase its length (similar to operator[] on std::vector)
 **/
template <class IT, class NT>
class FullyDistSpVec: public FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>
{
public:
	// Constructors: empty / global-length only / grid only / grid + global length
	FullyDistSpVec ( );
	FullyDistSpVec ( IT glen );
	FullyDistSpVec ( shared_ptr<CommGrid> grid);
	FullyDistSpVec ( shared_ptr<CommGrid> grid, IT glen);
	FullyDistSpVec (const FullyDistVec<IT,NT> & rhs); // Conversion copy-constructor

	//! like operator=, but instead of making a deep copy it just steals the contents.
	//! Useful for places where the "victim" will be destroyed immediately after the call.
	void stealFrom(FullyDistSpVec<IT,NT> & victim);
	FullyDistSpVec<IT,NT> & operator=(const FullyDistSpVec< IT,NT > & rhs);
	FullyDistSpVec<IT,NT> & operator=(const FullyDistVec< IT,NT > & rhs); // convert from dense
	FullyDistSpVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs);
	FullyDistSpVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs);

	//! Default element I/O policy used by ReadDistribute/SaveGathered:
	//! pattern-only entries read as 1, otherwise values stream in/out verbatim.
	class ScalarReadSaveHandler
	{
	public:
		NT getNoNum(IT index) { return static_cast<NT>(1); }

		template <typename c, typename t>
		NT read(std::basic_istream<c,t>& is, IT index)
		{
			NT v;
			is >> v;
			return v;
		}

		template <typename c, typename t>
		void save(std::basic_ostream<c,t>& os, const NT& v, IT index)
		{
			os << v;
		}
	};

	template <class HANDLER>
	ifstream& ReadDistribute (ifstream& infile, int master, HANDLER handler);
	ifstream& ReadDistribute (ifstream& infile, int master)
	{
		return ReadDistribute(infile, master, ScalarReadSaveHandler());
	}

	template <class HANDLER>
	void SaveGathered(ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false);
	void SaveGathered(ofstream& outfile, int master)
	{
		SaveGathered(outfile, master, ScalarReadSaveHandler());
	}

	template <typename NNT> operator FullyDistSpVec< IT,NNT > () const	//!< Type conversion operator
	{
		FullyDistSpVec<IT,NNT> CVT(commGrid);
		CVT.ind = vector<IT>(ind.begin(), ind.end());
		CVT.num = vector<NNT>(num.begin(), num.end());
		CVT.glen = glen;
		return CVT;
	}

	//! Equality is checked by densifying both sides and comparing the dense vectors
	bool operator==(const FullyDistSpVec<IT,NT> & rhs) const
	{
		FullyDistVec<IT,NT> v = *this;
		FullyDistVec<IT,NT> w = rhs;
		return (v == w);
	}

	void PrintInfo(string vecname) const;
	void iota(IT globalsize, NT first);
	FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const;	//!< SpRef (expects ri to be 0-based)
	void SetElement (IT indx, NT numx);	// element-wise assignment
	void DelElement (IT indx); // element-wise deletion

	/**
	 * @brief Remove elements in index from the set
	 */
	template <typename E>
	void removeFromHash(std::unordered_set<E> &localUnvisitedVertices)
	{
		// erase every locally-held nonzero index from the given set
		for(auto e: ind)
		{
			localUnvisitedVertices.erase(e);
		}
	}

	NT operator[](IT indx);
	bool WasFound() const { return wasFound; }

	// sort the vector itself
	// return the permutation vector (0-based)
	FullyDistSpVec<IT, IT> sort();

	IT getlocnnz() const { return ind.size(); }	//!< local nonzero count (no communication)
	//! global nonzero count: collective, sums local counts over the whole grid
	IT getnnz() const
	{
		IT totnnz = 0;
		IT locnnz = ind.size();
		MPI_Allreduce( &locnnz, &totnnz, 1, MPIType<IT>(), MPI_SUM, commGrid->GetWorld());
		return totnnz;
	}

	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::LengthUntil;
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::MyLocLength;
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::MyRowLength;
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::TotalLength;
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::Owner;
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::RowLenUntil;

	//! Overwrite each numerical value with its own global index (local index + offset)
	void setNumToInd()
	{
		IT offset = LengthUntil();
		IT spsize = ind.size();
		#ifdef _OPENMP
		#pragma omp parallel for
		#endif
		for(IT i=0; i< spsize; ++i)
			num[i] = ind[i] + offset;
	}

	template <typename _Predicate>
	IT Count(_Predicate pred) const;	//!< Return the number of elements for which pred is true

	//! Apply __unary_op to every stored value in place (indices unchanged)
	template <typename _UnaryOperation>
	void Apply(_UnaryOperation __unary_op)
	{
		transform(num.begin(), num.end(), num.begin(), __unary_op);
	}

	//! Apply __binary_op(value, global_index) to every stored value in place
	template <typename _BinaryOperation>
	void ApplyInd(_BinaryOperation __binary_op)
	{
		IT offset = LengthUntil();
		IT spsize = ind.size();
		#ifdef _OPENMP
		#pragma omp parallel for
		#endif
		for(IT i=0; i < spsize; ++i)
			num[i] = __binary_op(num[i], ind[i] + offset);
	}

	template <typename _BinaryOperation>
	NT Reduce(_BinaryOperation __binary_op, NT init);

	template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
	OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op);

	void DebugPrint();
	shared_ptr<CommGrid> getcommgrid() const { return commGrid; }
	void Reset();
	NT GetLocalElement(IT indx);
	void BulkSet(IT inds[], int count);

protected:
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::glen;
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::commGrid;

private:
	vector< IT > ind;	// ind.size() gives the number of nonzeros
	vector< NT > num;	// values parallel to ind
	bool wasFound; // true if the last GetElement operation returned an actual value.

	template <class IU, class NU>
	friend class FullyDistSpVec;

	template <class IU, class NU>
	friend class FullyDistVec;

	template <class IU, class NU, class UDER>
	friend class SpParMat;

	template <class IU, class NU>
	friend class SparseVectorLocalIterator;

	template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
	friend FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote>
	SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x );

	template <typename SR, typename IU, typename NUM, typename UDER>
	friend FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote>
	SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue);

	template <typename VT, typename IU, typename UDER>	// NoSR version (in BFSFriends.h)
	friend FullyDistSpVec<IU,VT> SpMV (const SpParMat<IU,bool,UDER> & A, const FullyDistSpVec<IU,VT> & x, OptBuf<int32_t, VT > & optbuf);

	template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
	friend void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,bool indexisvalue, OptBuf<int32_t, OVT > & optbuf);

	template <typename IU, typename NU1, typename NU2>
	friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
	EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero);

	template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
	friend FullyDistSpVec<IU,RET>
	EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);

	template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
	friend FullyDistSpVec<IU,RET>
	EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp);

	template <typename IU>
	friend void RandPerm(FullyDistSpVec<IU,IU> & V); 	// called on an existing object, randomly permutes it

	template <typename IU>
	friend void RenameVertices(DistEdgeList<IU> & DEL);

	//! Helper functions for sparse matrix X sparse vector
	template <typename SR, typename IU, typename OVT>
	friend void MergeContributions(FullyDistSpVec<IU,OVT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, OVT * & recvnumbuf, int rowneighs);

	template <typename IU, typename VT>
	friend void MergeContributions(FullyDistSpVec<IU,VT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, VT * & recvnumbuf, int rowneighs);

	template<typename IU, typename NV>
	friend void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue);
};

#include "FullyDistSpVec.cpp"
#endif
serial_tree_learner.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_

#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/random.h>

#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>
#include <set>

#include "col_sampler.hpp"
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "monotone_constraints.hpp"
#include "split_info.hpp"

#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered Hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif

namespace LightGBM {

using json11::Json;

/*! \brief forward declaration */
class CostEfficientGradientBoosting;

/*!
 * \brief Used for learning a tree by single machine
 */
class SerialTreeLearner: public TreeLearner {
 public:
  friend CostEfficientGradientBoosting;

  explicit SerialTreeLearner(const Config* config);

  ~SerialTreeLearner();

  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  /*! \brief Reset training data, forcing a rebuild of the multi-value bin */
  void ResetTrainingData(const Dataset* train_data, bool is_constant_hessian) override {
    ResetTrainingDataInner(train_data, is_constant_hessian, true);
  }

  void ResetIsConstantHessian(bool is_constant_hessian) override {
    share_state_->is_constant_hessian = is_constant_hessian;
  }

  virtual void ResetTrainingDataInner(const Dataset* train_data, bool is_constant_hessian, bool reset_multi_val_bin);

  void ResetConfig(const Config* config) override;

  /*! \brief Store the forced-split JSON (or clear it when null) for later use by ForceSplits */
  inline void SetForcedSplit(const Json* forced_split_json) override {
    if (forced_split_json != nullptr && !forced_split_json->is_null()) {
      forced_split_json_ = forced_split_json;
    } else {
      forced_split_json_ = nullptr;
    }
  }

  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_first_tree) override;

  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred, const score_t* gradients, const score_t* hessians) const override;

  /*!
   * \brief Set bagged data. A null subset means "use these indices of the full data";
   *        a non-null subset swaps in the subset dataset and records the bagging indices.
   */
  void SetBaggingData(const Dataset* subset, const data_size_t* used_indices, data_size_t num_data) override {
    if (subset == nullptr) {
      data_partition_->SetUsedDataIndices(used_indices, num_data);
      share_state_->SetUseSubrow(false);
    } else {
      ResetTrainingDataInner(subset, share_state_->is_constant_hessian, false);
      share_state_->SetUseSubrow(true);
      share_state_->SetSubrowCopied(false);
      share_state_->bagging_use_indices = used_indices;
      share_state_->bagging_indices_cnt = num_data;
    }
  }

  /*! \brief Add each leaf's output to the scores of the data rows assigned to that leaf */
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
    if (tree->num_leaves() <= 1) {
      // a stump adds nothing per-row here
      return;
    }
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter, data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;

  /*! \brief Get output of parent node, used for path smoothing */
  double GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const;

 protected:
  void ComputeBestSplitForFeature(FeatureHistogram* histogram_array_, int feature_index, int real_fidx, int8_t is_feature_used, int num_data, const LeafSplits* leaf_splits, SplitInfo* best_split, double parent_output);

  void GetShareStates(const Dataset* dataset, bool is_constant_hessian, bool is_first_time);

  void RecomputeBestSplitForLeaf(Tree* tree, int leaf, SplitInfo* split);

  /*!
   * \brief Some initial works before training
   */
  virtual void BeforeTrain();

  /*!
   * \brief Some initial works before FindBestSplit
   */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  virtual void FindBestSplits(const Tree* tree);

  virtual void FindBestSplits(const Tree* tree, const std::set<int>* force_features);

  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree*);

  /*!
   * \brief Partition tree and data according best split.
   * \param tree Current tree, will be splitted on this function.
   * \param best_leaf The index of leaf that will be splitted.
   * \param left_leaf The index of left leaf after splitted.
   * \param right_leaf The index of right leaf after splitted.
   */
  inline virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf) {
    SplitInner(tree, best_leaf, left_leaf, right_leaf, true);
  }

  void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf, bool update_cnt);

  /* Force splits with forced_split_json dict and then return num splits forced.*/
  int32_t ForceSplits(Tree* tree, int* left_leaf, int* right_leaf, int* cur_depth);

  std::set<int> FindAllForceFeatures(Json force_split_leaf_setting);

  /*!
   * \brief Get the number of data in a leaf
   * \param leaf_idx The index of leaf
   * \return The number of data in the leaf_idx leaf
   */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores minimum and maximum constraints for each leaf */
  std::unique_ptr<LeafConstraintsBase> constraints_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
#if defined(USE_GPU)
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#elif defined(USE_CUDA) || defined(USE_CUDA_EXP)
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  /*! \brief per-iteration column (feature) sampler */
  ColSampler col_sampler_;
  /*! \brief forced-split specification set via SetForcedSplit (may be null) */
  const Json* forced_split_json_;
  std::unique_ptr<TrainingShareStates> share_state_;
  std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};

inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    // negative index denotes "no leaf"
    return 0;
  }
}

}  // namespace LightGBM
#endif  // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
GB_unaryop__lnot_uint16_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_uint16_uint16
// op(A') function: GB_tran__lnot_uint16_uint16

// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = !(Ax [p] != 0) for p in [0, anz), parallelized over nthreads.
GrB_Info GB_unop__lnot_uint16_uint16
(
    uint16_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, which expands the GB_* macros
// defined above for this type/operator combination.
GrB_Info GB_tran__lnot_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_1x1_pack4to1_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack 1x1 convolution weights: convert float32 -> bfloat16 and interleave
// output channels in groups of 8 (aarch64 only), then 4, then 1, with input
// channels consumed 4 at a time.
static void conv1x1s1_sgemm_transform_kernel_pack4to1_bf16s_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4a-inch/4a-outch
#if __aarch64__
    kernel_tm_pack4.create(8, inch/4, outch/8 + (outch%8)/4 + outch%4, (size_t)2u*4, 4);
#else
    kernel_tm_pack4.create(4, inch/4, outch/4 + outch%4, (size_t)2u*4, 4);
#endif

    int p=0;
#if __aarch64__
    // 8 output channels at a time (aarch64 only)
    for (; p+7<outch; p+=8)
    {
        const float* k0 = (const float*)kernel + (p+0)*inch;
        const float* k1 = (const float*)kernel + (p+1)*inch;
        const float* k2 = (const float*)kernel + (p+2)*inch;
        const float* k3 = (const float*)kernel + (p+3)*inch;
        const float* k4 = (const float*)kernel + (p+4)*inch;
        const float* k5 = (const float*)kernel + (p+5)*inch;
        const float* k6 = (const float*)kernel + (p+6)*inch;
        const float* k7 = (const float*)kernel + (p+7)*inch;

        unsigned short* ktmp = kernel_tm_pack4.channel(p/8);

        for (int q=0; q+3<inch; q+=4)
        {
            // 8 outch values for each of 4 consecutive inch positions
            ktmp[0] = float32_to_bfloat16(k0[0]);
            ktmp[1] = float32_to_bfloat16(k1[0]);
            ktmp[2] = float32_to_bfloat16(k2[0]);
            ktmp[3] = float32_to_bfloat16(k3[0]);
            ktmp[4] = float32_to_bfloat16(k4[0]);
            ktmp[5] = float32_to_bfloat16(k5[0]);
            ktmp[6] = float32_to_bfloat16(k6[0]);
            ktmp[7] = float32_to_bfloat16(k7[0]);

            ktmp[8] = float32_to_bfloat16(k0[1]);
            ktmp[9] = float32_to_bfloat16(k1[1]);
            ktmp[10] = float32_to_bfloat16(k2[1]);
            ktmp[11] = float32_to_bfloat16(k3[1]);
            ktmp[12] = float32_to_bfloat16(k4[1]);
            ktmp[13] = float32_to_bfloat16(k5[1]);
            ktmp[14] = float32_to_bfloat16(k6[1]);
            ktmp[15] = float32_to_bfloat16(k7[1]);

            ktmp[16] = float32_to_bfloat16(k0[2]);
            ktmp[17] = float32_to_bfloat16(k1[2]);
            ktmp[18] = float32_to_bfloat16(k2[2]);
            ktmp[19] = float32_to_bfloat16(k3[2]);
            ktmp[20] = float32_to_bfloat16(k4[2]);
            ktmp[21] = float32_to_bfloat16(k5[2]);
            ktmp[22] = float32_to_bfloat16(k6[2]);
            ktmp[23] = float32_to_bfloat16(k7[2]);

            ktmp[24] = float32_to_bfloat16(k0[3]);
            ktmp[25] = float32_to_bfloat16(k1[3]);
            ktmp[26] = float32_to_bfloat16(k2[3]);
            ktmp[27] = float32_to_bfloat16(k3[3]);
            ktmp[28] = float32_to_bfloat16(k4[3]);
            ktmp[29] = float32_to_bfloat16(k5[3]);
            ktmp[30] = float32_to_bfloat16(k6[3]);
            ktmp[31] = float32_to_bfloat16(k7[3]);

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            k4 += 4;
            k5 += 4;
            k6 += 4;
            k7 += 4;
            ktmp += 32;
        }
    }
#endif
    // 4 output channels at a time
    for (; p+3<outch; p+=4)
    {
        const float* k0 = (const float*)kernel + (p+0)*inch;
        const float* k1 = (const float*)kernel + (p+1)*inch;
        const float* k2 = (const float*)kernel + (p+2)*inch;
        const float* k3 = (const float*)kernel + (p+3)*inch;

#if __aarch64__
        unsigned short* ktmp = kernel_tm_pack4.channel(p/8 + (p%8)/4);
#else
        unsigned short* ktmp = kernel_tm_pack4.channel(p/4);
#endif

        for (int q=0; q+3<inch; q+=4)
        {
            // 4 outch values for each of 4 consecutive inch positions
            ktmp[0] = float32_to_bfloat16(k0[0]);
            ktmp[1] = float32_to_bfloat16(k1[0]);
            ktmp[2] = float32_to_bfloat16(k2[0]);
            ktmp[3] = float32_to_bfloat16(k3[0]);

            ktmp[4] = float32_to_bfloat16(k0[1]);
            ktmp[5] = float32_to_bfloat16(k1[1]);
            ktmp[6] = float32_to_bfloat16(k2[1]);
            ktmp[7] = float32_to_bfloat16(k3[1]);

            ktmp[8] = float32_to_bfloat16(k0[2]);
            ktmp[9] = float32_to_bfloat16(k1[2]);
            ktmp[10] = float32_to_bfloat16(k2[2]);
            ktmp[11] = float32_to_bfloat16(k3[2]);

            ktmp[12] = float32_to_bfloat16(k0[3]);
            ktmp[13] = float32_to_bfloat16(k1[3]);
            ktmp[14] = float32_to_bfloat16(k2[3]);
            ktmp[15] = float32_to_bfloat16(k3[3]);

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            ktmp += 16;
        }
    }
    // remaining single output channels
    for (; p<outch; p++)
    {
        const float* k0 = (const float*)kernel + p*inch;

#if __aarch64__
        unsigned short* ktmp = kernel_tm_pack4.channel(p/8 + (p%8)/4 + p%4);
#else
        unsigned short* ktmp = kernel_tm_pack4.channel(p/4 + p%4);
#endif

        for (int q=0; q+3<inch; q+=4)
        {
            ktmp[0] = float32_to_bfloat16(k0[0]);
            ktmp[1] = float32_to_bfloat16(k0[1]);
            ktmp[2] = float32_to_bfloat16(k0[2]);
            ktmp[3] = float32_to_bfloat16(k0[3]);

            k0 += 4;
            ktmp += 4;
        }
    }
}

static void conv1x1s1_sgemm_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;

    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int size = w * h;

    const float* bias = _bias;

    // interleave
    Mat tmp;
#if __aarch64__
    if (size >= 12)
        tmp.create(12, inch, size/12 + (size%12)/8 + (size%12%8)/4 + size%12%4, elemsize, elempack, opt.workspace_allocator);
    else if (size >= 8)
        tmp.create(8, inch, size/8 + (size%8)/4 + size%4, elemsize, elempack, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4, inch, size/4 + size%4, elemsize, elempack, opt.workspace_allocator);
    else // if (size >= 1)
        tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
#else
    if (size >= 8)
        tmp.create(8, inch, size/8 + (size%8)/4 + size%4, elemsize, elempack, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4, inch, size/4 + size%4, elemsize, elempack, opt.workspace_allocator);
    else // if (size >= 1)
        tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
#endif
    {
        int nn_size;
        int remain_size_start;
#if __aarch64__
        nn_size = size / 12;
        remain_size_start = nn_size * 12;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 12;

            const unsigned short* img0 = bottom_blob.channel(0);
            img0 += i*4;

            unsigned short*
tmpptr = tmp.channel(i/12); for (int q=0; q<inch; q++) { // transpose 4x12 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" "st1 {v4.4h}, [%1], #8 \n" "st1 {v1.8h}, [%1], #16 \n" "st1 {v5.4h}, [%1], #8 \n" "sub %0, %0, #64 \n" "st1 {v2.8h}, [%1], #16 \n" "st1 {v6.4h}, [%1], #8 \n" "st1 {v3.8h}, [%1], #16 \n" "st1 {v7.4h}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); img0 += bottom_blob.cstep * 4; } } #else remain_size_start = 0; #endif nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 8; const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12+(i%12)/8); #else unsigned short* tmpptr = tmp.channel(i/8); #endif for (int q=0; q<inch; q++) { // transpose 4x8 #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3" ); #else asm volatile( "pld [%0, #256] \n" "vld4.u16 {d0-d3}, [%0]! \n" "pld [%0, #256] \n" "vld4.u16 {d4-d7}, [%0] \n" "sub %0, %0, #32 \n" "vst1.u16 {d0}, [%1 :64]! \n" "vst1.u16 {d4}, [%1 :64]! \n" "vst1.u16 {d1}, [%1 :64]! \n" "vst1.u16 {d5}, [%1 :64]! \n" "vst1.u16 {d2}, [%1 :64]! \n" "vst1.u16 {d6}, [%1 :64]! \n" "vst1.u16 {d3}, [%1 :64]! \n" "vst1.u16 {d7}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 4; const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4); #endif for (int q=0; q<inch; q++) { // transpose 4x4 #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n" "st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1" ); #else asm volatile( "pld [%0, #256] \n" "vld4.u16 {d0-d3}, [%0 :128] \n" "vst1.u16 {d0-d3}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<size; i++) { const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); #endif for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.4h}, [%0] \n" "st1 {v0.4h}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0" ); #else asm volatile( "pld [%0, #64] \n" "vld1.u16 {d0}, [%0 :64] \n" "vst1.u16 {d0}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } } int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; unsigned short* outptr0 = top_blob.channel(p); unsigned short* outptr1 = top_blob.channel(p+1); unsigned short* outptr2 = top_blob.channel(p+2); unsigned short* outptr3 = top_blob.channel(p+3); unsigned short* outptr4 = top_blob.channel(p+4); unsigned short* outptr5 = top_blob.channel(p+5); unsigned short* outptr6 = top_blob.channel(p+6); unsigned short* outptr7 = top_blob.channel(p+7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p : zeros; int i=0; for (; i+11<size; i+=12) { unsigned short* tmpptr = tmp.channel(i/12); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8); int nn = inch;// inch always > 0 asm volatile( "ld1 {v30.4s, v31.4s}, [%22] \n" "dup v8.4s, v30.s[0] \n" "dup v9.4s, v30.s[0] \n" "dup v10.4s, v30.s[0] \n" "dup v11.4s, v30.s[1] \n" "dup v12.4s, v30.s[1] \n" "dup v13.4s, v30.s[1] \n" "dup v14.4s, v30.s[2] \n" "dup v15.4s, v30.s[2] \n" "dup v16.4s, v30.s[2] \n" "dup v17.4s, v30.s[3] \n" "dup v18.4s, v30.s[3] \n" "dup v19.4s, v30.s[3] \n" "dup v20.4s, v31.s[0] \n" "dup v21.4s, v31.s[0] \n" "dup v22.4s, v31.s[0] \n" "dup v23.4s, v31.s[1] \n" "dup v24.4s, v31.s[1] \n" "dup v25.4s, v31.s[1] \n" "dup v26.4s, v31.s[2] \n" "dup v27.4s, v31.s[2] \n" "dup v28.4s, v31.s[2] \n" "dup v29.4s, v31.s[3] \n" "dup v30.4s, v31.s[3] \n" "dup v31.4s, v31.s[3] \n" "0: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, 
#16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v23.4s, v0.4s, v5.s[1] \n" "fmla v26.4s, v0.4s, v5.s[2] \n" "fmla v29.4s, v0.4s, v5.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v21.4s, v1.4s, v5.s[0] \n" "fmla v24.4s, v1.4s, v5.s[1] \n" "fmla v27.4s, v1.4s, v5.s[2] \n" "fmla v30.4s, v1.4s, v5.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "fmla v22.4s, v2.4s, v5.s[0] \n" "fmla v25.4s, v2.4s, v5.s[1] \n" "fmla v28.4s, v2.4s, v5.s[2] \n" "fmla v31.4s, v2.4s, v5.s[3] \n" "fmla v8.4s, v3.4s, v6.s[0] \n" "fmla v11.4s, v3.4s, v6.s[1] \n" "fmla v14.4s, v3.4s, v6.s[2] \n" "fmla v17.4s, v3.4s, v6.s[3] \n" "fmla v20.4s, v3.4s, v7.s[0] \n" "fmla v23.4s, v3.4s, v7.s[1] \n" "fmla v26.4s, v3.4s, v7.s[2] \n" "fmla v29.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v9.4s, v0.4s, v6.s[0] \n" "fmla v12.4s, v0.4s, v6.s[1] \n" "fmla v15.4s, v0.4s, v6.s[2] \n" "fmla v18.4s, v0.4s, v6.s[3] \n" "fmla v21.4s, v0.4s, v7.s[0] \n" "fmla v24.4s, v0.4s, v7.s[1] \n" "fmla v27.4s, v0.4s, v7.s[2] \n" "fmla v30.4s, v0.4s, v7.s[3] \n" "fmla v10.4s, v1.4s, v6.s[0] \n" "fmla v13.4s, v1.4s, v6.s[1] \n" "fmla v16.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v22.4s, v1.4s, v7.s[0] \n" "fmla v25.4s, v1.4s, v7.s[1] \n" "fmla v28.4s, v1.4s, v7.s[2] \n" "fmla v31.4s, v1.4s, v7.s[3] \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 
\n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v2.4s, v4.s[0] \n" "fmla v11.4s, v2.4s, v4.s[1] \n" "fmla v14.4s, v2.4s, v4.s[2] \n" "fmla v17.4s, v2.4s, v4.s[3] \n" "fmla v20.4s, v2.4s, v5.s[0] \n" "fmla v23.4s, v2.4s, v5.s[1] \n" "fmla v26.4s, v2.4s, v5.s[2] \n" "fmla v29.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v4.s[0] \n" "fmla v12.4s, v3.4s, v4.s[1] \n" "fmla v15.4s, v3.4s, v4.s[2] \n" "fmla v18.4s, v3.4s, v4.s[3] \n" "fmla v21.4s, v3.4s, v5.s[0] \n" "fmla v24.4s, v3.4s, v5.s[1] \n" "fmla v27.4s, v3.4s, v5.s[2] \n" "fmla v30.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v10.4s, v0.4s, v4.s[0] \n" "fmla v13.4s, v0.4s, v4.s[1] \n" "fmla v16.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v22.4s, v0.4s, v5.s[0] \n" "fmla v25.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v31.4s, v0.4s, v5.s[3] \n" "fmla v8.4s, v1.4s, v6.s[0] \n" "fmla v11.4s, v1.4s, v6.s[1] \n" "fmla v14.4s, v1.4s, v6.s[2] \n" "fmla v17.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v23.4s, v1.4s, v7.s[1] \n" "fmla v26.4s, v1.4s, v7.s[2] \n" "fmla v29.4s, v1.4s, v7.s[3] \n" "fmla v9.4s, v2.4s, v6.s[0] \n" "fmla v12.4s, v2.4s, v6.s[1] \n" "fmla v15.4s, v2.4s, v6.s[2] \n" "fmla v18.4s, v2.4s, v6.s[3] \n" "fmla v21.4s, v2.4s, v7.s[0] \n" "fmla v24.4s, v2.4s, v7.s[1] \n" "fmla v27.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v10.4s, v3.4s, v6.s[0] \n" "fmla v13.4s, v3.4s, v6.s[1] \n" "fmla v16.4s, v3.4s, v6.s[2] \n" "fmla v19.4s, v3.4s, v6.s[3] \n" "fmla v22.4s, v3.4s, v7.s[0] \n" "fmla v25.4s, v3.4s, v7.s[1] \n" "fmla v28.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, 
v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n" "st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n" "st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n" "st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n" "st1 {v20.4h, v21.4h, v22.4h}, [%5], #24 \n" "st1 {v23.4h, v24.4h, v25.4h}, [%6], #24 \n" "st1 {v26.4h, v27.4h, v28.4h}, [%7], #24 \n" "st1 {v29.4h, v30.4h, v31.4h}, [%8], #24 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "r"(biasptr) // %22 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+7<size; i+=8) { unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8); int nn = inch;// inch always > 0 asm volatile( "ld1 {v30.4s, v31.4s}, [%22] \n" "dup v16.4s, v30.s[0] \n" "dup v17.4s, v30.s[0] \n" "dup v18.4s, v30.s[1] \n" "dup v19.4s, v30.s[1] \n" "dup v20.4s, v30.s[2] \n" "dup v21.4s, v30.s[2] \n" "dup v22.4s, v30.s[3] \n" "dup v23.4s, 
v30.s[3] \n" "dup v24.4s, v31.s[0] \n" "dup v25.4s, v31.s[0] \n" "dup v26.4s, v31.s[1] \n" "dup v27.4s, v31.s[1] \n" "dup v28.4s, v31.s[2] \n" "dup v29.4s, v31.s[2] \n" "dup v30.4s, v31.s[3] \n" "dup v31.4s, v31.s[3] \n" "0: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v18.4s, v0.4s, v4.s[1] \n" "fmla v20.4s, v0.4s, v4.s[2] \n" "fmla v22.4s, v0.4s, v4.s[3] \n" "fmla v24.4s, v0.4s, v5.s[0] \n" "fmla v26.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v30.4s, v0.4s, v5.s[3] \n" "fmla v17.4s, v1.4s, v4.s[0] \n" "fmla v19.4s, v1.4s, v4.s[1] \n" "fmla v21.4s, v1.4s, v4.s[2] \n" "fmla v23.4s, v1.4s, v4.s[3] \n" "fmla v25.4s, v1.4s, v5.s[0] \n" "fmla v27.4s, v1.4s, v5.s[1] \n" "fmla v29.4s, v1.4s, v5.s[2] \n" "fmla v31.4s, v1.4s, v5.s[3] \n" "fmla v16.4s, v2.4s, v6.s[0] \n" "fmla v18.4s, v2.4s, v6.s[1] \n" "fmla v20.4s, v2.4s, v6.s[2] \n" "fmla v22.4s, v2.4s, v6.s[3] \n" "fmla v24.4s, v2.4s, v7.s[0] \n" "fmla v26.4s, v2.4s, v7.s[1] \n" "fmla v28.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v17.4s, v3.4s, v6.s[0] \n" "fmla v19.4s, v3.4s, v6.s[1] \n" "fmla v21.4s, v3.4s, v6.s[2] \n" "fmla v23.4s, v3.4s, v6.s[3] \n" "fmla v25.4s, v3.4s, v7.s[0] \n" "fmla v27.4s, v3.4s, v7.s[1] \n" "fmla v29.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "shll v8.4s, v8.4h, #16 \n" "shll 
v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v12.4s, v8.s[0] \n" "fmla v18.4s, v12.4s, v8.s[1] \n" "fmla v20.4s, v12.4s, v8.s[2] \n" "fmla v22.4s, v12.4s, v8.s[3] \n" "fmla v24.4s, v12.4s, v9.s[0] \n" "fmla v26.4s, v12.4s, v9.s[1] \n" "fmla v28.4s, v12.4s, v9.s[2] \n" "fmla v30.4s, v12.4s, v9.s[3] \n" "fmla v17.4s, v13.4s, v8.s[0] \n" "fmla v19.4s, v13.4s, v8.s[1] \n" "fmla v21.4s, v13.4s, v8.s[2] \n" "fmla v23.4s, v13.4s, v8.s[3] \n" "fmla v25.4s, v13.4s, v9.s[0] \n" "fmla v27.4s, v13.4s, v9.s[1] \n" "fmla v29.4s, v13.4s, v9.s[2] \n" "fmla v31.4s, v13.4s, v9.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v10.s[0] \n" "fmla v18.4s, v14.4s, v10.s[1] \n" "fmla v20.4s, v14.4s, v10.s[2] \n" "fmla v22.4s, v14.4s, v10.s[3] \n" "fmla v24.4s, v14.4s, v11.s[0] \n" "fmla v26.4s, v14.4s, v11.s[1] \n" "fmla v28.4s, v14.4s, v11.s[2] \n" "fmla v30.4s, v14.4s, v11.s[3] \n" "fmla v17.4s, v15.4s, v10.s[0] \n" "fmla v19.4s, v15.4s, v10.s[1] \n" "fmla v21.4s, v15.4s, v10.s[2] \n" "fmla v23.4s, v15.4s, v10.s[3] \n" "fmla v25.4s, v15.4s, v11.s[0] \n" "fmla v27.4s, v15.4s, v11.s[1] \n" "fmla v29.4s, v15.4s, v11.s[2] \n" "fmla v31.4s, v15.4s, v11.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v16.4h, v17.4h}, [%1], #16 \n" "st1 {v18.4h, v19.4h}, [%2], #16 \n" "st1 {v20.4h, v21.4h}, [%3], #16 \n" "st1 {v22.4h, v23.4h}, [%4], #16 \n" "st1 {v24.4h, v25.4h}, [%5], #16 \n" "st1 {v26.4h, v27.4h}, [%6], #16 \n" "st1 {v28.4h, v29.4h}, [%7], #16 \n" "st1 {v30.4h, v31.4h}, [%8], #16 \n" : 
"=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "r"(biasptr) // %22 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+3<size; i+=4) { unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8); int nn = inch;// inch always > 0 asm volatile( "ld1 {v22.4s, v23.4s}, [%22] \n" "dup v16.4s, v22.s[0] \n" "dup v17.4s, v22.s[1] \n" "dup v18.4s, v22.s[2] \n" "dup v19.4s, v22.s[3] \n" "dup v20.4s, v23.s[0] \n" "dup v21.4s, v23.s[1] \n" "dup v22.4s, v23.s[2] \n" "dup v23.4s, v23.s[3] \n" "0: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v17.4s, v0.4s, v4.s[1] \n" "fmla v18.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v21.4s, v0.4s, v5.s[1] \n" "fmla v22.4s, v0.4s, v5.s[2] \n" "fmla v23.4s, v0.4s, v5.s[3] \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v1.4s, v6.s[0] \n" "fmla v17.4s, v1.4s, 
v6.s[1] \n" "fmla v18.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v21.4s, v1.4s, v7.s[1] \n" "fmla v22.4s, v1.4s, v7.s[2] \n" "fmla v23.4s, v1.4s, v7.s[3] \n" "fmla v16.4s, v2.4s, v8.s[0] \n" "fmla v17.4s, v2.4s, v8.s[1] \n" "fmla v18.4s, v2.4s, v8.s[2] \n" "fmla v19.4s, v2.4s, v8.s[3] \n" "fmla v20.4s, v2.4s, v9.s[0] \n" "fmla v21.4s, v2.4s, v9.s[1] \n" "fmla v22.4s, v2.4s, v9.s[2] \n" "fmla v23.4s, v2.4s, v9.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v3.4s, v10.s[0] \n" "fmla v17.4s, v3.4s, v10.s[1] \n" "fmla v18.4s, v3.4s, v10.s[2] \n" "fmla v19.4s, v3.4s, v10.s[3] \n" "fmla v20.4s, v3.4s, v11.s[0] \n" "fmla v21.4s, v3.4s, v11.s[1] \n" "fmla v22.4s, v3.4s, v11.s[2] \n" "fmla v23.4s, v3.4s, v11.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v16.4h}, [%1], #8 \n" "st1 {v17.4h}, [%2], #8 \n" "st1 {v18.4h}, [%3], #8 \n" "st1 {v19.4h}, [%4], #8 \n" "st1 {v20.4h}, [%5], #8 \n" "st1 {v21.4h}, [%6], #8 \n" "st1 {v22.4h}, [%7], #8 \n" "st1 {v23.4h}, [%8], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "r"(biasptr) // %22 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } for (; i<size; i++) { unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8); int nn = inch;// inch always > 0 asm 
volatile( "ld1 {v16.4s, v17.4s}, [%22] \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%9, #64] \n" "ld1 {v0.4h}, [%9], #8 \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v5.4s, v0.s[0] \n" "fmla v18.4s, v6.4s, v0.s[1] \n" "fmla v19.4s, v7.4s, v0.s[1] \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[2] \n" "fmla v17.4s, v9.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v18.4s, v10.4s, v0.s[3] \n" "fmla v19.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "st1 {v16.h}[0], [%1], #2 \n" "st1 {v16.h}[1], [%2], #2 \n" "st1 {v16.h}[2], [%3], #2 \n" "st1 {v16.h}[3], [%4], #2 \n" "st1 {v17.h}[0], [%5], #2 \n" "st1 {v17.h}[1], [%6], #2 \n" "st1 {v17.h}[2], [%7], #2 \n" "st1 {v17.h}[3], [%8], #2 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "r"(biasptr) // %22 : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19" ); } } remain_outch_start += nn_outch << 3; nn_outch = (outch - remain_outch_start) >> 2; #else // __aarch64__ nn_outch = outch >> 2; #endif // __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; 
pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; unsigned short* outptr0 = top_blob.channel(p); unsigned short* outptr1 = top_blob.channel(p+1); unsigned short* outptr2 = top_blob.channel(p+2); unsigned short* outptr3 = top_blob.channel(p+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p : zeros; int i=0; #if __aarch64__ for (; i+11<size; i+=12) { unsigned short* tmpptr = tmp.channel(i/12); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4); int nn = inch;// inch always > 0 asm volatile( "ld1 {v19.4s}, [%14] \n" "dup v8.4s, v19.s[0] \n" "dup v9.4s, v19.s[0] \n" "dup v10.4s, v19.s[0] \n" "dup v11.4s, v19.s[1] \n" "dup v12.4s, v19.s[1] \n" "dup v13.4s, v19.s[1] \n" "dup v14.4s, v19.s[2] \n" "dup v15.4s, v19.s[2] \n" "dup v16.4s, v19.s[2] \n" "dup v17.4s, v19.s[3] \n" "dup v18.4s, v19.s[3] \n" "dup v19.4s, v19.s[3] \n" "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5], #32 \n" "shll v20.4s, v20.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "shll v22.4s, v22.4h, #16 \n" "shll v23.4s, v23.4h, #16 \n" "fmla v8.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v14.4s, v3.4s, v5.s[2] \n" 
"fmla v17.4s, v3.4s, v5.s[3] \n" "fmla v9.4s, v20.4s, v5.s[0] \n" "fmla v12.4s, v20.4s, v5.s[1] \n" "fmla v15.4s, v20.4s, v5.s[2] \n" "fmla v18.4s, v20.4s, v5.s[3] \n" "fmla v10.4s, v21.4s, v5.s[0] \n" "fmla v13.4s, v21.4s, v5.s[1] \n" "fmla v16.4s, v21.4s, v5.s[2] \n" "fmla v19.4s, v21.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%5], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v8.4s, v22.4s, v6.s[0] \n" "fmla v11.4s, v22.4s, v6.s[1] \n" "fmla v14.4s, v22.4s, v6.s[2] \n" "fmla v17.4s, v22.4s, v6.s[3] \n" "fmla v9.4s, v23.4s, v6.s[0] \n" "fmla v12.4s, v23.4s, v6.s[1] \n" "fmla v15.4s, v23.4s, v6.s[2] \n" "fmla v18.4s, v23.4s, v6.s[3] \n" "fmla v10.4s, v24.4s, v6.s[0] \n" "fmla v13.4s, v24.4s, v6.s[1] \n" "fmla v16.4s, v24.4s, v6.s[2] \n" "fmla v19.4s, v24.4s, v6.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v25.4s, v7.s[0] \n" "fmla v11.4s, v25.4s, v7.s[1] \n" "fmla v14.4s, v25.4s, v7.s[2] \n" "fmla v17.4s, v25.4s, v7.s[3] \n" "fmla v9.4s, v26.4s, v7.s[0] \n" "fmla v12.4s, v26.4s, v7.s[1] \n" "fmla v15.4s, v26.4s, v7.s[2] \n" "fmla v18.4s, v26.4s, v7.s[3] \n" "fmla v10.4s, v27.4s, v7.s[0] \n" "fmla v13.4s, v27.4s, v7.s[1] \n" "fmla v16.4s, v27.4s, v7.s[2] \n" "fmla v19.4s, v27.4s, v7.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n" "st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n" "st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n" "st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 
"=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); } #endif // __aarch64__ for (; i+7<size; i+=8) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4); #else unsigned short* tmpptr = tmp.channel(i/8); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v15.4s}, [%14] \n" "dup v8.4s, v15.s[0] \n" "dup v9.4s, v15.s[0] \n" "dup v10.4s, v15.s[1] \n" "dup v11.4s, v15.s[1] \n" "dup v12.4s, v15.s[2] \n" "dup v13.4s, v15.s[2] \n" "dup v14.4s, v15.s[3] \n" "dup v15.4s, v15.s[3] \n" "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v10.4s, v0.4s, v4.s[1] \n" "fmla v12.4s, v0.4s, v4.s[2] \n" "fmla v14.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v11.4s, v1.4s, v4.s[1] \n" "fmla v13.4s, v1.4s, v4.s[2] \n" "fmla v15.4s, v1.4s, v4.s[3] \n" "fmla v8.4s, v2.4s, v5.s[0] \n" "fmla v10.4s, v2.4s, v5.s[1] \n" "fmla v12.4s, v2.4s, v5.s[2] \n" "fmla v14.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v13.4s, v3.4s, v5.s[2] \n" "fmla v15.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" "shll v16.4s, 
v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v8.4s, v16.4s, v6.s[0] \n" "fmla v10.4s, v16.4s, v6.s[1] \n" "fmla v12.4s, v16.4s, v6.s[2] \n" "fmla v14.4s, v16.4s, v6.s[3] \n" "fmla v9.4s, v17.4s, v6.s[0] \n" "fmla v11.4s, v17.4s, v6.s[1] \n" "fmla v13.4s, v17.4s, v6.s[2] \n" "fmla v15.4s, v17.4s, v6.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v18.4s, v7.s[0] \n" "fmla v10.4s, v18.4s, v7.s[1] \n" "fmla v12.4s, v18.4s, v7.s[2] \n" "fmla v14.4s, v18.4s, v7.s[3] \n" "fmla v9.4s, v19.4s, v7.s[0] \n" "fmla v11.4s, v19.4s, v7.s[1] \n" "fmla v13.4s, v19.4s, v7.s[2] \n" "fmla v15.4s, v19.4s, v7.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "st1 {v8.4h, v9.4h}, [%1], #16 \n" "st1 {v10.4h, v11.4h}, [%2], #16 \n" "st1 {v12.4h, v13.4h}, [%3], #16 \n" "st1 {v14.4h, v15.4h}, [%4], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else // __aarch64__ asm volatile( "vld1.f32 {d30-d31}, [%14] \n" "vdup.f32 q8, d30[0] \n" "vdup.f32 q9, d30[0] \n" "vdup.f32 q10, d30[1] \n" "vdup.f32 q11, d30[1] \n" "vdup.f32 q12, d31[0] \n" "vdup.f32 q13, d31[0] \n" "vdup.f32 q14, d31[1] \n" "vdup.f32 q15, d31[1] \n" "0: \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5]! \n" "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6]! 
\n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q10, q0, d8[1] \n" "vmla.f32 q12, q0, d9[0] \n" "vmla.f32 q14, q0, d9[1] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q11, q1, d8[1] \n" "vmla.f32 q13, q1, d9[0] \n" "vmla.f32 q15, q1, d9[1] \n" "vmla.f32 q8, q2, d10[0] \n" "vmla.f32 q10, q2, d10[1] \n" "vmla.f32 q12, q2, d11[0] \n" "vmla.f32 q14, q2, d11[1] \n" "vmla.f32 q9, q3, d10[0] \n" "vmla.f32 q11, q3, d10[1] \n" "vmla.f32 q13, q3, d11[0] \n" "vmla.f32 q15, q3, d11[1] \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q8, q0, d12[0] \n" "vmla.f32 q10, q0, d12[1] \n" "vmla.f32 q12, q0, d13[0] \n" "vmla.f32 q14, q0, d13[1] \n" "vmla.f32 q9, q1, d12[0] \n" "vmla.f32 q11, q1, d12[1] \n" "vmla.f32 q13, q1, d13[0] \n" "vmla.f32 q15, q1, d13[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d14[0] \n" "vmla.f32 q10, q2, d14[1] \n" "vmla.f32 q12, q2, d15[0] \n" "vmla.f32 q14, q2, d15[1] \n" "vmla.f32 q9, q3, d14[0] \n" "vmla.f32 q11, q3, d14[1] \n" "vmla.f32 q13, q3, d15[0] \n" "vmla.f32 q15, q3, d15[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d21, q11, #16 \n" "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d28, q14, #16 \n" "vshrn.u32 d29, q15, #16 \n" "vst1.u16 {d16-d17}, [%1 :64]! \n" "vst1.u16 {d20-d21}, [%2 :64]! \n" "vst1.u16 {d24-d25}, [%3 :64]! \n" "vst1.u16 {d28-d29}, [%4 :64]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; i+3<size; i+=4) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v11.4s}, [%14] \n" "dup v8.4s, v11.s[0] \n" "dup v9.4s, v11.s[1] \n" "dup v10.4s, v11.s[2] \n" "dup v11.4s, v11.s[3] \n" "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v0.4s, v4.s[1] \n" "fmla v10.4s, v0.4s, v4.s[2] \n" "fmla v11.4s, v0.4s, v4.s[3] \n" "fmla v8.4s, v1.4s, v5.s[0] \n" "fmla v9.4s, v1.4s, v5.s[1] \n" "fmla v10.4s, v1.4s, v5.s[2] \n" "fmla v11.4s, v1.4s, v5.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v2.4s, v6.s[0] \n" "fmla v9.4s, v2.4s, v6.s[1] \n" "fmla v10.4s, v2.4s, v6.s[2] \n" "fmla v11.4s, v2.4s, v6.s[3] \n" "fmla v8.4s, v3.4s, v7.s[0] \n" "fmla v9.4s, v3.4s, v7.s[1] \n" "fmla v10.4s, v3.4s, v7.s[2] \n" "fmla v11.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "st1 {v8.4h}, 
[%1], #8 \n" "st1 {v9.4h}, [%2], #8 \n" "st1 {v10.4h}, [%3], #8 \n" "st1 {v11.4h}, [%4], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else // __aarch64__ asm volatile( "vld1.f32 {d22-d23}, [%14] \n" "vdup.f32 q8, d22[0] \n" "vdup.f32 q9, d22[1] \n" "vdup.f32 q10, d23[0] \n" "vdup.f32 q11, d23[1] \n" "0: \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5]! \n" "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q0, d8[1] \n" "vmla.f32 q10, q0, d9[0] \n" "vmla.f32 q11, q0, d9[1] \n" "vmla.f32 q8, q1, d10[0] \n" "vmla.f32 q9, q1, d10[1] \n" "vmla.f32 q10, q1, d11[0] \n" "vmla.f32 q11, q1, d11[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d12[0] \n" "vmla.f32 q9, q2, d12[1] \n" "vmla.f32 q10, q2, d13[0] \n" "vmla.f32 q11, q2, d13[1] \n" "vmla.f32 q8, q3, d14[0] \n" "vmla.f32 q9, q3, d14[1] \n" "vmla.f32 q10, q3, d15[0] \n" "vmla.f32 q11, q3, d15[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d18, q9, #16 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d22, q11, #16 \n" "vst1.u16 {d16}, [%1 :64]! \n" "vst1.u16 {d18}, [%2 :64]! \n" "vst1.u16 {d20}, [%3 :64]! \n" "vst1.u16 {d22}, [%4 :64]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11" ); #endif // __aarch64__ } for (; i<size; i++) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v8.4s}, [%14] \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "shrn v8.4h, v8.4s, #16 \n" "st1 {v8.h}[0], [%1], #2 \n" "st1 {v8.h}[1], [%2], #2 \n" "st1 {v8.h}[2], [%3], #2 \n" "st1 {v8.h}[3], [%4], #2 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else // __aarch64__ asm volatile( "vld1.f32 {d16-d17}, [%14] \n" "veor q9, q9 
\n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5]! \n" "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6]! \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q7, d1[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vshrn.u32 d16, q8, #16 \n" "vst1.u16 {d16[0]}, [%1]! \n" "vst1.u16 {d16[1]}, [%2]! \n" "vst1.u16 {d16[2]}, [%3]! \n" "vst1.u16 {d16[3]}, [%4]! \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11" ); #endif // __aarch64__ } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { unsigned short* outptr0 = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; int i=0; #if __aarch64__ for (; i+11<size; i+=12) { unsigned short* tmpptr = tmp.channel(i/12); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4); int nn = inch;// inch always > 0 asm volatile( "dup v8.4s, %w8 \n" "dup v9.4s, %w8 \n" "dup v10.4s, %w8 \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v4.4h}, [%3], #8 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v5.4s, v3.4s, v4.s[1] \n" "fmla v6.4s, v12.4s, v4.s[1] \n" "fmla v7.4s, v13.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v8.4s, v14.4s, v4.s[2] \n" "fmla v9.4s, v15.4s, v4.s[2] \n" "fmla v10.4s, v16.4s, v4.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v5.4s, v17.4s, v4.s[3] \n" "fmla v6.4s, v18.4s, v4.s[3] \n" "fmla v7.4s, v19.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v5.4s \n" "fadd v9.4s, v9.4s, v6.4s \n" "fadd v10.4s, v10.4s, v7.4s \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "r"(bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", 
"v15", "v16", "v17", "v18", "v19" ); } #endif // __aarch64__ for (; i+7<size; i+=8) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4); #else unsigned short* tmpptr = tmp.channel(i/8); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4 + p%4); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "dup v8.4s, %w8 \n" "dup v9.4s, %w8 \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v4.4h}, [%3], #8 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[1] \n" "fmla v11.4s, v3.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v8.4s, v12.4s, v4.s[2] \n" "fmla v9.4s, v13.4s, v4.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v10.4s, v14.4s, v4.s[3] \n" "fmla v11.4s, v15.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v9.4s, v9.4s, v11.4s \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "st1 {v8.4h, v9.4h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "r"(bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); #else // __aarch64__ asm volatile( "vdup.f32 q8, %8 \n" "vdup.f32 q9, %8 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2]! \n" "pld [%3, #64] \n" "vld1.u16 {d9}, [%3]! 
\n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q10, q2, d8[1] \n" "vmla.f32 q11, q3, d8[1] \n" "pld [%2, #256] \n" "vld1.u16 {d28-d31}, [%2]! \n" "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q8, q12, d9[0] \n" "vmla.f32 q9, q13, d9[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q10, q14, d9[1] \n" "vmla.f32 q11, q15, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q9, q9, q11 \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vst1.u16 {d16-d17}, [%1 :64]! \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "r"(bias0) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; i+3<size; i+=4) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4 + p%4); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "dup v8.4s, %w8 \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v4.4h}, [%3], #8 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd 
v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "shrn v8.4h, v8.4s, #16 \n" "st1 {v8.4h}, [%1], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "r"(bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11" ); #else // __aarch64__ asm volatile( "vdup.f32 q8, %8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2]! \n" "pld [%3, #64] \n" "vld1.u16 {d9}, [%3]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vshrn.u32 d16, q8, #16 \n" "vst1.u16 {d16}, [%1]! \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "r"(bias0) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11" ); #endif // __aarch64__ } for (; i<size; i++) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4 + p%4); #endif float32x4_t _sum0 = vdupq_n_f32(0.f); for (int q=0; q<inch; q++) { float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(tmpptr), 16)); float32x4_t _k0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(kptr), 16)); _sum0 = vmlaq_f32(_sum0, _r0, _k0); kptr += 4; tmpptr += 4; } #if __aarch64__ float sum0 = vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss2 = vpadd_f32(_ss, 
_ss); float sum0 = vget_lane_f32(_ss2, 0);
#endif

            // Write one output element: bias plus the accumulated dot product,
            // truncated back to bfloat16 storage.
            outptr0[0] = float32_to_bfloat16(bias0 + sum0);

            outptr0++;
        }
    }

//     // NOTE sgemm -- naive reference implementation kept for clarity
//     for (; p<outch; p++)
//     {
//         Mat out0 = top_blob.channel(p);
//
//         const float bias0 = bias ? bias[p] : 0.f;
//
//         unsigned short* outptr0 = out0;
//
//         for (int i=0; i<size; i++)
//         {
//             float sum = bias0;
//
//             const unsigned short* kptr = _kernel.channel(p);
//
//             for (int q=0; q<inch; q++)
//             {
//                 const unsigned short* img0 = bottom_blob.channel(q);
//
//                 sum += img0[i] * kptr[0];
//                 kptr ++;
//             }
//
//             outptr0[i] = sum;
//         }
//     }
}

// 1x1 convolution, stride 2, pack4 bf16 input -> pack1 bf16 output.
// Implemented by first "shrinking" the input -- gathering only the stride-2
// sample points into a contiguous temporary blob -- and then running the
// existing stride-1 sgemm kernel on the shrunk input.
static void conv1x1s2_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // u16 elements to skip at the end of each input row: the unread remainder
    // of the current row plus one whole skipped row (vertical stride 2),
    // times 4 for pack4.
    const int tailstep = (w - 2*outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<channels; p++)
    {
        const unsigned short* r0 = bottom_blob.channel(p);
        unsigned short* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // Copy 4 output pixels per iteration; the input pointer advances
            // 8 u16 per output pixel (horizontal stride 2 x pack4).
            for (; j+3 < outw; j+=4)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0+8);
                uint16x4_t _v2 = vld1_u16(r0+16);
                uint16x4_t _v3 = vld1_u16(r0+24);
                uint16x8_t _v01 = vcombine_u16(_v0, _v1);
                uint16x8_t _v23 = vcombine_u16(_v2, _v3);
                vst1q_u16(outptr, _v01);
                vst1q_u16(outptr+8, _v23);

                r0 += 32;
                outptr += 16;
            }
            // 2-pixel tail.
            for (; j+1 < outw; j+=2)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0+8);
                uint16x8_t _v = vcombine_u16(_v0, _v1);
                vst1q_u16(outptr, _v);

                r0 += 16;
                outptr += 8;
            }
            // 1-pixel tail.
            for (; j < outw; j++)
            {
                uint16x4_t _v = vld1_u16(r0);
                vst1_u16(outptr, _v);

                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    // Delegate the actual 1x1 convolution to the stride-1 sgemm kernel.
    conv1x1s1_sgemm_pack4to1_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
grid_astar.h
/* * Copyright (c) 2014-2020, the neonavigation authors * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/

#ifndef PLANNER_CSPACE_GRID_ASTAR_H
#define PLANNER_CSPACE_GRID_ASTAR_H

#define _USE_MATH_DEFINES
#include <cfloat>
#include <cmath>
#include <limits>
#include <list>
#include <map>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>

#include <boost/chrono.hpp>

#include <planner_cspace/reservable_priority_queue.h>
#include <planner_cspace/cyclic_vec.h>
#include <planner_cspace/blockmem_gridmap.h>
#include <planner_cspace/grid_astar_model.h>

#include <omp.h>

namespace planner_cspace
{
// Counters reported to the progress callback during a search.
struct SearchStats
{
  size_t num_loop;           // main search iterations so far
  size_t num_search_queue;   // open-queue size at the start of this iteration
  size_t num_prev_updates;   // grid updates applied since the last report
  size_t num_total_updates;  // grid updates applied since the search began
};

// OpenMP-parallelized A*-style search on a DIM-dimensional grid where the
// first NONCYCLIC axes are non-cyclic and the rest wrap around.
template <int DIM = 3, int NONCYCLIC = 2>
class GridAstar
{
public:
  using Vec = CyclicVecInt<DIM, NONCYCLIC>;
  using Vecf = CyclicVecFloat<DIM, NONCYCLIC>;
  using VecWithCost = typename GridAstarModelBase<DIM, NONCYCLIC>::VecWithCost;
  // Invoked periodically with the current best partial path and statistics;
  // returning false aborts the search.
  using ProgressCallback = std::function<bool(const std::list<Vec>&, const SearchStats&)>;

  template <class T, int block_width = 0x20>
  class Gridmap : public BlockMemGridmap<T, DIM, NONCYCLIC, block_width>
  {
    using BlockMemGridmap<T, DIM, NONCYCLIC, block_width>::BlockMemGridmap;
  };

  // Open-queue entry: total priority (cost so far + heuristic), raw cost so
  // far, and the grid position.
  class PriorityVec
  {
  public:
    float p_;      // priority used for queue ordering
    float p_raw_;  // cost accumulated so far (no heuristic)
    Vec v_;

    PriorityVec(const float p, const float p_raw, const Vec& v)
      : p_(p)
      , p_raw_(p_raw)
      , v_(v)
    {
    }
    bool operator<(const PriorityVec& b) const
    {
      // smaller first (inverted comparison for the max-heap priority queue)
      return p_ > b.p_;
    }
  };

  // One pending cost-grid update produced by a parallel search task; the
  // updates are buffered per thread and applied inside an omp critical.
  class GridmapUpdate
  {
  private:
    const Vec p0_;  // parent position
    const Vec p1_;  // position being updated
    const float cost_estim_;
    const float cost_;

  public:
    GridmapUpdate(
        const Vec& p0, const Vec& p1,
        const float cost_estim, const float cost)
      : p0_(p0)
      , p1_(p1)
      , cost_estim_(cost_estim)
      , cost_(cost)
    {
    }
    const Vec& getParentPos() const
    {
      return p0_;
    }
    const Vec& getPos() const
    {
      return p1_;
    }
    const float getCost() const
    {
      return cost_;
    }
    const PriorityVec getPriorityVec() const
    {
      return PriorityVec(cost_estim_, cost_, p1_);
    }
  };

public:
  constexpr int getDim() const
  {
    return DIM;
  }
  constexpr int getNoncyclic() const
  {
    return NONCYCLIC;
  }
  // Number of queue entries expanded in parallel per search iteration.
  void setSearchTaskNum(const size_t& search_task_num)
  {
    search_task_num_ = search_task_num;
  }

  // Resize the cost grid and pre-reserve the auxiliary containers.
  void reset(const Vec size)
  {
    g_.reset(size);
    g_.clear(std::numeric_limits<float>::max());
    parents_.reserve(g_.ser_size() / 16);
    open_.reserve(g_.ser_size() / 16);
  }
  GridAstar()
    : queue_size_limit_(0)
    , search_task_num_(1)
  {
  }
  explicit GridAstar(const Vec size)
  {
    reset(size);
    queue_size_limit_ = 0;
  }
  // 0 (default) means the open queue is unbounded.
  void setQueueSizeLimit(const size_t size)
  {
    queue_size_limit_ = size;
  }

  // Search a path from any of the start states ss (with per-start initial
  // costs) to the goal e. Returns true and fills path on success; when
  // return_best is set, a best-effort partial path is returned on failure.
  bool search(
      const std::vector<VecWithCost>& ss,
      const Vec& e,
      std::list<Vec>& path,
      const typename GridAstarModelBase<DIM, NONCYCLIC>::Ptr& model,
      ProgressCallback cb_progress,
      const float cost_leave,
      const float progress_interval,
      const bool return_best = false)
  {
    return searchImpl(
        g_, ss, e, path, model, cb_progress,
        cost_leave, progress_interval, return_best);
  }

protected:
  // Core parallel search. Each iteration: one thread (omp single) pops up to
  // search_task_num_ best nodes from the open queue; all threads expand them
  // in parallel (omp for), buffering updates; updates are then merged back
  // into the shared grid/queue under omp critical.
  bool searchImpl(
      Gridmap<float>& g,
      const std::vector<VecWithCost>& sts,
      const Vec& en,
      std::list<Vec>& path,
      const typename GridAstarModelBase<DIM, NONCYCLIC>::Ptr& model,
      ProgressCallback cb_progress,
      const float cost_leave,
      const float progress_interval,
      const bool return_best = false)
  {
    if (sts.size() == 0)
      return false;

    auto ts = boost::chrono::high_resolution_clock::now();

    Vec e = en;
    e.cycleUnsigned(g.size());
    g.clear(std::numeric_limits<float>::max());
    open_.clear();
    parents_.clear();

    std::vector<VecWithCost> ss_normalized;
    Vec better;  // node closest to the goal (by heuristic) seen so far
    int cost_estim_min = std::numeric_limits<int>::max();
    for (const VecWithCost& st : sts)
    {
      // A start equal to the goal is rejected rather than treated as success.
      if (st.v_ == en)
        return false;

      Vec s = st.v_;
      s.cycleUnsigned(g.size());
      ss_normalized.emplace_back(s, st.c_);
      g[s] = st.c_;

      const int cost_estim = model->costEstim(s, e);
      open_.emplace(cost_estim + st.c_, st.c_, s);
      if (cost_estim_min > cost_estim)
      {
        cost_estim_min = cost_estim;
        better = s;
      }
    }

    std::vector<PriorityVec> centers;
    centers.reserve(search_task_num_);

    size_t num_updates(0);
    size_t num_total_updates(0);
    size_t num_loop(0);

    bool found(false);
    bool abort(false);
#pragma omp parallel
    {
      std::vector<GridmapUpdate> updates;
      // Reserve buffer using example search diff list
      updates.reserve(
          search_task_num_ * model->searchGrids(ss_normalized[0].v_, ss_normalized, e).size() /
          omp_get_num_threads());
      std::vector<Vec> dont;  // expanded nodes that produced no update
      dont.reserve(search_task_num_);

      while (true)
      {
#pragma omp barrier
#pragma omp single
        {
          const size_t num_search_queue = open_.size();
          num_loop++;

          // Fetch tasks to be parallelized
          centers.clear();
          for (size_t i = 0; i < search_task_num_;)
          {
            if (open_.size() == 0)
              break;
            PriorityVec center(open_.top());
            open_.pop();
            // Goal reached, or remaining heuristic fell below cost_leave.
            if (center.v_ == e || center.p_ - center.p_raw_ < cost_leave)
            {
              e = center.v_;
              found = true;
              break;
            }
            centers.emplace_back(std::move(center));
            ++i;
          }
          const auto tnow = boost::chrono::high_resolution_clock::now();
          // Periodic progress report; the callback may abort the search.
          if (boost::chrono::duration<float>(tnow - ts).count() >= progress_interval)
          {
            std::list<Vec> path_tmp;
            ts = tnow;
            findPath(ss_normalized, better, path_tmp);
            const SearchStats stats =
                {
                    .num_loop = num_loop,
                    .num_search_queue = num_search_queue,
                    .num_prev_updates = num_updates,
                    .num_total_updates = num_total_updates,
                };
            if (!cb_progress(path_tmp, stats))
            {
              abort = true;
            }
          }
          num_updates = 0;
        }
        if (centers.size() < 1 || found || abort)
          break;

        updates.clear();
        dont.clear();
#pragma omp for schedule(static)
        for (auto it = centers.cbegin(); it < centers.cend(); ++it)
        {
          const Vec p = it->v_;
          const float c = it->p_raw_;
          const float c_estim = it->p_;
          const float gp = g[p];
          // A cheaper path to p has been found since this entry was queued.
          if (c > gp)
            continue;

          if (c_estim - c < cost_estim_min)
          {
            cost_estim_min = c_estim - c;
            better = p;
          }

          const std::vector<Vec> search_list = model->searchGrids(p, ss_normalized, e);

          bool updated(false);
          for (auto it = search_list.cbegin(); it < search_list.cend(); ++it)
          {
            Vec next = p + *it;
            next.cycleUnsigned(g.size());
            if (next.isExceeded(g.size()))
              continue;
            if (g[next] < gp)
            {
              // Skip as this search task has no chance to find better way.
              continue;
            }
            const float cost_estim = model->costEstim(next, e);
            if (cost_estim < 0 || cost_estim == std::numeric_limits<float>::max())
              continue;

            const float cost = model->cost(p, next, ss_normalized, e);
            if (cost < 0 || cost == std::numeric_limits<float>::max())
              continue;

            const float cost_next = c + cost;
            if (g[next] > cost_next)
            {
              updated = true;
              updates.emplace_back(p, next, cost_next + cost_estim, cost_next);
            }
          }
          if (!updated)
            dont.push_back(p);
        }
#pragma omp barrier
#pragma omp critical
        {
          // Merge this thread's buffered updates into the shared state.
          for (const GridmapUpdate& u : updates)
          {
            if (g[u.getPos()] > u.getCost())
            {
              g[u.getPos()] = u.getCost();
              parents_[u.getPos()] = u.getParentPos();
              open_.push(std::move(u.getPriorityVec()));
              if (queue_size_limit_ > 0 && open_.size() > queue_size_limit_)
                open_.pop_back();
            }
          }
          // Mark dead-end expansions so they are not revisited.
          for (const Vec& p : dont)
          {
            g[p] = -1;
          }
          const size_t n = updates.size();
          num_updates += n;
          num_total_updates += n;
        }  // omp critical
      }
    }  // omp parallel
    if (!found)
    {
      // No feasible path
      if (return_best)
      {
        findPath(ss_normalized, better, path);
      }
      return false;
    }

    return findPath(ss_normalized, e, path);
  }

  // Walk parents_ from e back to any start in ss, building path front-first.
  // Visited links are erased from a local copy to guard against cycles.
  bool findPath(const std::vector<VecWithCost>& ss, const Vec& e, std::list<Vec>& path) const
  {
    std::unordered_map<Vec, Vec, Vec> parents = parents_;
    Vec n = e;
    while (true)
    {
      path.push_front(n);

      bool found(false);
      for (const VecWithCost& s : ss)
      {
        if (n == s.v_)
        {
          found = true;
          break;
        }
      }
      if (found)
        break;
      if (parents.find(n) == parents.end())
        return false;
      const Vec child = n;
      n = parents[child];
      parents.erase(child);
    }
    return true;
  }

  Gridmap<float> g_;                              // cost-so-far grid
  std::unordered_map<Vec, Vec, Vec> parents_;     // back-pointers for path reconstruction
  reservable_priority_queue<PriorityVec> open_;   // A* open queue
  size_t queue_size_limit_;                       // 0 = unbounded
  size_t search_task_num_;                        // nodes expanded in parallel per iteration
};
}  // namespace planner_cspace

#endif  // PLANNER_CSPACE_GRID_ASTAR_H
task-dependency.c
/* * task-deoendency.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s #include <omp.h> #include <stdio.h> #include <unistd.h> #include "ompt/ompt-signal.h" int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(2) shared(var, a) #pragma omp master { #pragma omp task shared(var, a) depend(out : var) { OMPT_SIGNAL(a); var++; } #pragma omp task shared(a) depend(in : var) { OMPT_SIGNAL(a); OMPT_WAIT(a, 3); } #pragma omp task shared(var) // depend(in: var) is missing here! { var++; OMPT_SIGNAL(a); } // Give other thread time to steal the task. OMPT_WAIT(a, 2); } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}task-dependency.c:41 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}task-dependency.c:30 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
phonon.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <string.h> #include <stddef.h> #include <dynmat.h> #include <phonon.h> #include <lapack_wrapper.h> static size_t collect_undone_grid_points(size_t *undone, char *phonon_done, const size_t num_grid_points, const size_t *grid_points); static void get_undone_phonons(double *frequencies, lapack_complex_double *eigenvectors, const size_t *undone_grid_points, const size_t num_undone_grid_points, PHPYCONST int (*grid_address)[3], const int mesh[3], const double *fc2, PHPYCONST double(*svecs_fc2)[27][3], const int *multi_fc2, const size_t num_patom, const size_t num_satom, const double *masses_fc2, const int *p2s_fc2, const int *s2p_fc2, const double unit_conversion_factor, PHPYCONST double (*born)[3][3], PHPYCONST double dielectric[3][3], PHPYCONST double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const char uplo); static void get_gonze_undone_phonons(double *frequencies, lapack_complex_double *eigenvectors, const size_t *undone_grid_points, const size_t num_undone_grid_points, PHPYCONST int (*grid_address)[3], const int mesh[3], const double *fc2, PHPYCONST double(*svecs_fc2)[27][3], const int *multi_fc2, PHPYCONST double (*positions)[3], const size_t num_patom, const size_t num_satom, const double *masses_fc2, const int *p2s_fc2, const int *s2p_fc2, const double unit_conversion_factor, PHPYCONST double (*born)[3][3], PHPYCONST double dielectric[3][3], PHPYCONST double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const double *dd_q0, PHPYCONST double(*G_list)[3], const size_t num_G_points, const double lambda, const char uplo); static void get_phonons(lapack_complex_double *eigvecs, const double q[3], const double *fc2, const double *masses, const int *p2s, const int *s2p, const int *multi, const size_t num_patom, const size_t num_satom, PHPYCONST double(*svecs)[27][3], const int is_nac, PHPYCONST double (*born)[3][3], PHPYCONST double dielectric[3][3], PHPYCONST double 
reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const double unit_conversion_factor); static void get_gonze_phonons(lapack_complex_double *eigvecs, const double q[3], const double *fc2, const double *masses, const int *p2s, const int *s2p, const int *multi, PHPYCONST double (*positions)[3], const size_t num_patom, const size_t num_satom, PHPYCONST double(*svecs)[27][3], const int is_nac, PHPYCONST double (*born)[3][3], PHPYCONST double dielectric[3][3], PHPYCONST double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const double *dd_q0, PHPYCONST double(*G_list)[3], const size_t num_G_points, const double lambda); static void get_dynamical_matrix(lapack_complex_double *dynmat, const double q[3], const double *fc2, const double *masses, const int *p2s, const int *s2p, const int *multi, const size_t num_patom, const size_t num_satom, PHPYCONST double(*svecs)[27][3], const int is_nac, PHPYCONST double (*born)[3][3], /* Wang NAC unless NULL */ PHPYCONST double dielectric[3][3], PHPYCONST double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor); static void get_charge_sum(double (*charge_sum)[3][3], const size_t num_patom, const size_t num_satom, const double q[3], PHPYCONST double (*born)[3][3], PHPYCONST double dielectric[3][3], PHPYCONST double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor); static int needs_nac(PHPYCONST double (*born)[3][3], PHPYCONST int (*grid_address)[3], const size_t gp, const double *q_direction); void phn_get_phonons_at_gridpoints(double *frequencies, lapack_complex_double *eigenvectors, char *phonon_done, const size_t num_phonons, const size_t *grid_points, const size_t num_grid_points, PHPYCONST int (*grid_address)[3], const int mesh[3], const double *fc2, PHPYCONST double(*svecs_fc2)[27][3], const int *multi_fc2, const size_t num_patom, const size_t num_satom, const double *masses_fc2, const int *p2s_fc2, const int 
*s2p_fc2, const double unit_conversion_factor, PHPYCONST double (*born)[3][3], PHPYCONST double dielectric[3][3], PHPYCONST double reciprocal_lattice[3][3], const double *q_direction, /* must be pointer */ const double nac_factor, const char uplo) { size_t num_undone; size_t *undone; undone = (size_t*)malloc(sizeof(size_t) * num_phonons); num_undone = collect_undone_grid_points(undone, phonon_done, num_grid_points, grid_points); get_undone_phonons(frequencies, eigenvectors, undone, num_undone, grid_address, mesh, fc2, svecs_fc2, multi_fc2, num_patom, num_satom, masses_fc2, p2s_fc2, s2p_fc2, unit_conversion_factor, born, dielectric, reciprocal_lattice, q_direction, nac_factor, uplo); free(undone); undone = NULL; } void phn_get_gonze_phonons_at_gridpoints(double *frequencies, lapack_complex_double *eigenvectors, char *phonon_done, const size_t num_phonons, const size_t *grid_points, const size_t num_grid_points, PHPYCONST int (*grid_address)[3], const int mesh[3], const double *fc2, PHPYCONST double(*svecs_fc2)[27][3], const int *multi_fc2, PHPYCONST double (*positions)[3], const size_t num_patom, const size_t num_satom, const double *masses_fc2, const int *p2s_fc2, const int *s2p_fc2, const double unit_conversion_factor, PHPYCONST double (*born)[3][3], PHPYCONST double dielectric[3][3], PHPYCONST double reciprocal_lattice[3][3], const double *q_direction, /* pointer */ const double nac_factor, const double *dd_q0, PHPYCONST double(*G_list)[3], const size_t num_G_points, const double lambda, const char uplo) { size_t num_undone; size_t *undone; undone = (size_t*)malloc(sizeof(size_t) * num_phonons); num_undone = collect_undone_grid_points(undone, phonon_done, num_grid_points, grid_points); get_gonze_undone_phonons(frequencies, eigenvectors, undone, num_undone, grid_address, mesh, fc2, svecs_fc2, multi_fc2, positions, num_patom, num_satom, masses_fc2, p2s_fc2, s2p_fc2, unit_conversion_factor, born, dielectric, reciprocal_lattice, q_direction, nac_factor, dd_q0, 
/* NOTE(review): this chunk begins mid-function; the three statements below
   are the tail of a definition whose opening lies outside this view
   (final call arguments plus cleanup of the scratch `undone` list). */
                            G_list, num_G_points, lambda, uplo);
  free(undone);
  undone = NULL;
}

/* Gather the grid points in `grid_points` that have not been processed yet
 * (phonon_done[gp] == 0) into `undone`, marking each as done as it is
 * collected so a point is never queued twice.
 *
 * undone          out: receives the grid-point indices still to process
 *                      (caller must provide room for num_grid_points entries)
 * phonon_done     in/out: per-grid-point done flags; set to 1 here
 * num_grid_points number of entries in grid_points
 * grid_points     candidate grid-point indices
 *
 * Returns the number of entries written to `undone`. */
static size_t collect_undone_grid_points(size_t *undone,
                                         char *phonon_done,
                                         const size_t num_grid_points,
                                         const size_t *grid_points)
{
  size_t i, gp, num_undone;

  num_undone = 0;
  for (i = 0; i < num_grid_points; i++) {
    gp = grid_points[i];
    if (phonon_done[gp] == 0) {
      undone[num_undone] = gp;
      num_undone++;
      phonon_done[gp] = 1;
    }
  }
  return num_undone;
}

/* Compute phonon frequencies and eigenvectors for the not-yet-done grid
 * points, using the Wang-style NAC path (get_phonons).
 *
 * Phase 1 (parallel): build the dynamical matrix for each undone grid point
 *   directly into the eigenvectors array (one num_band x num_band slab per
 *   grid point).
 * Phase 2 (parallel unless MULTITHREADED_BLAS): diagonalize each slab with
 *   phonopy_zheev; eigenvalues land in `frequencies`, eigenvectors overwrite
 *   the slab in place.
 *
 * NOTE(review): `info` (zheev status) is assigned but never checked here —
 * presumably failures are tolerated upstream; confirm against callers. */
static void get_undone_phonons(double *frequencies,
                               lapack_complex_double *eigenvectors,
                               const size_t *undone_grid_points,
                               const size_t num_undone_grid_points,
                               PHPYCONST int (*grid_address)[3],
                               const int mesh[3],
                               const double *fc2,
                               PHPYCONST double(*svecs_fc2)[27][3],
                               const int *multi_fc2,
                               const size_t num_patom,
                               const size_t num_satom,
                               const double *masses_fc2,
                               const int *p2s_fc2,
                               const int *s2p_fc2,
                               const double unit_conversion_factor,
                               PHPYCONST double (*born)[3][3],
                               PHPYCONST double dielectric[3][3],
                               PHPYCONST double reciprocal_lattice[3][3],
                               const double *q_direction,
                               const double nac_factor,
                               const char uplo)
{
  size_t i, j, gp, num_band;
  int is_nac, info;
  double q[3];
  double *freqs_tmp;

  num_band = num_patom * 3;  /* 3 Cartesian components per primitive atom */

#pragma omp parallel for private(j, q, gp, is_nac)
  for (i = 0; i < num_undone_grid_points; i++) {
    gp = undone_grid_points[i];
    /* Fractional q-vector of this grid point. */
    for (j = 0; j < 3; j++) {
      q[j] = ((double)grid_address[gp][j]) / mesh[j];
    }
    is_nac = needs_nac(born, grid_address, gp, q_direction);
    get_phonons(eigenvectors + num_band * num_band * gp,
                q,
                fc2,
                masses_fc2,
                p2s_fc2,
                s2p_fc2,
                multi_fc2,
                num_patom,
                num_satom,
                svecs_fc2,
                is_nac,
                born,
                dielectric,
                reciprocal_lattice,
                q_direction,
                nac_factor,
                unit_conversion_factor);
  }

/* To avoid multithreaded BLAS in OpenMP loop */
#ifndef MULTITHREADED_BLAS
#pragma omp parallel for private(j, gp, freqs_tmp, info)
#endif
  for (i = 0; i < num_undone_grid_points; i++) {
    gp = undone_grid_points[i];
    freqs_tmp = frequencies + num_band * gp;
    /* Store eigenvalues in freqs array. */
    /* Eigenvectors are overwritten on eigvecs array. */
    info = phonopy_zheev(freqs_tmp,
                         eigenvectors + num_band * num_band * gp,
                         num_band,
                         uplo);
    /* Sqrt of eigenvalues are re-stored in freqs array
       (sign preserved for imaginary/soft modes). */
    for (j = 0; j < num_band; j++) {
      freqs_tmp[j] = sqrt(fabs(freqs_tmp[j])) *
        ((freqs_tmp[j] > 0) - (freqs_tmp[j] < 0)) * unit_conversion_factor;
    }
  }
}

/* Same two-phase scheme as get_undone_phonons, but builds the dynamical
 * matrix with the Gonze-Lee dipole-dipole treatment (get_gonze_phonons),
 * which additionally needs atom positions, the precomputed dd_q0 term,
 * the reciprocal G-vector list and the Ewald parameter lambda. */
static void get_gonze_undone_phonons(double *frequencies,
                                     lapack_complex_double *eigenvectors,
                                     const size_t *undone_grid_points,
                                     const size_t num_undone_grid_points,
                                     PHPYCONST int (*grid_address)[3],
                                     const int mesh[3],
                                     const double *fc2,
                                     PHPYCONST double(*svecs_fc2)[27][3],
                                     const int *multi_fc2,
                                     PHPYCONST double (*positions)[3],
                                     const size_t num_patom,
                                     const size_t num_satom,
                                     const double *masses_fc2,
                                     const int *p2s_fc2,
                                     const int *s2p_fc2,
                                     const double unit_conversion_factor,
                                     PHPYCONST double (*born)[3][3],
                                     PHPYCONST double dielectric[3][3],
                                     PHPYCONST double reciprocal_lattice[3][3],
                                     const double *q_direction,
                                     const double nac_factor,
                                     const double *dd_q0,
                                     PHPYCONST double(*G_list)[3],
                                     const size_t num_G_points,
                                     const double lambda,
                                     const char uplo)
{
  size_t i, j, gp, num_band;
  int is_nac, info;
  double q[3];
  double *freqs_tmp;

  num_band = num_patom * 3;

#pragma omp parallel for private(j, q, gp, is_nac)
  for (i = 0; i < num_undone_grid_points; i++) {
    gp = undone_grid_points[i];
    for (j = 0; j < 3; j++) {
      q[j] = ((double)grid_address[gp][j]) / mesh[j];
    }
    is_nac = needs_nac(born, grid_address, gp, q_direction);
    get_gonze_phonons(eigenvectors + num_band * num_band * gp,
                      q,
                      fc2,
                      masses_fc2,
                      p2s_fc2,
                      s2p_fc2,
                      multi_fc2,
                      positions,
                      num_patom,
                      num_satom,
                      svecs_fc2,
                      is_nac,
                      born,
                      dielectric,
                      reciprocal_lattice,
                      q_direction,
                      nac_factor,
                      dd_q0,
                      G_list,
                      num_G_points,
                      lambda);
  }

/* To avoid multithreaded BLAS in OpenMP loop */
#ifndef MULTITHREADED_BLAS
#pragma omp parallel for private(j, gp, freqs_tmp, info)
#endif
  for (i = 0; i < num_undone_grid_points; i++) {
    gp = undone_grid_points[i];
    /* Store eigenvalues in freqs array. */
    /* Eigenvectors are overwritten on eigvecs array. */
    freqs_tmp = frequencies + num_band * gp;
    info = phonopy_zheev(freqs_tmp,
                         eigenvectors + num_band * num_band * gp,
                         num_band,
                         uplo);
    /* Sqrt of eigenvalues are re-stored in freqs array
       (sign preserved for imaginary/soft modes). */
    for (j = 0; j < num_band; j++) {
      freqs_tmp[j] = sqrt(fabs(freqs_tmp[j])) *
        ((freqs_tmp[j] > 0) - (freqs_tmp[j] < 0)) * unit_conversion_factor;
    }
  }
}

/* Build the (Wang-NAC) dynamical matrix for one q-point.
 *
 * NOTE(review): despite the name, this function only fills `eigvecs` with
 * the dynamical matrix — diagonalization happens in the caller.
 * `unit_conversion_factor` is accepted but unused here; presumably kept for
 * signature symmetry with the Gonze variant — confirm before removing. */
static void get_phonons(lapack_complex_double *eigvecs,
                        const double q[3],
                        const double *fc2,
                        const double *masses,
                        const int *p2s,
                        const int *s2p,
                        const int *multi,
                        const size_t num_patom,
                        const size_t num_satom,
                        PHPYCONST double(*svecs)[27][3],
                        const int is_nac,
                        PHPYCONST double (*born)[3][3],
                        PHPYCONST double dielectric[3][3],
                        PHPYCONST double reciprocal_lattice[3][3],
                        const double *q_direction,
                        const double nac_factor,
                        const double unit_conversion_factor)
{
  /* Store dynamical matrix in eigvecs array. */
  get_dynamical_matrix(eigvecs,
                       q,
                       fc2,
                       masses,
                       p2s,
                       s2p,
                       multi,
                       num_patom,
                       num_satom,
                       svecs,
                       is_nac,
                       born,
                       dielectric,
                       reciprocal_lattice,
                       q_direction,
                       nac_factor);
}

/* Build the dynamical matrix for one q-point with the Gonze-Lee
 * dipole-dipole NAC correction: short-range part from fc2, then the
 * reciprocal-space dipole-dipole term divided by sqrt(m_i * m_j) is added
 * element-wise.
 *
 * NOTE(review): the `is_nac` parameter is accepted but never read — the
 * dipole-dipole term is always added; confirm this is intended.
 * NOTE(review): the malloc of `dd` (and of `q_dir_cart`) is unchecked;
 * an OOM here would crash inside dym_get_recip_dipole_dipole. */
static void get_gonze_phonons(lapack_complex_double *eigvecs,
                              const double q[3],
                              const double *fc2,
                              const double *masses,
                              const int *p2s,
                              const int *s2p,
                              const int *multi,
                              PHPYCONST double (*positions)[3],
                              const size_t num_patom,
                              const size_t num_satom,
                              PHPYCONST double(*svecs)[27][3],
                              const int is_nac,
                              PHPYCONST double (*born)[3][3],
                              PHPYCONST double dielectric[3][3],
                              PHPYCONST double reciprocal_lattice[3][3],
                              const double *q_direction,
                              const double nac_factor,
                              const double *dd_q0,
                              PHPYCONST double(*G_list)[3],
                              const size_t num_G_points,
                              const double lambda)
{
  size_t i, j, k, l, adrs, num_band;
  double mm;
  double q_cart[3];
  double *q_dir_cart;
  lapack_complex_double *dd;

  dd = NULL;
  q_dir_cart = NULL;
  num_band = num_patom * 3;

  /* Short-range dynamical matrix (no charge sum) directly into eigvecs. */
  dym_get_dynamical_matrix_at_q((double*)eigvecs,
                                num_patom,
                                num_satom,
                                fc2,
                                q,
                                svecs,
                                multi,
                                masses,
                                s2p,
                                p2s,
                                NULL,
                                0);

  dd = (lapack_complex_double*)
    malloc(sizeof(lapack_complex_double) * num_band * num_band);

  /* q in Cartesian coordinates: q_cart = reciprocal_lattice . q */
  for (i = 0; i < 3; i++) {
    q_cart[i] = 0;
    for (j = 0; j < 3; j++) {
      q_cart[i] += reciprocal_lattice[i][j] * q[j];
    }
  }

  /* Optional non-analytic direction, also converted to Cartesian. */
  if (q_direction) {
    q_dir_cart = (double*)malloc(sizeof(double) * 3);
    for (i = 0; i < 3; i++) {
      q_dir_cart[i] = 0;
      for (j = 0; j < 3; j++) {
        q_dir_cart[i] += reciprocal_lattice[i][j] * q_direction[j];
      }
    }
  }

  dym_get_recip_dipole_dipole((double*)dd,
                              dd_q0,
                              G_list,
                              num_G_points,
                              num_patom,
                              q_cart,
                              q_dir_cart,
                              born,
                              dielectric,
                              positions,
                              nac_factor,
                              lambda,
                              1e-5);

  if (q_direction) {
    free(q_dir_cart);
    q_dir_cart = NULL;
  }

  /* Add the mass-weighted dipole-dipole term to eigvecs in place.
     adrs maps (atom i, Cartesian k; atom j, Cartesian l) into the
     row-major num_band x num_band matrix. */
  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < num_patom; j++) {
      mm = sqrt(masses[i] * masses[j]);
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
          eigvecs[adrs] = lapack_make_complex_double(
            lapack_complex_double_real(eigvecs[adrs]) +
            lapack_complex_double_real(dd[adrs]) / mm,
            lapack_complex_double_imag(eigvecs[adrs]) +
            lapack_complex_double_imag(dd[adrs]) / mm);
        }
      }
    }
  }

  free(dd);
  dd = NULL;
}

/* Build the dynamical matrix for one q-point, optionally applying the
 * Wang charge-sum NAC correction when is_nac is set (born != NULL and the
 * q-point is not Gamma, or an explicit q_direction was given). */
static void get_dynamical_matrix(lapack_complex_double *dynmat,
                                 const double q[3],
                                 const double *fc2,
                                 const double *masses,
                                 const int *p2s,
                                 const int *s2p,
                                 const int *multi,
                                 const size_t num_patom,
                                 const size_t num_satom,
                                 PHPYCONST double(*svecs)[27][3],
                                 const int is_nac,
                                 PHPYCONST double (*born)[3][3], /* Wang NAC unless NULL */
                                 PHPYCONST double dielectric[3][3],
                                 PHPYCONST double reciprocal_lattice[3][3],
                                 const double *q_direction,
                                 const double nac_factor)
{
  double (*charge_sum)[3][3];

  charge_sum = NULL;

  if (is_nac) {
    /* NOTE(review): unchecked malloc. */
    charge_sum = (double(*)[3][3])
      malloc(sizeof(double[3][3]) * num_patom * num_patom * 9);
    get_charge_sum(charge_sum,
                   num_patom,
                   num_satom,
                   q,
                   born,
                   dielectric,
                   reciprocal_lattice,
                   q_direction,
                   nac_factor);
  }

  dym_get_dynamical_matrix_at_q((double*)dynmat,
                                num_patom,
                                num_satom,
                                fc2,
                                q,
                                svecs,
                                multi,
                                masses,
                                s2p,
                                p2s,
                                charge_sum,
                                0);
  if (is_nac) {
    free(charge_sum);
    charge_sum = NULL;
  }
}

/* Compute the Wang-method charge-sum tensor for one q-point.
 * Uses q_direction (converted to Cartesian) when given, else q itself,
 * and scales by nac_factor / (q . epsilon . q) / N, where
 * N = num_satom / num_patom is the number of primitive cells in the
 * supercell.
 *
 * NOTE(review): if q . epsilon . q == 0 (e.g. exactly at Gamma with no
 * q_direction) this divides by zero — callers guard via needs_nac(). */
static void get_charge_sum(double (*charge_sum)[3][3],
                           const size_t num_patom,
                           const size_t num_satom,
                           const double q[3],
                           PHPYCONST double (*born)[3][3],
                           PHPYCONST double dielectric[3][3],
                           PHPYCONST double reciprocal_lattice[3][3],
                           const double *q_direction,
                           const double nac_factor)
{
  size_t i, j;
  double inv_dielectric_factor, dielectric_factor, tmp_val;
  double q_cart[3];

  if (q_direction) {
    for (i = 0; i < 3; i++) {
      q_cart[i] = 0.0;
      for (j = 0; j < 3; j++) {
        q_cart[i] += reciprocal_lattice[i][j] * q_direction[j];
      }
    }
  } else {
    for (i = 0; i < 3; i++) {
      q_cart[i] = 0.0;
      for (j = 0; j < 3; j++) {
        q_cart[i] += reciprocal_lattice[i][j] * q[j];
      }
    }
  }

  /* inv_dielectric_factor = q_cart . dielectric . q_cart */
  inv_dielectric_factor = 0.0;
  for (i = 0; i < 3; i++) {
    tmp_val = 0.0;
    for (j = 0; j < 3; j++) {
      tmp_val += dielectric[i][j] * q_cart[j];
    }
    inv_dielectric_factor += tmp_val * q_cart[i];
  }
  /* N = num_satom / num_patom = number of prim-cell in supercell */
  /* N is used for Wang's method. */
  dielectric_factor = nac_factor /
    inv_dielectric_factor / num_satom * num_patom;
  dym_get_charge_sum(charge_sum,
                     num_patom,
                     dielectric_factor,
                     q_cart,
                     born);
}

/* Decide whether the NAC correction applies at grid point gp:
 * only when Born charges exist, and either the point is not Gamma or an
 * explicit q_direction overrides the Gamma exemption. */
static int needs_nac(PHPYCONST double (*born)[3][3],
                     PHPYCONST int (*grid_address)[3],
                     const size_t gp,
                     const double *q_direction)
{
  int is_nac;

  if (born) {
    if (grid_address[gp][0] == 0 &&
        grid_address[gp][1] == 0 &&
        grid_address[gp][2] == 0 &&
        q_direction == NULL) {
      is_nac = 0;
    } else {
      is_nac = 1;
    }
  } else {
    is_nac = 0;
  }

  return is_nac;
}
test.c
/* * Copyright (c) 2009, 2010, 2011, ETH Zurich. * All rights reserved. * * This file is distributed under the terms in the attached LICENSE file. * If you do not find this file, copies can be found by writing to: * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group. */ #include <assert.h> #include <stdbool.h> #include <stdlib.h> #include <stdio.h> #include <time.h> #include <assert.h> #include <stdint.h> #include <omp.h> #include <barrelfish/barrelfish.h> #include <bench/bench.h> #include <trace/trace.h> #include <trace_definitions/trace_defs.h> #include <inttypes.h> #define STACK_SIZE (64 * 1024) int main(int argc, char *argv[]) { volatile uint64_t workcnt = 0; int nthreads; debug_printf("bomptest started.\n"); bench_init(); #if CONFIG_TRACE errval_t err = trace_control(TRACE_EVENT(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_START, 0), TRACE_EVENT(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_STOP, 0), 0); assert(err_is_ok(err)); #endif if(argc == 2) { nthreads = atoi(argv[1]); backend_span_domain(nthreads, STACK_SIZE); bomp_custom_init(NULL); omp_set_num_threads(nthreads); } else { assert(!"Specify number of threads"); } trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_START, 0); uint64_t start = bench_tsc(); #pragma omp parallel while(rdtsc() < start + 805000000ULL) { workcnt++; } uint64_t end = bench_tsc(); trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_STOP, 0); printf("done. time taken: %" PRIu64 " cycles.\n", end - start); #if CONFIG_TRACE char *buf = malloc(4096*4096); trace_dump(buf, 4096*4096, NULL); printf("%s\n", buf); #endif for(;;); return 0; }
ll_writable_elements.h
/* * ll_writable_elements.h * LLAMA Graph Analytics * * Copyright 2014 * The President and Fellows of Harvard College. * * Copyright 2014 * Oracle Labs. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/

#ifndef LL_WRITABLE_ELEMENTS_H_
#define LL_WRITABLE_ELEMENTS_H_

#include "llama/ll_common.h"
#include "llama/ll_growable_array.h"
#include "llama/ll_writable_array.h"
#include "llama/ll_mlcsr_helpers.h"

#include <climits>
#include <unordered_map>

// When defined, freed w_node objects are recycled via per-slot lock-free
// free lists instead of a single-threaded path.
#define PARALLEL_FREE_W_NODES

// Initializer helpers for NULL-filled pointer arrays.
#define FOUR_NULLS NULL, NULL, NULL, NULL
#define EIGHT_NULLS FOUR_NULLS, FOUR_NULLS
#define THIRTY_TWO_NULLS EIGHT_NULLS, EIGHT_NULLS, EIGHT_NULLS, EIGHT_NULLS

// Maximum number of per-edge property slots (32-bit and 64-bit each).
#define LL_MAX_EDGE_PROPERTY_ID 16
// When defined, w_edge/w_node objects come from a shared memory pool
// rather than new/delete + free lists.
#define LL_WRITABLE_USE_MEMORY_POOL

// HACK!!!
// NOTE(review): this redefines LL_MAX_EDGE_PROPERTY_ID without an #undef
// (compiler warning), and a value of 0 makes the property arrays in w_edge
// zero-length — presumably only used with LL_ONE_VT builds; confirm.
#ifdef LL_ONE_VT
#define LL_MAX_EDGE_PROPERTY_ID 0
#endif

#ifdef LL_WRITABLE_USE_MEMORY_POOL
#include "llama/ll_mem_helper.h"
#endif

#ifdef LL_WRITABLE_USE_MEMORY_POOL
// Pool geometry: up to 2^15 buffers; per-buffer offsets use the remaining
// bits of a 26-bit (pre-alignment) offset field.
#define LL_W_MEM_POOL_MAX_BUFFERS_BITS 15
#define LL_W_MEM_POOL_MAX_BUFFERS (1ul << (LL_W_MEM_POOL_MAX_BUFFERS_BITS))
#define LL_W_MEM_POOL_MAX_OFFSET_BITS (26 - LL_MEM_POOL_ALIGN_BITS)
#endif

// Decode a packed edge handle into a w_edge* (buffer index in the high
// bits, aligned offset in the low bits) — or, without the pool, the handle
// is simply the pointer itself.
#ifdef LL_WRITABLE_USE_MEMORY_POOL
#define LL_EDGE_GET_WRITABLE(x) ((w_edge*) (__w_pool.pointer( \
		(((x) >> LL_W_MEM_POOL_MAX_OFFSET_BITS) \
		 & (LL_W_MEM_POOL_MAX_BUFFERS - 1)), \
		((x) & ((1ul << LL_W_MEM_POOL_MAX_OFFSET_BITS)-1)) \
		 << LL_MEM_POOL_ALIGN_BITS)))
#else
#define LL_EDGE_GET_WRITABLE(x) ((w_edge*) (LL_EDGE_INDEX(x)))
#endif

// Turn a w_edge pointer into its public edge ID.
#define LL_W_EDGE_CREATE(edge) (((w_edge*) (long) (edge))->we_public_id)

// Compile-time check that the pool coordinates fit in an edge handle.
#ifdef LL_WRITABLE_USE_MEMORY_POOL
#ifdef LL_EDGE32
#	if (32 - LL_BITS_LEVEL) < (LL_W_MEM_POOL_MAX_BUFFERS_BITS + LL_W_MEM_POOL_MAX_OFFSET_BITS)
#	error "Not enough bits to encode the w_edge position in the memory pool"
#	endif
#else
#	if (64 - LL_BITS_LEVEL) < (LL_W_MEM_POOL_MAX_BUFFERS_BITS + LL_W_MEM_POOL_MAX_OFFSET_BITS)
#	error "Not enough bits to encode the w_edge position in the memory pool"
#	endif
#endif
#endif


//==========================================================================//
// Class: w_edge                                                            //
//==========================================================================//

/**
 * A writable edge.
 *
 * NOTE(review): the constructor does not initialize we_target/we_source,
 * we_numerical_id, the public-ID union, or the deletion state
 * (we_deleted / the timestamps) — presumably callers assign these
 * immediately after allocation; confirm before relying on exists() on a
 * freshly constructed edge.
 */
class w_edge {

public:

	union {
		struct {
			/// The target endpoint
			node_t we_target;
			/// The source endpoint
			node_t we_source;
		};
		/// The next edge in the free list (aliases the endpoints above)
		struct w_edge* we_next;
	};

	/// A numerical ID
	edge_t we_numerical_id;

	union {	// TODO Check if using union here is okay
		/// A numerical ID of the reverse edge
		edge_t we_reverse_numerical_id;
		/// The public node ID
		edge_t we_public_id;
	};

#ifdef LL_TIMESTAMPS
	/// The creation timestamp
	long we_timestamp_creation;
	/// The deletion timestamp (LONG_MAX == still alive)
	long we_timestamp_deletion;
#else
	/// Deletion flag
	bool we_deleted;
#endif

	/// The property spinlock (guards we_properties_64 updates)
	ll_spinlock_t we_properties_spinlock;

	/// The 32-bit properties
	uint32_t we_properties_32[LL_MAX_EDGE_PROPERTY_ID];

	/// The 64-bit properties: Property ID --> (destructor, value)
	std::pair<void (*)(const uint64_t&), uint64_t>
		we_properties_64[LL_MAX_EDGE_PROPERTY_ID];

#ifdef LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
	/// The frozen edge that this edge supersedes
	edge_t we_supersedes;
#endif


public:

	/**
	 * Create an instance of type w_edge.
	 * Only the property state is initialized here (see class NOTE above).
	 */
	w_edge() {
		we_properties_spinlock = 0;
		memset(we_properties_32, 0, sizeof(we_properties_32));
		memset(we_properties_64, 0, sizeof(we_properties_64));
#ifdef LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
		we_supersedes = LL_NIL_EDGE;
#endif
	}


	/**
	 * Destructor — releases 64-bit property values via clear().
	 */
	~w_edge() {
		clear();
	}


	/**
	 * Clear: run the registered destructor of every 64-bit property value,
	 * zero all property slots, and reset the edge to the "alive" state.
	 */
	void clear(void) {

		for (int i = 0; i < LL_MAX_EDGE_PROPERTY_ID; i++) {
			std::pair<void (*)(const uint64_t&), uint64_t>& p
				= we_properties_64[i];
			if (p.first != NULL) p.first(p.second);
		}

		memset(we_properties_32, 0, sizeof(we_properties_32));
		memset(we_properties_64, 0, sizeof(we_properties_64));
		we_properties_spinlock = 0;

#ifdef LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
		we_supersedes = LL_NIL_EDGE;
#endif

#ifdef LL_TIMESTAMPS
		we_timestamp_deletion = LONG_MAX;
#else
		we_deleted = false;
#endif
	}


	/**
	 * Determine whether the edge still exists
	 *
	 * @return true if it still exists
	 */
	inline bool exists(void) {
#ifdef LL_TIMESTAMPS
		return we_timestamp_deletion == LONG_MAX;
#else
		return !we_deleted;
#endif
	}


	/**
	 * Get the value of a 32-bit property
	 *
	 * @param property_id the property ID
	 * @return the value, or 0 if not found
	 */
	template <typename T>
	inline T get_property_32(int property_id) {
		return we_properties_32[property_id];
	}


	/**
	 * Set the value of a 32-bit property (no locking — last write wins)
	 *
	 * @param property_id the property ID
	 * @param value the value
	 */
	template <typename T>
	void set_property_32(int property_id, T value) {
		we_properties_32[property_id] = value;
	}


	/**
	 * Atomically add the value to a 32-bit property
	 *
	 * @param property_id the property ID
	 * @param value the value
	 * @return the new value
	 */
	template <typename T>
	T add_property_32(int property_id, T value) {
		return __sync_add_and_fetch(&we_properties_32[property_id], value);
	}


	/**
	 * Get the value of a 64-bit property
	 *
	 * @param property_id the property ID
	 * @return the value, or 0 if not found
	 */
	template <typename T>
	inline T get_property_64(int property_id) {
		return we_properties_64[property_id].second;
	}


	/**
	 * Set the value of a 64-bit property; runs the old value's destructor
	 * (if any) under the property spinlock.
	 *
	 * @param property_id the property ID
	 * @param value the value
	 * @param destructor the destructor
	 */
	template <typename T>
	void set_property_64(int property_id, T value,
			void (*destructor)(const uint64_t&) = NULL) {
		ll_spinlock_acquire(&we_properties_spinlock);
		std::pair<void (*)(const uint64_t&), uint64_t>& p
			= we_properties_64[property_id];
		if (p.first != NULL) p.first(p.second);
		p.first = destructor;
		p.second = value;
		ll_spinlock_release(&we_properties_spinlock);
	}


	/**
	 * Atomically add the value to a 64-bit property. This ASSUMES that the
	 * value destructor is NULL, so please use with caution.
	 *
	 * @param property_id the property ID
	 * @param value the value
	 * @return the new value
	 */
	template <typename T>
	T add_property_64(int property_id, T value) {
		return __sync_add_and_fetch(&we_properties_64[property_id].second,
				value);
	}
};


//==========================================================================//
// Edge Allocator and Deallocator                                           //
//==========================================================================//

/**
 * The writable edge NOOP deallocator
 */
struct w_edge_noop {

	/**
	 * Do nothing with a writable edge
	 *
	 * @param edge the writable edge
	 */
	void operator() (w_edge* edge) {
	}
};


#ifdef LL_WRITABLE_USE_MEMORY_POOL

// The shared pool backing all w_edge/w_node allocations in this mode.
static ll_memory_pool __w_pool;


/**
 * Generic allocator (raw bytes from the shared pool)
 */
struct w_generic_allocator {

	/**
	 * Allocate a new object
	 *
	 * @param size the number of bytes
	 * @return the new object
	 */
	void* operator() (size_t size) {
		return __w_pool.allocate<char>(size);
	}
};


/**
 * The writable edge allocator (pool + placement new)
 */
struct w_edge_allocator {

	/**
	 * Allocate a new writable edge
	 *
	 * @return the writable edge
	 */
	w_edge* operator() (void) {
		w_edge* w = __w_pool.allocate<w_edge>();
		new (w) w_edge();
		return w;
	}

	/**
	 * Allocate a new writable edge, reporting its pool coordinates
	 * (used to build packed edge handles — see LL_EDGE_GET_WRITABLE).
	 *
	 * @param o_chunk the pointer to store the chunk number
	 * @param o_offset the pointer to store the offset within the chunk
	 * @return the writable edge
	 */
	w_edge* operator() (size_t* o_chunk, size_t* o_offset) {
		w_edge* w = __w_pool.allocate<w_edge>(1, o_chunk, o_offset);
		new (w) w_edge();
		return w;
	}
};


/**
 * The writable edge deallocator (pool memory is reclaimed in bulk)
 */
struct w_edge_deallocator {

	/**
	 * Deallocate a writable edge
	 *
	 * @param edge the writable edge
	 */
	void operator() (w_edge* edge) {
		// Nothing to do
		// XXX This does not call destructor on edge property values
	}
};


/**
 * Helper types
 */

/// The out-edges
typedef ll_growable_array<w_edge*, 4, w_edge_deallocator, false,
		w_generic_allocator, ll_nop_deallocator<void*>, false>
	ll_w_out_edges_t;

/// The in-edges
typedef ll_growable_array<w_edge*, 4, w_edge_noop, false,
		w_generic_allocator, ll_nop_deallocator<void*>, false>
	ll_w_in_edges_t;

#else /* LL_WRITABLE_USE_MEMORY_POOL */

// Number of free-list head slots; edges are striped across slots (stride 8)
// to reduce CAS contention.
#define FREE_W_EDGES_LENGTH (4*8)
static w_edge* __free_w_edges[FREE_W_EDGES_LENGTH] = { THIRTY_TWO_NULLS };


/**
 * The writable edge allocator: pop from a lock-free free list if possible,
 * otherwise heap-allocate.
 */
struct w_edge_allocator {

	/**
	 * Allocate a new writable edge
	 *
	 * @return the writable edge
	 */
	w_edge* operator() (void) {
		for (int i = 0; i < FREE_W_EDGES_LENGTH; i += 8) {
			w_edge* x = __free_w_edges[i];
			if (x != NULL) {
				// CAS pop; on success detach the node from the list.
				// NOTE(review): classic lock-free-stack pop — susceptible
				// to ABA in principle; presumably acceptable here.
				if (__sync_bool_compare_and_swap(&__free_w_edges[i],
							x, x->we_next)) {
					x->we_next = (w_edge*) 0;
					return x;
				}
			}
		}
		return new w_edge();
	}
};


/**
 * The writable edge deallocator: clear the edge and CAS-push it onto a
 * free-list slot chosen from the edge's address.
 */
struct w_edge_deallocator {

	/**
	 * Deallocate a writable edge
	 *
	 * @param edge the writable edge
	 */
	void operator() (w_edge* edge) {
		// Slot selection hashes the address to spread contention.
		int i = 8 * (int) (((((long) edge) / sizeof(w_edge)) >> 6)
				% (FREE_W_EDGES_LENGTH / 8));
		// or: int i = omp_get_thread_num() << 3;
		w_edge* n = edge;
		n->clear();
		w_edge* x;
		do {
			x = __free_w_edges[i];
			n->we_next = x;
			__COMPILER_FENCE;
		}
		while (!__sync_bool_compare_and_swap(&__free_w_edges[i], x, n));
		/* If we are guaranteed to be single-threaded, we can switch to:
		w_edge* x = __free_w_edges[i];
		n->we_next = x;
		__free_w_edges[i] = n;*/
	}
};


/**
 * Delete all w_edge's in the free list
 */
inline void delete_free_w_edges(void) {
#pragma omp critical
	{
		for (int i = 0; i < FREE_W_EDGES_LENGTH; i++) {
			w_edge* x = __free_w_edges[i];
			if (x != NULL) {
				__free_w_edges[i] = NULL;
				while (x != NULL) {
					w_edge* next = x->we_next;
					delete x;
					x = next;
				}
			}
		}
	}
}


/**
 * Helper types
 */

/// The out-edges
typedef ll_growable_array<w_edge*, 4, w_edge_deallocator> ll_w_out_edges_t;

/// The in-edges
typedef ll_growable_array<w_edge*, 4, w_edge_noop, false> ll_w_in_edges_t;

#endif


//==========================================================================//
// Class: w_node                                                            //
//==========================================================================//

/**
 * A writable node.
 */
class w_node {

public:

	/// Update lock
	ll_spinlock_t wn_lock;

	/// The out-edges
	ll_w_out_edges_t wn_out_edges;

	/// The in-edges
	ll_w_in_edges_t wn_in_edges;

	union {
		struct {
			// TODO short or unsigned or int?
			/// The number of added and not yet deleted out-edges
			unsigned short wn_out_edges_delta;
			/// The number of added and not yet deleted in-edges
			unsigned short wn_in_edges_delta;
			/// The number of deleted out-edges
			unsigned short wn_num_deleted_out_edges;
			/// The number of deleted in-edges
			unsigned short wn_num_deleted_in_edges;
		};
		/// The "next" pointer in the free list (aliases the counters)
		w_node* wn_next;
	};

#ifdef LL_TIMESTAMPS
	/// The creation timestamp
	long wn_timestamp_creation;
	/// The update timestamp
	long wn_timestamp_update;
	/// The deletion timestamp (LONG_MAX == still alive)
	long wn_timestamp_deletion;
#else
	/// The deletion flag
	bool wn_deleted;
#endif


	/**
	 * Create an instance of w_node.
	 *
	 * NOTE(review): wn_next and the four counters share a union, so the
	 * assignments below repeatedly overwrite each other's bytes; the net
	 * effect is all-zero either way, but reordering them would change that.
	 */
	w_node(void) {
		wn_lock = 0;
		wn_out_edges_delta = 0;
		wn_in_edges_delta = 0;
		wn_next = NULL;
#ifdef LL_TIMESTAMPS
		wn_timestamp_creation = 0;
		wn_timestamp_update = 0;
		wn_timestamp_deletion = LONG_MAX;
#else
		wn_deleted = false;
#endif
		wn_num_deleted_out_edges = 0;
		wn_num_deleted_in_edges = 0;
	}


	/**
	 * Clear the data (reset counters, drop edge arrays, mark alive)
	 */
	void clear(void) {

		wn_lock = 0;
		wn_out_edges_delta = 0;
		wn_in_edges_delta = 0;

		wn_out_edges.clear();
		wn_in_edges.clear();

#ifdef LL_TIMESTAMPS
		wn_timestamp_creation = 0;
		wn_timestamp_update = 0;
		wn_timestamp_deletion = LONG_MAX;
#else
		wn_deleted = false;
#endif

		wn_num_deleted_out_edges = 0;
		wn_num_deleted_in_edges = 0;
	}


	/**
	 * Determine whether the node still exists
	 *
	 * @return true if it still exists
	 */
	inline bool exists(void) {
#ifdef LL_TIMESTAMPS
		return wn_timestamp_deletion == LONG_MAX;
#else
		return !wn_deleted;
#endif
	}
};


//==========================================================================//
// Node Allocator and Deallocator                                           //
//==========================================================================//

#ifdef LL_WRITABLE_USE_MEMORY_POOL

/**
 * The writable node allocator (pool + placement new)
 */
template <typename Output = w_node*>
struct w_node_allocator_ext {

	/**
	 * Allocate a new writable node
	 *
	 * @return the writable node
	 */
	Output operator() (void) {
		w_node* w = __w_pool.allocate<w_node>();
		new (w) w_node();
		return (Output) w;
	}
};

/**
 * The default writable node allocator
 */
typedef struct w_node_allocator_ext<> w_node_allocator;


/**
 * The writable node deallocator (pool memory is reclaimed in bulk)
 */
template <typename Input = w_node*>
struct w_node_deallocator_ext {

	/**
	 * Deallocate a writable node
	 *
	 * @param node the writable node
	 */
	void operator() (Input node) {
		// Nothing to do
		// XXX This does not call destructor on node property values
	}
};

/**
 * The default writable node deallocator
 */
typedef struct w_node_deallocator_ext<> w_node_deallocator;


/**
 * Delete all w_node's in the free list
 */
inline void ll_free_w_pool(void) {
	__w_pool.free();
}

#else /* LL_WRITABLE_USE_MEMORY_POOL */

#define FREE_W_NODES_LENGTH 4
static w_node* __free_w_nodes[FREE_W_NODES_LENGTH] = { FOUR_NULLS };


/**
 * The writable node allocator: pop from a lock-free free list if possible,
 * otherwise heap-allocate.
 */
template <typename Output = w_node*>
struct w_node_allocator_ext {

	/**
	 * Allocate a new writable node
	 *
	 * @return the writable node
	 */
	Output operator() (void) {
		for (int i = 0; i < FREE_W_NODES_LENGTH; i++) {
			w_node* x = __free_w_nodes[i];
			if (x != NULL) {
				if (__sync_bool_compare_and_swap(&__free_w_nodes[i],
							x, x->wn_next)) {
					x->wn_next = (w_node*) 0;
					return (Output) x;
				}
			}
		}
		return (Output) (new w_node());
	}
};

/**
 * The default writable node allocator
 */
typedef struct w_node_allocator_ext<> w_node_allocator;


#ifdef PARALLEL_FREE_W_NODES

/**
 * The writable node deallocator: clear and CAS-push onto an address-hashed
 * free-list slot (thread-safe).
 */
template <typename Input = w_node*>
struct w_node_deallocator_ext {

	/**
	 * Deallocate a writable node
	 *
	 * @param node the writable node
	 */
	void operator() (Input node) {
		//delete (w_node*) node;
		int i = (int) (((((long) node) / sizeof(w_node)) >> 6)
				% FREE_W_NODES_LENGTH);
		w_node* n = (w_node*) node;
		n->clear();
		w_node* x;
		do {
			x = __free_w_nodes[i];
			n->wn_next = x;
			__COMPILER_FENCE;
		}
		while (!__sync_bool_compare_and_swap(&__free_w_nodes[i], x, n));
	}
};

/**
 * The default writable node allocator
 */
typedef struct w_node_deallocator_ext<> w_node_deallocator;


/**
 * Delete all w_node's in the free list
 */
inline void delete_free_w_nodes(void) {
	for (int i = 0; i < FREE_W_NODES_LENGTH; i++) {
		while (true) {
			w_node* x = __free_w_nodes[i];
			if (x == NULL) break;
			// Detach the entire chain atomically, then delete it.
			if (__sync_bool_compare_and_swap(&__free_w_nodes[i], x, NULL)) {
				while (x != NULL) {
					w_node* next = x->wn_next;
					delete x;
					x = next;
				}
				break;
			}
		}
	}
}

#else

/**
 * The writable node deallocator (single-threaded / critical-section only)
 */
template <typename Input = w_node*>
struct w_node_deallocator_ext {

	/**
	 * Deallocate a writable node
	 *
	 * @param node the writable node
	 */
	void operator() (Input node) {
		// Must be run inside a critical section
		w_node* n = (w_node*) node;
		n->clear();
		int i = rand() % FREE_W_NODES_LENGTH;
		//or: (int) ((((long) node) >> 8) % FREE_W_NODES_LENGTH);
		/*n->wn_next = __free_w_nodes;
		__free_w_nodes = n;*/
		n->wn_next = *((w_node**) &__free_w_nodes[i]);
		*((w_node**) &__free_w_nodes[i]) = n;
	}
};

/**
 * The default writable node allocator
 */
typedef struct w_node_deallocator_ext<> w_node_deallocator;


/**
 * Delete all w_node's in the free list
 */
inline void delete_free_w_nodes(void) {
#pragma omp critical
	{
		for (int i = 0; i < FREE_W_NODES_LENGTH; i++) {
			w_node* x = *((w_node**) &__free_w_nodes[i]);
			if (x != NULL) {
				//__free_w_nodes = NULL;
				*((w_node**) &__free_w_nodes[i]) = NULL;
				while (x != NULL) {
					w_node* next = x->wn_next;
					delete x;
					x = next;
				}
			}
		}
	}
}

#endif

#endif /* ! LL_WRITABLE_USE_MEMORY_POOL */

#endif /* LL_WRITABLE_ELEMENTS_H_ */
DRB015-outofbounds-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* DataRaceBench case DRB015 ("yes" = a data race IS present by design;
 * do NOT "fix" it — race checkers are expected to report it).
 *
 * The outermost loop is parallelized, but the inner loop has an
 * out-of-bounds access for b[i][j] when j equals 0. This causes a memory
 * access of a previous row's last element.
 * For example, in a 4x4 array:
 *      j=0  1  2  3
 * i=0    x  x  x  x
 *   1    x  x  x  x
 *   2    x  x  x  x
 *   3    x  x  x  x
 * outer loop: i=2, inner loop: j=0 — the accessed element b[i][j-1]
 * becomes b[2][-1], which in turn is b[1][3] due to linearized row-major
 * storage of the 2-D array. This creates a loop-carried data dependence
 * between i=2 and i=1.
 * Data race pair: b[i][j]@80:7 vs. b[i][j-1]@80:15
 */
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>

int main(int argc, char* argv[])
{
  omprace_init();

  int i,j;
  int len=100;                 /* default matrix dimension */

  if (argc>1)
    len = atoi(argv[1]);       /* optional dimension from argv[1] */

  int n=len, m=len;
  double b[n][m];              /* VLA; b is read uninitialized on purpose */

  /* Intentional race: when j==0, b[i][j-1] reads b[i-1][m-1], the last
     element of the row written by another iteration of the parallel i-loop. */
#pragma omp parallel for private(j)
  for (i=1;i<n;i++)
    for (j=0;j<m;j++) // Note there will be out of bound access
      b[i][j]=b[i][j-1];

  omprace_fini();
  return 0;
}
forces.c
/*
 * Compute forces and accumulate the virial and the potential
 *
 * Lennard-Jones N-body force kernel with minimum-image periodic boundaries.
 *
 * NOTE(review): this function uses ORPHANED OpenMP worksharing directives
 * (master / barrier / for without an enclosing `parallel` here) — it is
 * presumably called from inside a parallel region; confirm against callers,
 * since calling it serially still works but the directives are then no-ops.
 *
 * npart  number of particles; x and f hold 3*npart doubles (x,y,z triples)
 * x      in:  particle positions
 * f      in/out: force accumulators, updated atomically
 * side   box side length (periodic wrap uses +/- side/2)
 * rcoff  interaction cutoff radius
 *
 * Globals epot (potential energy) and vir (virial) are reset by the master
 * thread and accumulated via the `for` reduction.
 */
extern double epot, vir;

void forces(int npart, double x[], double f[], double side, double rcoff)
{
  int i, j;
  double sideh, rcoffs;
  double xi,yi,zi,fxi,fyi,fzi,xx,yy,zz;
  double rd, rrd, rrd2, rrd3, rrd4, rrd6, rrd7, r148;
  double forcex, forcey, forcez;

  /* One thread zeroes the global accumulators; the barrier makes every
     thread wait for that before the reduction loop starts. */
#pragma omp master
  {
    vir = 0.0;
    epot = 0.0;
  }
#pragma omp barrier

  sideh = 0.5*side;      /* half box, for minimum-image wrapping */
  rcoffs = rcoff*rcoff;  /* compare squared distances; avoids sqrt */

  /* i steps by 3 over flattened (x,y,z) triples; dynamic schedule because
     the inner j-loop length shrinks with i (triangular work). */
#pragma omp for reduction(+:epot,vir) schedule(dynamic)
  for (i=0; i<npart*3; i+=3) {
    xi = x[i];
    yi = x[i+1];
    zi = x[i+2];
    fxi = 0.0;
    fyi = 0.0;
    fzi = 0.0;

    /* Each unordered pair (i,j) is visited once (j > i); Newton's third
       law: the same force is added to i and subtracted from j. */
    for (j=i+3; j<npart*3; j+=3) {
      xx = xi-x[j];
      yy = yi-x[j+1];
      zz = zi-x[j+2];
      /* Minimum-image convention. */
      if (xx<-sideh) xx += side;
      if (xx> sideh) xx -= side;
      if (yy<-sideh) yy += side;
      if (yy> sideh) yy -= side;
      if (zz<-sideh) zz += side;
      if (zz> sideh) zz -= side;
      rd = xx*xx+yy*yy+zz*zz;

      if (rd <= rcoffs) {
        /* Lennard-Jones in reduced units via powers of 1/r^2. */
        rrd = 1.0 / rd;
        rrd2 = rrd * rrd;
        rrd3 = rrd2 * rrd;
        rrd4 = rrd2 * rrd2;
        rrd6 = rrd2 * rrd4;
        rrd7 = rrd6 * rrd;
        epot += (rrd6 - rrd3);
        r148 = rrd7 - 0.5 * rrd4;
        vir -= rd * r148;
        forcex = xx * r148;
        fxi += forcex;
        /* f[j..j+2] belongs to another iteration's particle — atomic. */
#pragma omp atomic
        f[j] -= forcex;
        forcey = yy * r148;
        fyi += forcey;
#pragma omp atomic
        f[j + 1] -= forcey;
        forcez = zz * r148;
        fzi += forcez;
#pragma omp atomic
        f[j + 2] -= forcez;
      }
    }
    /* Particle i's own slots can also be touched by other threads (as a
       "j" particle), hence atomic here too. */
#pragma omp atomic
    f[i] += fxi;
#pragma omp atomic
    f[i+1] += fyi;
#pragma omp atomic
    f[i+2] += fzi;
  }
}
templatemath.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ /* * templatemath.h * * Created on: Jan 1, 2016 * Author: agibsonccc */ #ifndef TEMPLATEMATH_H_ #define TEMPLATEMATH_H_ #include <dll.h> #include <pointercast.h> #include <platformmath.h> #include <DataTypeUtils.h> #define BFLOAT16_MAX_VALUE 32737. #define HALF_MAX_VALUE 65504. 
#define FLOAT_MAX_VALUE 3.4028235E38 #define DOUBLE_MAX_VALUE 1.7976931348623157E308 #define FLOAT_MIN_NORMAL 1.17549435e-38 #ifndef M_E #define M_E 2.718281828459 #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif namespace nd4j { #ifdef __CUDACC__ #endif namespace math { template<typename T> math_def inline T nd4j_abs(T value); template<typename T> math_def inline void nd4j_swap(T &val1, T &val2); template<typename T> math_def inline T nd4j_max(T val1, T val2); template<typename T> math_def inline T nd4j_min(T val1, T val2); template <typename T> math_def inline bool nd4j_eq(T val1, T val2, double eps); template<typename T, typename Z> math_def inline Z nd4j_re(T val1, T val2); template<typename T, typename Z> math_def inline Z nd4j_rint(T val1); template<typename T, typename Z> math_def inline Z nd4j_copysign(T val1, T val2); template <typename T, typename Z> math_def inline Z nd4j_softplus(T val); template <typename T> math_def inline T nd4j_rotl(T val, T shift); template <typename T> math_def inline T nd4j_rotr(T val, T shift); //#ifndef __CUDACC__ template<typename X, typename Y, typename Z> math_def inline Z nd4j_dot(X *x, Y *y, int length); //#endif template<typename T, typename Z> math_def inline Z nd4j_ceil(T val1); template<typename T> math_def inline bool nd4j_isnan(T val1); template<typename T> math_def inline bool nd4j_isinf(T val1); template<typename T> math_def inline bool nd4j_isfin(T val1); template<typename T, typename Z> math_def inline Z nd4j_cos(T val); template<typename T, typename Z> math_def inline Z nd4j_cosh(T val); template<typename X, typename Z> math_def inline Z nd4j_exp(X val); template<typename T, typename Z> math_def inline Z nd4j_floor(T val); template<typename X, typename Z> math_def inline Z nd4j_log(X val); template<typename X, typename Y, typename Z> math_def inline Z nd4j_pow(X val, Y val2); template<typename T, typename Z> math_def inline Z nd4j_round(T val); template<typename X, typename Y, typename Z> math_def 
inline Z nd4j_remainder(X num, Y denom);

template<typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X num, Y denom);

template<typename T, typename Z>
math_def inline Z nd4j_erf(T num);

template<typename T, typename Z>
math_def inline Z nd4j_erfc(T num);

// Bit-level reinterpretation helpers (float <-> int32); used by the
// branch-free sign-manipulation routines below.
math_def inline int32_t floatToRawIntBits(float d) {
    union {
        float f;
        int32_t i;
    } tmp;
    tmp.f = d;
    return tmp.i;
}

math_def inline float intBitsToFloat(int32_t i) {
    union {
        float f;
        int32_t i;
    } tmp;
    tmp.i = i;
    return tmp.f;
}

// Multiply x's sign bit by the sign of y (flips x's sign when y < 0).
math_def inline float mulsignf(float x, float y) {
    return intBitsToFloat(floatToRawIntBits(x) ^ (floatToRawIntBits(y) & (1 << 31)));
}

// copysign for float via raw bit manipulation (branch-free).
math_def inline float copysignfk(float x, float y) {
    return intBitsToFloat((floatToRawIntBits(x) & ~(1 << 31)) ^ (floatToRawIntBits(y) & (1 << 31)));
}

// Logistic sigmoid: 1 / (1 + e^-x).
template<typename T, typename Z>
math_def inline Z nd4j_sigmoid(T val) {
    return (Z) 1.0f / ((Z) 1.0f + nd4j_exp<T, Z>(-val));
}

// ELU activation: identity for x >= 0, alpha * (e^x - 1) otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_elu(T val, T alpha) {
    if (val >= (T) 0.f) return val;
    return static_cast<Z>(alpha) * (nd4j_exp<T, Z>(val) - static_cast<Z>(1.0f));
}

// Leaky ReLU: alpha * x for x < 0, x otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_leakyrelu(T val,T alpha) {
    if (val < (T) 0.0f)
        return alpha * val;
    else
        return val;
}

// d/dx ELU: 1 for x >= 0, alpha * e^x otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_eluderivative(T val, T alpha) {
    if (val >= static_cast<T>(0.0f)) return static_cast<Z>(1.0f);
    return static_cast<Z>(alpha) * nd4j_exp<T, Z>(val);
    //return val >= 0.0 ? 1.0 : nd4j_exp(val);
}

template<typename T, typename Z>
math_def inline Z nd4j_sin(T val);

template<typename T, typename Z>
math_def inline Z nd4j_sinh(T val);

// softplus: ln(1 + e^x).
template<typename T, typename Z>
math_def inline Z nd4j_softplus(T val) {
    return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val));
}

// softsign: x / (1 + |x|).
template<typename T, typename Z>
math_def inline Z nd4j_softsign(T val) {
    return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val));
}

template<typename X, typename Z>
math_def inline Z nd4j_sqrt(X val);

template<typename X, typename Z>
math_def inline Z nd4j_tanh(X val);

template<typename T, typename Z>
math_def inline Z nd4j_tan(T val);

template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2);

template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2) {
    return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2));
}

template<typename T, typename Z>
math_def inline Z nd4j_tan(T tval) {
    return p_tan<Z>(static_cast<Z>(tval));
}

// d/dx tanh = 1 - tanh(x)^2.
template<typename T, typename Z>
math_def inline Z nd4j_tanhderivative(T val) {
    Z tanh = nd4j_tanh<T,Z>(val);
    return (Z) 1.0f - tanh * tanh;
}

// d/dx sigmoid = s * (1 - s).
// NOTE(review): declared to return T but computes in Z; the Z result is
// implicitly converted back to T - verify this is intentional.
template <typename T, typename Z>
math_def inline T nd4j_sigmoidderivative(T val) {
    Z sigmoid = nd4j_sigmoid<T,Z>(val);
    return sigmoid * ((Z) 1.0f - sigmoid);
}

// d/dx softsign = 1 / (1 + |x|)^2. Same T-vs-Z return mix as above.
template<typename T, typename Z>
math_def inline T nd4j_softsignderivative(T val) {
    T y = (T) 1.0f + nd4j_abs(val);
    return (Z) 1.0f / (y * y);
}

// signum: -1, 0 or +1 (computed in Z, returned as T).
template<typename T, typename Z>
math_def inline T nd4j_sgn(T val) {
    return val < (T) 0.0f ? (Z) -1.0f : val > (T) 0.0f ? (Z) 1.0f : (Z) 0.0f;
}

template<typename T, typename Z>
math_def inline Z nd4j_sign(T val) {
    return nd4j_sgn<T, Z>(val);
}

template<typename T, typename Z>
math_def inline Z nd4j_signum(T val) {
    return nd4j_sgn<T, Z>(val);
}

template<typename X, typename Z>
math_def inline Z nd4j_gamma(X a);

template<typename X, typename Z>
math_def inline Z nd4j_lgamma(X x);

//#ifndef __CUDACC__
/*
 template<>
 math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) {
 float16 dot = (float16) 0.0f;
 // TODO: since we can't use simd on unions, we might use something else here.
 for(int e = 0; e < length; e++) {
 dot += x[e] * y[e];
 }
 return dot;
 }
 */

// Naive dot product; accumulates in Z to reduce rounding loss for
// narrow input types.
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length) {
    Z dot = (Z)0.0f;

    for(int e = 0; e < length; e++) {
        dot += static_cast<Z>(x[e]) * static_cast<Z>(y[e]);
    }

    return dot;
}
//#endif

template<typename T, typename Z>
math_def inline Z nd4j_acos(T val);

template<typename T, typename Z>
math_def inline Z nd4j_sech(T val);

template<typename T, typename Z>
math_def inline Z nd4j_acosh(T val);

template<typename T, typename Z>
math_def inline Z nd4j_asin(T val);

template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val);

// asinh(x) = ln(sqrt(x^2 + 1) + x).
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val) {
    //Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x)
    return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T,T,Z>(val, (T) 2) + (Z) 1.f) + (Z) val);
}

template<typename T, typename Z>
math_def inline Z nd4j_atan(T val);

template<typename T, typename Z>
math_def inline Z nd4j_atanh(T val);

// |x| specializations. float16 uses the native half negate when available.
template<>
math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
    if (value < (float16) 0.f) {
        return float16(__hneg(value.data));
    } else
        return value;
#else
    return (float16) fabsf((float) value);
#endif
}

template<>
math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) {
    return (bfloat16) fabsf((float) value);
}

template<>
math_def inline float nd4j_abs<float>(float value) {
    return
    fabsf(value); }

template<>
math_def inline double nd4j_abs<double>(double value) {
    return fabs(value);
}

template<>
math_def inline int nd4j_abs<int>(int value) {
    return abs(value);
}

template<>
math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) {
    return llabs(value);
}

// Unsigned and bool are already non-negative: identity.
template<>
math_def inline bool nd4j_abs<bool>(bool value) {
    return value;
}

template<>
math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) {
    return value;
}

template<>
math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) {
    return value;
}

template<>
math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) {
    return value;
}

template<>
math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) {
    return value;
}

// NOTE(review): abs(INT8_MIN)/abs(INT16_MIN) cannot be represented in the
// narrow type; the result wraps back to the minimum - confirm inputs.
template<>
math_def inline int8_t nd4j_abs<int8_t>(int8_t value) {
    return value < 0 ? -value : value;
}

template<>
math_def inline int16_t nd4j_abs<int16_t>(int16_t value) {
    return value < 0 ? -value : value;
}

// NaN tests: float16 compares raw bits against one specific NaN payload;
// float/double use the classic x != x.
template<>
math_def inline bool nd4j_isnan<float16>(float16 value) {
    return *(value.data.getXP()) == 0x7fffU;
}

template<>
math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) {
    return value == bfloat16::nan(); //0x7fffU;
}

template<>
math_def inline bool nd4j_isnan<float>(float value) {
    return value != value;
}

template<>
math_def inline bool nd4j_isnan<double>(double value) {
    return value != value;
}

// Integral types can never be NaN.
template<>
math_def inline bool nd4j_isnan<int>(int value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<int16_t>(int16_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<int8_t>(int8_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<bool>(bool value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) {
    return false;
}

// NOTE(review): half/bfloat "isinf" is a magnitude test against the largest
// finite value, not a bit-pattern test - saturated values also report true.
template<>
math_def inline bool nd4j_isinf<float16>(float16 value) {
    return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) {
    return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
    return isinf(value);
#else
    return std::isinf(value);
#endif
    //return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<double>(double value) {
#ifdef __CUDACC__
    return isinf(value);
#else
    return std::isinf(value);
#endif
    //return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE;
}

// Integral types can never be infinite.
template<>
math_def inline bool nd4j_isinf<int>(int value) {
    return false;
}

template<>
math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isinf<int16_t>(int16_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isinf<int8_t>(int8_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isinf<bool>(bool value) {
    return false;
}

template<>
math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) {
    return false;
}

template<>
math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) {
    return false;
}

// Finite <=> neither NaN nor infinite.
template<typename T>
math_def inline bool nd4j_isfin(T value) {
    return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value);
}

template<>
math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) {
    return (float16) copysignf((float) val1, (float) val2);
}

template<>
math_def inline float nd4j_copysign<float>(float val1, float val2) {
    return copysignf(val1, val2);
}

template<>
math_def inline double
nd4j_copysign<double>(double val1, double val2) {
    return copysign(val1, val2);
}

// Integer copysign: |val1| with the sign of val2 (zero val2 counts as +).
template<>
math_def inline int nd4j_copysign<int>(int val1, int val2) {
    if (val2 < 0)
        return -(nd4j_abs<int>(val1));
    else
        return nd4j_abs<int>(val1);
}

template<>
math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) {
    if (val2 < 0)
        return -(nd4j_abs<Nd4jLong>(val1));
    else
        return nd4j_abs<Nd4jLong>(val1);
}

// max/min: bool specializations are logical OR / AND.
template<>
math_def inline bool nd4j_max(bool val1, bool val2) {
    return (val1 || val2) ? true : false;
}

template<typename T>
math_def inline T nd4j_max(T val1, T val2) {
    return val1 > val2 ? val1 : val2;
}

template<>
math_def inline bool nd4j_min(bool val1, bool val2) {
    return (val1 && val2) ? true : false;
}

template<typename T>
math_def inline T nd4j_min(T val1, T val2) {
    return val1 < val2 ? val1 : val2;
}

// Approximate equality: absolute tolerance plus a Knuth-style relative
// tolerance; same-signed infinities compare equal.
template <typename T>
math_def inline bool nd4j_eq(T d1, T d2, double eps) {
    if (nd4j::math::nd4j_isinf<T>(d1) && nd4j::math::nd4j_isinf<T>(d2)) {
        if (d1 > 0 && d2 > 0)
            return true;
        else if (d1 < 0 && d2 < 0)
            return true;
        else
            return false;
    }

    auto diff = static_cast<double>(nd4j::math::nd4j_abs<T>(d1 - d2));

    // works well except in the range of very large numbers
    if (diff <= eps)
        return true;

    // Knuth approach
    // works well except in the range of very small numbers
    if (diff <= nd4j::math::nd4j_max<double>(nd4j::math::nd4j_abs<double>(static_cast<double>(d1)), nd4j::math::nd4j_abs<double>(static_cast<double>(d2))) * eps)
        return true;

    return false;
}

// Thin wrappers that forward to the platform primitives (p_*).
template <typename X, typename Z>
math_def inline Z nd4j_ceil(X val) {
    return static_cast<Z>(p_ceil<X>(val));
}

template <typename X, typename Z>
math_def inline Z nd4j_round(X val) {
    return static_cast<Z>(p_round<X>(val));
}

template <typename X, typename Z>
math_def inline Z nd4j_asin(X val) {
    return p_asin<Z>(static_cast<Z>(val));
}

template <typename X, typename Z>
math_def inline Z nd4j_atan(X val) {
    return p_atan<Z>(static_cast<Z>(val));
}

template <typename X, typename Z>
math_def inline Z
nd4j_atanh(X val) {
    return p_atanh<Z>(static_cast<Z>(val));
}

template <typename X, typename Z>
math_def inline Z nd4j_cosh(X val) {
    return p_cosh<Z>(static_cast<Z>(val));
}

template <typename X, typename Z>
math_def inline Z nd4j_rint(X val) {
    return p_rint<X>(val);
}

template <typename X, typename Z>
math_def inline Z nd4j_sinh(X val) {
    return p_sinh<Z>(static_cast<Z>(val));
}

template <typename X, typename Z>
math_def inline Z nd4j_acos(X val) {
    return p_acos<Z>(static_cast<Z>(val));
}

// sech(x) = 1 / cosh(x).
template <typename X, typename Z>
math_def inline Z nd4j_sech(X val) {
    return static_cast<Z>(1) / nd4j_cosh<X,Z>(val);
}

template <typename X, typename Z>
math_def inline Z nd4j_acosh(X val) {
    return p_acosh<Z>(static_cast<Z>(val));
}

template <typename X, typename Z>
math_def inline Z nd4j_cos(X val) {
    return p_cos<Z>(static_cast<Z>(val));
}

template <typename X, typename Z>
math_def inline Z nd4j_exp(X val) {
    return p_exp<X>(val);
}

template<typename X, typename Z>
math_def inline Z nd4j_floor(X val) {
    return static_cast<Z>(p_floor<X>(val));
}

template<typename X, typename Z>
math_def inline Z nd4j_log(X val) {
    return static_cast<Z>(p_log<X>(val));
}

/**
 * This func is special case - it must return floating point value, and optionally Y arg can be floating point argument
 * @tparam X
 * @tparam Y
 * @tparam Z
 * @param val
 * @param val2
 * @return
 */
template <>
math_def inline float nd4j_pow(float val, float val2) {
    return p_pow<float>(val, val2);
}

template <typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2) {
    return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}

/**
 * LogGamma(a) - float point extension of ln(n!)
 **/
// ln(Gamma(x)). Small arguments go through gamma() directly; large
// arguments use the Abramowitz & Stegun asymptotic series.
template <typename X, typename Z>
math_def inline Z nd4j_lgamma(X x) {
//    if (x <= X(0.0))
//    {
//        std::stringstream os;
//        os << "Logarithm of Gamma has sence only for positive values, but " << x << " was given.";
//        throw std::invalid_argument( os.str() );
//    }
    if (x < X(12.0)) {
        return nd4j_log<Z,Z>(nd4j_gamma<X,Z>(x));
    }

    // Abramowitz and Stegun 6.1.41
    // Asymptotic series should be good to at least 11 or 12 figures
    // For error analysis, see Whittiker and Watson
    // A Course in Modern Analysis (1927), page 252
    static const double c[8] = { 1.0/12.0, -1.0/360.0, 1.0/1260.0, -1.0/1680.0, 1.0/1188.0, -691.0/360360.0, 1.0/156.0, -3617.0/122400.0 };

    double z = Z(1.0 / Z(x * x));
    double sum = c[7];

    for (int i = 6; i >= 0; i--) {
        sum *= z;
        sum += c[i];
    }

    double series = sum / Z(x);

    static const double halfLogTwoPi = 0.91893853320467274178032973640562;

    return Z((double(x) - 0.5) * nd4j_log<X,double>(x) - double(x) + halfLogTwoPi + series);
}

// Relative error |a-b| / (|a|+|b|); defined as 0 when both are 0.
template<typename T>
math_def inline T nd4j_re(T val1, T val2) {
    if (val1 == (T) 0.0f && val2 == (T) 0.0f)
        return (T) 0.0f;

    return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2));
}

template <typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X val, Y val2) {
    return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}

template <typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X val, Y val2) {
    return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}

template <typename X, typename Z>
math_def inline Z nd4j_sin(X val) {
    return p_sin<Z>(static_cast<Z>(val));
}

template <typename X, typename Z>
math_def inline Z nd4j_sqrt(X val) {
    return p_sqrt<Z>(static_cast<Z>(val));
}

// tanh split into two forms, each numerically stable on one half-line:
// neg_tanh uses (e^2x - 1)/(e^2x + 1), pos_tanh uses (1 - e^-2x)/(1 + e^-2x).
template <typename X>
math_def inline X neg_tanh(X val) {
    X o = static_cast<X>(1.0f);
    X t = static_cast<X>(2.0f);
    X e = static_cast<X>(M_E);
    auto p = nd4j::math::nd4j_pow<X, X, X>(e, val * t);
    return (p - o)/ (p + o);
}

template <typename X>
math_def inline X pos_tanh(X val) {
    X o = static_cast<X>(1.0f);
    X t = static_cast<X>(-2.0f);
    X e = static_cast<X>(M_E);
    auto p = nd4j::math::nd4j_pow<X, X, X>(e, val * t);
    return (o - p) / (o + p);
}

// Sign-folded float tanh helper: computes tanh(|val|) given sign = +/-1.
math_def inline float neu_tanh(float val, float sign) {
    float e(M_E);
    float av = sign * val;
    auto p = nd4j::math::nd4j_pow<float, float, float>(e, -av * 2.f);
    return (1 - p) / (1 + p);
}

template <>
math_def inline float nd4j_tanh(float val) {
    float sign = copysignfk(1.0f, val);
    return sign * neu_tanh(val, sign);
}

template <typename X, typename Z>
math_def inline Z nd4j_tanh(X val) {
    return val <= 0 ? neg_tanh(val) : pos_tanh(val);
}

template <typename T>
math_def inline T nd4j_rotl(T val, T shift) {
    return p_rotl<T>(val, shift);
}

template <typename T>
math_def inline T nd4j_rotr(T val, T shift) {
    return p_rotr<T>(val, shift);
}

template <typename X, typename Z>
math_def inline Z nd4j_erf(X val) {
    return p_erf<Z>(static_cast<Z>(val));
}

template <typename X, typename Z>
math_def inline Z nd4j_erfc(X val) {
    return p_erfc<Z>(static_cast<Z>(val));
}

template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2) {
    T temp = val1;
    val1=val2;
    val2=temp;
};

// Gamma(a), piecewise approximation (see interval comments below).
template <typename X, typename Z>
math_def inline Z nd4j_gamma(X a) {
//        nd4j_lgamma<X,Z>(a);
//        return (Z)std::tgamma(a);
    // Split the function domain into three intervals:
    // (0, 0.001), [0.001, 12), and (12, infinity)

    ///////////////////////////////////////////////////////////////////////////
    // First interval: (0, 0.001)
    //
    // For small a, 1/Gamma(a) has power series a + gamma a^2  - ...
    // So in this range, 1/Gamma(a) = a + gamma a^2 with error on the order of a^3.
    // The relative error over this interval is less than 6e-7.
    const double eulerGamma = 0.577215664901532860606512090; // Euler's gamma constant

    if (a < X(0.001))
        return Z(1.0 / ((double)a * (1.0 + eulerGamma * (double)a)));

    ///////////////////////////////////////////////////////////////////////////
    // Second interval: [0.001, 12)

    if (a < X(12.0)) {
        // The algorithm directly approximates gamma over (1,2) and uses
        // reduction identities to reduce other arguments to this interval.

        double y = (double)a;
        int n = 0;
        bool argWasLessThanOne = y < 1.0;

        // Add or subtract integers as necessary to bring y into (1,2)
        // Will correct for this below
        if (argWasLessThanOne) {
            y += 1.0;
        } else {
            n = static_cast<int>(floor(y)) - 1;  // will use n later
            y -= n;
        }

        // numerator coefficients for approximation over the interval (1,2)
        static const double p[] = {
            -1.71618513886549492533811E+0,
            2.47656508055759199108314E+1,
            -3.79804256470945635097577E+2,
            6.29331155312818442661052E+2,
            8.66966202790413211295064E+2,
            -3.14512729688483675254357E+4,
            -3.61444134186911729807069E+4,
            6.64561438202405440627855E+4
        };

        // denominator coefficients for approximation over the interval (1,2)
        static const double q[] = {
            -3.08402300119738975254353E+1,
            3.15350626979604161529144E+2,
            -1.01515636749021914166146E+3,
            -3.10777167157231109440444E+3,
            2.25381184209801510330112E+4,
            4.75584627752788110767815E+3,
            -1.34659959864969306392456E+5,
            -1.15132259675553483497211E+5
        };

        double num = 0.0;
        double den = 1.0;

        double z = y - 1;
        for (auto i = 0; i < 8; i++) {
            num = (num + p[i]) * z;
            den = den * z + q[i];
        }
        double result = num / den + 1.0;

        // Apply correction if argument was not initially in (1,2)
        if (argWasLessThanOne) {
            // Use identity gamma(z) = gamma(z+1)/z
            // The variable "result" now holds gamma of the original y + 1
            // Thus we use y-1 to get back the orginal y.
            result /= (y - 1.0);
        } else {
            // Use the identity gamma(z+n) = z*(z+1)* ... *(z+n-1)*gamma(z)
            for (auto i = 0; i < n; i++)
                result *= y++;
        }

        return Z(result);
    }

    ///////////////////////////////////////////////////////////////////////////
    // Third interval: [12, infinity)

    if (a > 171.624) {
        // Correct answer too large to display. Force +infinity.
        return Z(DOUBLE_MAX_VALUE);
        // return DataTypeUtils::infOrMax<Z>();
    }

    return nd4j::math::nd4j_exp<Z,Z>(nd4j::math::nd4j_lgamma<X,Z>(a));
}

// Lower regularized incomplete gamma P(a, x) via its power series.
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igamma(X a, Y x) {
    Z aim = nd4j_pow<X, X, Z>(x, a) / (nd4j_exp<X, Z>(x) * nd4j_gamma<Y, Z>(a));

    auto sum = Z(0.);
    auto denom = Z(1.);
    if (a <= X(0.000001))
        //throw std::runtime_error("Cannot calculate gamma for a zero val.");
        return Z(0);

    for (int i = 0; Z(1./denom) > Z(1.0e-12); i++) {
        denom *= (a + i);
        sum += nd4j_pow<X, int, Z>(x, i) / denom;
    }

    return aim * sum;
}

// Upper regularized incomplete gamma Q(a, x) = 1 - P(a, x).
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igammac(X a, Y x) {
    return Z(1.) - nd4j_igamma<X, Y, Z>(a, x);
}

#ifdef __CUDACC__
namespace atomics {

template <typename T>
inline __device__ T nd4j_atomicAdd(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicSub(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMul(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicDiv(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMin(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMax(T* address, T val);

template <>
inline __device__ int32_t nd4j_atomicMin<int32_t>(int32_t* address, int32_t val) {
    return atomicMin(address, val);
}

template <>
inline __device__ uint32_t nd4j_atomicMin<uint32_t>(uint32_t* address, uint32_t val) {
    return atomicMin(address, val);
}

// float min via atomicCAS loop on the int-bit reinterpretation.
template <>
inline __device__ float nd4j_atomicMin<float>(float* address, float val) {
    int* address_as_ull = (int*)address;
    int old = __float_as_int(val), assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_min(val, __int_as_float(assumed))));
    } while (assumed != old);
    return __int_as_float(old);
}

template <>
inline __device__ double nd4j_atomicMin<double>(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = __double_as_longlong(val), assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_min(val, __longlong_as_double(assumed))));
    } while (assumed != old);
    return __longlong_as_double(old);
}

template <>
inline __device__ uint64_t nd4j_atomicMin<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
    return atomicMin((unsigned long long*)address, (unsigned long long)val);
#else
    // NOTE(review): __double_as_longlong applied to an *integer* val
    // bit-reinterprets it as a double - seed looks wrong for the uint64
    // fallback; compare with the Nd4jLong variant below which casts.
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = __double_as_longlong(val), assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, math::nd4j_min((unsigned long long)val, assumed));
    } while (assumed != old);
    return old;
#endif
}

// NOTE(review): the fast path compares via *unsigned* atomicMin although
// Nd4jLong is signed - wrong ordering for negative values; verify callers.
template <>
inline __device__ Nd4jLong nd4j_atomicMin<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
#if __CUDA_ARCH__ >= 350
    return atomicMin((unsigned long long*)address, (unsigned long long)val);
#else
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = (unsigned long long)val, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, math::nd4j_min(val, (Nd4jLong)assumed));
    } while (assumed != old);
    return old;
#endif
}

// NOTE(review): not actually atomic - it does a plain read of *address,
// an atomicMin on a *stack temp*, then a plain write back. Concurrent
// updates to the same 16-bit location can be lost.
template <>
inline __device__ int16_t nd4j_atomicMin<int16_t>(int16_t* address, int16_t val) {
    int32_t temp = *address;
    *address = atomicMin(&temp, (int)val);
    return *address;
}

// bfloat16/float16 delegate to the (non-atomic, see above) int16 variant;
// for floats this also compares bit patterns, not numeric values.
template <>
inline __device__ bfloat16 nd4j_atomicMin<bfloat16>(bfloat16* address, bfloat16 val) {
    return bfloat16(nd4j_atomicMin<int16_t>(&address->_data, val._data));
}

template <>
inline __device__ float16 nd4j_atomicMin<float16>(float16* address, float16 val) {
    return float16(nd4j_atomicMin<int16_t>(reinterpret_cast<int16_t*>(&address->data), (int16_t)val.data));
}

template <>
inline __device__ int32_t nd4j_atomicMax<int32_t>(int32_t* address, int32_t val) {
    return atomicMax(address, val);
}

template <>
inline __device__ uint32_t nd4j_atomicMax<uint32_t>(uint32_t* address, uint32_t val) {
    return atomicMax(address, val);
}

template <>
inline __device__ double nd4j_atomicMax<double>(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = __double_as_longlong(val), assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_max(val, __longlong_as_double(assumed))));
    } while (assumed != old);
    return __longlong_as_double(old);
}

template <>
inline __device__ float nd4j_atomicMax<float>(float* address, float val) {
    int* address_as_ull = (int*)address;
    int old = __float_as_int(val), assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_max(val, __int_as_float(assumed))));
    } while (assumed != old);
    return __int_as_float(old);
}

// NOTE(review): same non-atomic read/stack-temp/write pattern as the
// int16 atomicMin above - applies to all 8/16-bit min/max variants here.
template <>
inline __device__ uint8_t nd4j_atomicMin<uint8_t>(uint8_t* address, uint8_t val) {
    uint32_t temp = *address;
    *address = atomicMin(&temp, (uint32_t)val);
    return *address;
}

template <>
inline __device__ int8_t nd4j_atomicMin<int8_t>(int8_t* address, int8_t val) {
    int32_t temp = *address;
    *address = atomicMin(&temp, (int)val);
    return *address;
}

template <>
inline __device__ uint16_t nd4j_atomicMin<uint16_t>(uint16_t* address, uint16_t val) {
    uint32_t temp = *address;
    *address = atomicMin(&temp, (uint32_t)val);
    return *address;
}

template <>
inline __device__ uint8_t nd4j_atomicMax<uint8_t>(uint8_t* address, uint8_t val) {
    uint32_t temp = *address;
    *address = atomicMax(&temp, (uint32_t)val);
    return *address;
}

template <>
inline __device__ int8_t nd4j_atomicMax<int8_t>(int8_t* address, int8_t val) {
    int32_t temp = *address;
    *address =
    atomicMax(&temp, (int)val);
    return *address;
}

template <>
inline __device__ uint16_t nd4j_atomicMax<uint16_t>(uint16_t* address, uint16_t val) {
    uint32_t temp = *address;
    *address = atomicMax(&temp, (uint32_t)val);
    return *address;
}

template <>
inline __device__ int16_t nd4j_atomicMax<int16_t>(int16_t* address, int16_t val) {
    int32_t temp = *address;
    *address = atomicMax(&temp, (int32_t)val);
    return *address;
}

// 16-bit max done atomically on the containing 32-bit word: pick the half
// that holds this element, update it, CAS the whole word until stable.
template <>
inline __device__ float16 nd4j_atomicMax<float16>(float16* address, float16 val) {
    auto address_as_ull = (int*) address;

    long addr = (long) address;
    bool misaligned = addr & 0x3;

    if (misaligned)
        address_as_ull = (int *) (address - 1);

    PAIR old, assumed, fresh;

    old.W = *address_as_ull;
    do {
        if (!misaligned) {
            float16 res = nd4j_max((float16) old.B.H, val);
            fresh.B.H = res.data;
            fresh.B.L = old.B.L;
        } else {
            float16 res = nd4j_max((float16) old.B.L, val);
            fresh.B.L = res.data;
            fresh.B.H = old.B.H;
        }

        assumed.W = old.W;
        old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
    } while (assumed.W != old.W);

    if (!misaligned) return old.B.H;
    else return old.B.L;
}

template <>
inline __device__ bfloat16 nd4j_atomicMax<bfloat16>(bfloat16* address, bfloat16 val) {
    auto address_as_ull = (int*) address;

    long addr = (long)(address);
    bool misaligned = addr & 0x3;

    if (misaligned)
        address_as_ull = (int *) (address - 1);

    BPAIR old, assumed, fresh;

    old.W = *address_as_ull;
    do {
        if (!misaligned) {
            bfloat16 res = nd4j_max(old.B.H, val);
            fresh.B.H = res;
            fresh.B.L = old.B.L;
        } else {
            bfloat16 res = nd4j_max(old.B.L, val);
            fresh.B.L = res;
            fresh.B.H = old.B.H;
        }

        assumed.W = old.W;
        old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
    } while (assumed.W != old.W);

    if (!misaligned) return old.B.H;
    else return old.B.L;
}

template <>
inline __device__ uint64_t nd4j_atomicMax<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
    return atomicMax((unsigned long long*)address, (unsigned long long)val);
#else
    // NOTE(review): __double_as_longlong on an integer val reinterprets its
    // bits as a double - suspicious seed, same issue as the uint64 atomicMin.
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = __double_as_longlong(val), assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, math::nd4j_max((unsigned long long)val, assumed));
    } while (assumed != old);
    return old;
#endif
}

template <>
inline __device__ Nd4jLong nd4j_atomicMax<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
    unsigned long long int* address_as_ull = (unsigned long long int *) address;

    //return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, (unsigned long long)nd4j_max(val, (Nd4jLong)assumed));
    } while (assumed != old);
    return old;
}

// Classic CAS-loop double atomicAdd (pre-sm_60 style).
template <>
inline __device__ double nd4j_atomicAdd<double>(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int *) address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

template <>
inline __device__ Nd4jLong nd4j_atomicAdd<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
    unsigned long long int* address_as_ull = (unsigned long long int *) address;

    //return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, val + assumed);
    } while (assumed != old);
    return old;
}

// NOTE(review): `old`/`assumed` are declared `unsigned long int` while the
// CAS works on unsigned long long - truncation hazard on ILP32/LLP64
// targets; compare with the Nd4jLong variant above.
template <>
inline __device__ long nd4j_atomicAdd<long>(long* address, long val) {
    unsigned long long* address_as_ull = (unsigned long long int *) address;

//    return atomicAdd(address, val);
    unsigned long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, val + assumed);
    } while (assumed != old);
    return old;
}

template <>
inline __device__ uint32_t nd4j_atomicAdd<uint32_t>(uint32_t* address, uint32_t val) {
    return atomicAdd(address, val);
}

template <>
inline __device__ uint64_t nd4j_atomicAdd<uint64_t>(uint64_t* address, uint64_t val) {
//    unsigned long long* address_as_ull = (unsigned long long int *) address;
//
////    return atomicAdd(address, val);
//    unsigned long int old = *address_as_ull, assumed;
//    do {
//        assumed = old;
//        old = atomicCAS(address_as_ull, assumed, val + assumed);
//    } while (assumed != old);
//    return old;
    return (uint64_t)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}

// half add: native atomicAdd on sm_70+/CUDA 10, otherwise CAS on the
// containing 32-bit word.
// NOTE(review): the sm_70 branch never returns a value from a non-void
// function - looks like a missing `return` / old-value plumbing; verify.
template <>
inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) {
#if __CUDA_ARCH__ >= 700 && defined(CUDA_10)
    atomicAdd(reinterpret_cast<__half*>(address), val.data);
#else
    auto address_as_ull = (int*) address;

    long addr = (long) address;
    bool misaligned = addr & 0x3;

    if (misaligned)
        address_as_ull = (int *) (address - 1);

    PAIR old, assumed, fresh;

    old.W = *address_as_ull;
    do {
        if (!misaligned) {
            float16 res = ((float16) old.B.H) + val;
            fresh.B.H = res.data;
            fresh.B.L = old.B.L;
        } else {
            float16 res = ((float16) old.B.L) + val;
            fresh.B.L = res.data;
            fresh.B.H = old.B.H;
        }

        assumed.W = old.W;
        old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
    } while (assumed.W != old.W);

    if (!misaligned) return old.B.H;
    else return old.B.L;
#endif
}

template <>
inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) {
    auto address_as_ull = (int*) address;

    auto addr = (long)(address);
    bool misaligned = addr & 0x3;

    if (misaligned)
        address_as_ull = (int *) (address - 1);

    BPAIR old, assumed, fresh;

    old.W = *address_as_ull;
    do {
        if (!misaligned) {
            bfloat16 res = old.B.H + val;
            fresh.B.H = res;
            fresh.B.L = old.B.L;
        } else {
            bfloat16 res = old.B.L + val;
            fresh.B.L = res;
            fresh.B.H = old.B.H;
        }

        assumed.W = old.W;
        old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
    } while (assumed.W != old.W);

    if (!misaligned) return old.B.H;
    else return old.B.L;
}

// Generic 16-bit atomic add via CAS on the aligned 32-bit word that
// contains the element.
template <typename T>
static inline __device__ T internal_16bit_atomicAdd(T* address, T val) {
    size_t shift = ((size_t)address & 2);
    int *base_address = (int *)((char*)address - shift);

    union I16PAIR {
        struct {
            T H;
            T L;
        } B;
        int W;

        __host__ __device__
        I16PAIR() {};

        __host__ __device__
        ~I16PAIR() {};
    };

    I16PAIR pairNew, pairOld, pairAssumed;

    if (reinterpret_cast<int*>(address) == base_address) {
        pairOld.B.L = val;
        do {
            pairNew.B.L = pairOld.B.L;
            pairNew.B.H = pairOld.B.H + val;
            pairAssumed.W = pairOld.W;

            pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
        } while (pairAssumed.W != pairOld.W);

        return (T) pairOld.B.H;
    } else {
        pairOld.B.H = val;
        do {
            pairNew.B.H = pairOld.B.H;
            pairNew.B.L = pairOld.B.L + val;
            pairAssumed.W = pairOld.W;
            pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);

        } while (pairAssumed.W != pairOld.W);

        return (T) pairOld.B.L;
    }
}

template <>
inline __device__ int16_t nd4j_atomicAdd<int16_t>(int16_t* address, int16_t val) {
    return internal_16bit_atomicAdd<int16_t>(address, val);
}

template <>
inline __device__ uint16_t nd4j_atomicAdd<uint16_t>(uint16_t* address, uint16_t val) {
    return internal_16bit_atomicAdd<uint16_t>(address, val);
}

// NOTE(review): the 8-bit and bool adds below are NOT atomic on the target
// location - they atomicAdd a stack temp, then plain-store the result.
template <>
inline __device__ int8_t nd4j_atomicAdd<int8_t>(int8_t* address, int8_t val) {
    int res = *address;
    atomicAdd(&res, (int)val);
    *address = res;
    return *address;
}

template <>
inline __device__ uint8_t nd4j_atomicAdd<uint8_t>(uint8_t* address, uint8_t val) {
    int res = *address;
    atomicAdd(&res, (int)val);
    *address = res;
    return *address;
}

template <>
inline __device__ bool nd4j_atomicAdd<bool>(bool* address, bool val) {
    *address += (val);
    return *address;
}

// Sub/Div are expressed through Add/Mul with the inverse operand.
template <>
inline __device__ double nd4j_atomicSub<double>(double* address, double val) {
    return nd4j_atomicAdd<double>(address, -val);
}

template <>
inline __device__ double nd4j_atomicMul<double>(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*) address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val * __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

template <>
inline __device__ double nd4j_atomicDiv<double>(double* address, double val) {
    return nd4j_atomicMul<double>(address, 1./val);
}

template <>
inline __device__ float nd4j_atomicAdd<float>(float* address, float val) {
    return atomicAdd(address,val);
}

//template <>
//inline __device__ int nd4j_atomicAdd<int>(int* address, int val) {
//    return atomicAdd(address, val);
//}

template <>
inline __device__ int32_t nd4j_atomicAdd<int32_t>(int32_t* address, int32_t val) {
    return (int32_t)atomicAdd((int*)address, (int)val);
}

template <>
inline __device__ float nd4j_atomicSub<float>(float* address, float val) {
    return nd4j_atomicAdd<float>(address, -val);
}

template <>
inline __device__ float16 nd4j_atomicSub<float16>(float16* address, float16 val) {
    return nd4j_atomicAdd<float16>(address, -val);
}

template <>
inline __device__ bfloat16 nd4j_atomicSub<bfloat16>(bfloat16* address, bfloat16 val) {
    return nd4j_atomicAdd<bfloat16>(address, -val);
}

template <>
inline __device__ float nd4j_atomicMul<float>(float* address, float val) {
    int* address_as_ull = ( int*)address;
    int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __int_as_float(assumed)));
    } while (assumed != old);
    return __int_as_float(old);
}

// 8-bit atomic multiply: CAS on the aligned 32-bit word, byte selected
// via __byte_perm. (Definition continues past this chunk.)
template <>
inline __device__ int8_t nd4j_atomicMul<int8_t>(int8_t* address, int8_t val) {
    unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
    unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
    unsigned int sel = selectors[(size_t)address & 3];
    unsigned int old, assumed, mul, new_;

    old = *base_address;

    do {
        assumed = old;
        mul = val * (int8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
        new_ = __byte_perm(old, mul, sel);

        if (new_ == old)
            break;

        old = atomicCAS(base_address, assumed, new_);
    } while (assumed !=
old); return (int8_t)old; } template <> inline __device__ unsigned char nd4j_atomicMul<unsigned char>(unsigned char* address, unsigned char val) { unsigned int *base_address = (unsigned int *)((size_t)address & ~3); unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int sel = selectors[(size_t)address & 3]; unsigned int old, assumed, mul, new_; old = *base_address; do { assumed = old; mul = val * (uint8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440); new_ = __byte_perm(old, mul, sel); if (new_ == old) break; old = atomicCAS(base_address, assumed, new_); } while (assumed != old); return (uint8_t)old; } template <typename T> static inline __device__ T internal_16bit_atomicMul(T* address, T val) { size_t shift = ((size_t)address & 2); int *base_address = (int *)((char*)address - shift); union I16PAIR { struct { T H; T L; } B; int W; __host__ __device__ I16PAIR() {}; __host__ __device__ ~I16PAIR() {}; }; I16PAIR pairNew, pairOld, pairAssumed; if (reinterpret_cast<int*>(address) == base_address) { pairOld.B.L = val; do { pairNew.B.L = pairOld.B.L; pairNew.B.H = pairOld.B.H * val; pairAssumed.W = pairOld.W; pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W); } while (pairAssumed.W != pairOld.W); return (T) pairOld.B.H; } else { pairOld.B.H = val; do { pairNew.B.H = pairOld.B.H; pairNew.B.L = pairOld.B.L * val; pairAssumed.W = pairOld.W; pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W); } while (pairAssumed.W != pairOld.W); return (T) pairOld.B.L; } } template <> inline __device__ int16_t nd4j_atomicMul<int16_t>(int16_t* address, int16_t val) { return internal_16bit_atomicMul<int16_t>(address, val); } template <> inline __device__ uint16_t nd4j_atomicMul<uint16_t>(uint16_t* address, uint16_t val) { return internal_16bit_atomicMul<uint16_t>(address, val); } template <> inline __device__ int nd4j_atomicMul<int>(int* address, int val) { int* res_address = address; int old = *res_address, assumed; do { assumed = old; old = 
atomicCAS(res_address, assumed, val * assumed); } while (assumed != old); return old; } template <> inline __device__ unsigned int nd4j_atomicMul<unsigned int>(unsigned int* address, unsigned int val) { unsigned int* res_address = address; unsigned int old = *res_address, assumed; do { assumed = old; old = atomicCAS(res_address, assumed, val * assumed); } while (assumed != old); return old; } template <> inline __device__ int64_t nd4j_atomicMul<int64_t>(int64_t* address, int64_t val) { unsigned long long int* res_address = (unsigned long long int*)address; unsigned long long int old = *res_address, assumed; do { assumed = old; old = atomicCAS(res_address, assumed, val * assumed); } while (assumed != old); return (int64_t)old; } template <> inline __device__ uint64_t nd4j_atomicMul<uint64_t>(uint64_t* address, uint64_t val) { unsigned long long int* res_address = (unsigned long long int*)address; unsigned long long int old = *res_address, assumed; do { assumed = old; old = atomicCAS(res_address, assumed, val * assumed); } while (assumed != old); return (uint64_t)old; } #if !defined(_WIN32) && !defined(_WIN64) template <> inline __device__ Nd4jLong nd4j_atomicMul<Nd4jLong>(Nd4jLong* address, Nd4jLong val) { unsigned long long int* res_address = (unsigned long long*)address; unsigned long long int old = *res_address, assumed; do { assumed = old; old = atomicCAS(res_address, assumed, val * assumed); } while (assumed != old); return (Nd4jLong)old; } #endif template <> inline __device__ bfloat16 nd4j_atomicMul<bfloat16>(bfloat16* address, bfloat16 val) { return internal_16bit_atomicMul<bfloat16>(address, val); } template <> inline __device__ float16 nd4j_atomicMul<float16>(float16* address, float16 val) { return internal_16bit_atomicMul<float16>(address, val); } template <> inline __device__ float nd4j_atomicDiv<float>(float* address, float val) { return nd4j_atomicMul<float>(address, 1.f / val); } template <> inline __device__ float16 nd4j_atomicDiv<float16>(float16* 
address, float16 val) { return internal_16bit_atomicMul<float16>(address, (float16) 1.f / val); } template <> inline __device__ bfloat16 nd4j_atomicDiv<bfloat16>(bfloat16* address, bfloat16 val) { return internal_16bit_atomicMul<bfloat16>(address, (bfloat16) 1 / val); } } #endif } } #ifdef _OPENMP #ifndef MAX_FLOAT #define MAX_FLOAT 1e37 #endif #pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \ omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\ initializer (omp_priv=-MAX_FLOAT) #pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \ omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\ initializer (omp_priv=MAX_FLOAT) #pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\ initializer (omp_priv=0) #pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\ initializer (omp_priv=0) #pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_max(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) ) #pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_min(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) ) #pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_abs(omp_in) + nd4j::math::nd4j_abs(omp_out))\ initializer (omp_priv=0) #pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = omp_in + omp_out)\ initializer (omp_priv=0) 
#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = omp_in * omp_out)\ initializer (omp_priv=1) #endif #endif /* TEMPLATEMATH_H_ */
GB_unaryop__minv_uint64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint64_fp32
// op(A') function:  GB_tran__minv_uint64_fp32

// C type:   uint64_t
// A type:   float
// cast:     uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: multiplicative inverse in 64-bit unsigned arithmetic
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting: float -> uint64_t with saturation/clipping per GB_CAST_UNSIGNED
#define GB_CASTING(z, x) \
    uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise, embarrassingly parallel: each entry p of Ax is independently
// cast and transformed into Cx [p].
GrB_Info GB_unop__minv_uint64_fp32
(
    uint64_t *restrict Cx,      // output array, anz entries
    const float *restrict Ax,   // input array, anz entries
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // Rowcounts [naslice]
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the transpose template expands using the GB_* macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
affinity.c
/* Forward declarations.  Only do1() is called below; do2()/do3() are
 * presumably referenced by sibling tests — TODO confirm. */
void do1();
void do2();
void do3();

/* Runs three consecutive parallel regions, one per OpenMP thread-affinity
 * policy:
 *   spread — distribute threads sparsely across the available places,
 *   master — co-locate all threads on the master thread's place,
 *   close  — pack threads onto places near the master thread.
 * Exercises that each proc_bind clause is accepted and honored. */
void foo() {
  #pragma omp parallel proc_bind(spread)
  {
    do1();
  }
  #pragma omp parallel proc_bind(master)
  {
    do1();
  }
  #pragma omp parallel proc_bind(close)
  {
    do1();
  }
}
omp50_task_depend_mtx2.c
// RUN: %libomp-compile-and-run

// Tests OMP 5.0 task dependences "mutexinoutset", emulates compiler codegen
// Mutually exclusive tasks get input dependency info array sorted differently
//
// Task tree created:
//      task0 task1
//         \    / \
//         task2   task5
//         /   \
//     task3    task4
//       \       /
//      task6<-->task7   (these two are mutually exclusive)
//        \     /
//         task8
//
#include <stdio.h>
#include <omp.h>
#ifdef _WIN32
#include <windows.h>
#define mysleep(n) Sleep(n)
#else
#include <unistd.h>
#define mysleep(n) usleep((n)*1000)
#endif

static int checker = 0; // to check if two tasks run simultaneously
static int err = 0;
#ifndef DELAY
#define DELAY 100
#endif

// ---------------------------------------------------------------------------
// internal data to emulate compiler codegen

// task entry point signature used by the runtime
typedef int(*entry_t)(int, int**);

// mirrors the runtime's dependency descriptor (kmp_depend_info)
typedef struct DEP {
  size_t addr;
  size_t len;
  int flags;
} dep;

// mirrors the runtime's source-location struct (ident_t)
typedef struct ID {
  int reserved_1;
  int flags;
  int reserved_2;
  int reserved_3;
  char *psource;
} id;

/* Body shared by the two mutually exclusive tasks (task6/task7 in the
 * diagram).  The checker increment/decrement brackets the task body:
 * if checker is ever != 1 inside, two mutex tasks ran concurrently. */
int thunk(int gtid, int** pshareds) {
  int t = **pshareds;
  int th = omp_get_thread_num();
  #pragma omp atomic
    ++checker;
  printf("task __%d, th %d\n", t, th);
  if (checker != 1) { // no other mutex task may be inside this window
    err++;
    printf("Error1, checker %d != 1\n", checker);
  }
  mysleep(DELAY);
  if (checker != 1) { // re-check after the delay
    err++;
    printf("Error2, checker %d != 1\n", checker);
  }
  #pragma omp atomic
    --checker;
  return 0;
}

#ifdef __cplusplus
extern "C" {
#endif
// declarations of the libomp internal entry points exercised directly
int __kmpc_global_thread_num(id*);
extern int** __kmpc_omp_task_alloc(id *loc, int gtid, int flags,
                                   size_t sz, size_t shar, entry_t rtn);
int __kmpc_omp_task_with_deps(id *loc, int gtid, int **task, int nd,
                              dep *dep_lst, int nd_noalias,
                              dep *noalias_dep_lst);
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};
#ifdef __cplusplus
} // extern "C"
#endif
// End of internal data
// ---------------------------------------------------------------------------

int main()
{
  int i1,i2,i3,i4; // dependency sentinels; only their addresses matter
  omp_set_num_threads(2);
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      dep sdep[2];
      int **ptr;
      int gtid = __kmpc_global_thread_num(&loc);
      int t = omp_get_thread_num();
      #pragma omp task depend(in: i1, i2)
      { int th = omp_get_thread_num();
        printf("task 0_%d, th %d\n", t, th);
        mysleep(DELAY); }
      #pragma omp task depend(in: i1, i3)
      { int th = omp_get_thread_num();
        printf("task 1_%d, th %d\n", t, th);
        mysleep(DELAY); }
      #pragma omp task depend(in: i2) depend(out: i1)
      { int th = omp_get_thread_num();
        printf("task 2_%d, th %d\n", t, th);
        mysleep(DELAY); }
      #pragma omp task depend(in: i1)
      { int th = omp_get_thread_num();
        printf("task 3_%d, th %d\n", t, th);
        mysleep(DELAY); }
      #pragma omp task depend(out: i2)
      { int th = omp_get_thread_num();
        printf("task 4_%d, th %d\n", t, th);
        mysleep(DELAY+5); } // wait a bit longer than task 3
      #pragma omp task depend(out: i3)
      { int th = omp_get_thread_num();
        printf("task 5_%d, th %d\n", t, th);
        mysleep(DELAY); }
// compiler codegen start
      // task1 (task6 in the diagram): flags = 4 marks mutexinoutset deps
      ptr = __kmpc_omp_task_alloc(&loc, gtid, 0, 28, 16, thunk);
      sdep[0].addr = (size_t)&i1;
      sdep[0].len = 0;   // not used
      sdep[0].flags = 4; // mx
      sdep[1].addr = (size_t)&i4;
      sdep[1].len = 0;   // not used
      sdep[1].flags = 4; // mx
      **ptr = t + 10;    // init single shared variable
      __kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);

      // task2 (task7 in the diagram)
      ptr = __kmpc_omp_task_alloc(&loc, gtid, 0, 28, 16, thunk);
      // reverse pointers - library should sort them uniquely
      sdep[0].addr = (size_t)&i4;
      sdep[1].addr = (size_t)&i1;
      **ptr = t + 20;    // init single shared variable
      __kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
// compiler codegen end
      #pragma omp task depend(in: i1)
      { int th = omp_get_thread_num();
        printf("task 8_%d, th %d\n", t, th);
        mysleep(DELAY); }
    } // single
  } // parallel
  if (err == 0) {
    printf("passed\n");
    return 0;
  } else {
    printf("failed\n");
    return 1;
  }
}
degeneracy_approx_set.h
#pragma once

#include "../general.h"
#include <cstdlib>
#include <omp.h>
#include <gms/third_party/fast_statistics.h>
#include <gms/third_party/fast_range.h>
#include "boundary_function.h"

namespace PpParallel {

/**
 * Computes an approximate degeneracy ordering of the vertices of `graph`.
 *
 * Repeatedly selects a degree boundary via the `boundary` policy, moves all
 * vertices with degree <= boundary to the front (they are "peeled" this
 * round), sorts the peeled vertices by degree, appends them to the result,
 * and decrements the remaining vertices' degrees by their edges into the
 * peeled set (PULL style).
 *
 * Template parameters:
 *   boundary      - policy that picks the degree cutoff for each round
 *   useRankFormat - if true, res[v] = rank of v; otherwise res[rank] = v
 *   SGraph        - graph type; must provide num_nodes(), out_neigh(v)
 *                   with cardinality() and intersect_count(Set), and a
 *                   nested Set type constructible from (NodeId*, count)
 *   Output        - result container with resize() and operator[]
 *
 * @param graph   input graph (read-only)
 * @param res     output ordering/ranks; resized to num_nodes()
 * @param epsilon approximation parameter forwarded to `boundary`
 *
 * NOTE(review): loop counters are `int` while vSize comes from
 * graph.num_nodes() — assumes node counts fit in int; TODO confirm.
 * NOTE(review): vArray is a raw new[]/delete[] pair; an exception thrown
 * between them would leak — presumably none of the callees throw.
 */
template<BoundaryFunction boundary, bool useRankFormat = false, class SGraph = RoaringGraph, class Output = std::vector<NodeId>>
void getDegeneracyOrderingApproxSGraph(const SGraph &graph, Output &res, const double epsilon = 0.001) {
    using Set = typename SGraph::Set;
    auto vSize = graph.num_nodes();
    NodeId counter = 0;  // number of vertices already placed in the ordering
    res.resize(vSize); //Prepare Result

    //Prepare Counter and Working Set
    std::vector<int> degreeCounter(vSize);
    NodeId *vArray = new NodeId[vSize];
#pragma omp parallel for schedule(static, 16)
    for (int i = 0; i < vSize; i++) {
        degreeCounter[i] = graph.out_neigh(i).cardinality();
        vArray[i] = i;
    }
    // [start_index, end_index) is the shrinking window of unplaced vertices
    NodeId *start_index = vArray;
    NodeId *end_index = vArray + vSize;

    while (counter < vSize) {
        auto remaining = end_index - start_index;
        // degree cutoff for this peeling round, chosen by the policy
        unsigned int border = boundary(start_index, remaining, degreeCounter, epsilon);
#ifdef _OPENMP
        // move vertices at or below the cutoff to the front of the window
        auto middle_index = __gnu_parallel::partition(start_index, end_index,
                                                      [border, &degreeCounter](const NodeId v) {
                                                          return degreeCounter[v] <= border;
                                                      });
#else
        auto middle_index = std::partition(start_index, end_index,
                                           [border, &degreeCounter](const NodeId v) {
                                               return degreeCounter[v] <= border;
                                           });
#endif
        auto mid = middle_index - start_index;
#ifdef _OPENMP
        // order the peeled vertices by ascending degree before emitting them
        __gnu_parallel::sort(start_index, middle_index,
                             [&degreeCounter](const NodeId v, const NodeId w) {
                                 return degreeCounter[v] < degreeCounter[w];
                             });
#else
        std::sort(start_index, middle_index,
                  [&degreeCounter](const NodeId v, const NodeId w) {
                      return degreeCounter[v] < degreeCounter[w];
                  });
#endif
        // set of vertices removed this round, used for degree updates below
        Set X(start_index, mid);
#pragma omp parallel
        {
            //Add to result set
#pragma omp for schedule(static, 16) nowait
            for (int i = 0; i < mid; i++) {
                if constexpr (useRankFormat)
                    res[start_index[i]] = counter + i; //Result in Rank-Format
                else
                    res[counter + i] = start_index[i]; //Result in Order-Format
            }
            //Reflect removing the vertices from the graph (PULL style)
#pragma omp for schedule(static, 16)
            for (int i = mid; i < remaining; i++) {
                auto v = start_index[i];
                degreeCounter[v] -= graph.out_neigh(v).intersect_count(X);
            }
        }
        start_index = middle_index;
        counter += mid;
    }
    delete[] vArray;
}
} // namespace PpParallel
sectionModificado.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Reads an initialization value from stdin inside an OpenMP `single`
 * region, fills array b[] with that value via a worksharing `for`, and
 * prints the result.  Didactic example combining `single` and `for`
 * inside one parallel region.
 *
 * Fixes vs. the original: implicit-int `main()` (invalid since C99) is
 * now `int main(void)`; the scanf result is checked so `a` can never be
 * read uninitialized (was UB on bad input); the prompt is flushed before
 * blocking on input; an explicit `return 0` is added.
 */
int main(void)
{
    int n = 9, i, a = 0, b[9];

    for (i = 0; i < n; i++)
        b[i] = -1;

    #pragma omp parallel
    {
        /* Exactly one thread performs the interactive input.  The
         * implicit barrier at the end of `single` guarantees every
         * thread sees the final value of `a` before the loop below. */
        #pragma omp single
        {
            printf("Introduce valor de inicializacion a:");
            fflush(stdout); /* make sure the prompt appears before scanf blocks */
            if (scanf("%d", &a) != 1) {
                a = 0; /* invalid input: fall back to a defined value */
            }
            printf("Single ejecutada por el thread%d\n", omp_get_thread_num());
        }
        /* The iteration variable of a worksharing `for` is implicitly
         * private, so sharing `i` at function scope is safe here. */
        #pragma omp for
        for (i = 0; i < n; i++)
            b[i] = a;
    }

    printf("Después de la región parallel:\n");
    /* NOTE(review): this `single` is orphaned (outside any parallel
     * region); it binds to the sequential team of one thread and is
     * effectively a no-op wrapper.  Kept for fidelity with the original
     * exercise. */
    #pragma omp single
    {
        for (i = 0; i < n; i++) {
            printf("Single ejecutada por el thread%d\n", omp_get_thread_num());
            printf("b[%d]=%d\t", i, b[i]);
        }
    }
    printf("\n");
    return 0;
}
DRB015-outofbounds-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
Reference description (DataRaceBench DRB015): the outermost loop is
parallelized while the inner loop has an out-of-bounds access b[i][j-1]
when j equals 0.  This reads a previous row's last element.
For example, with a 4x4 array:
   j=0 1 2 3
i=0  x x x x
  1  x x x x
  2  x x x x
  3  x x x x
outer loop i=2, inner loop j=0: the element accessed, b[i][j-1], becomes
b[2][-1], which is b[1][3] due to the linearized row-major storage of the
2-D array.  That creates a loop-carried data dependence between i=2 and
i=1, i.e. the documented data race pair b[i][j]@80:7 vs. b[i][j-1]@80:15.

NOTE(review): the code below does NOT match that description — it reads
b[i-1][j] (not b[i][j-1]) and parallelizes only the INNER j-loops while
the outer i-loop of the copy phase runs sequentially, so neither the
out-of-bounds access nor the described race is present in this variant.
Verify against the upstream DRB015 source before relying on this file as
a "race: yes" benchmark.
*/
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
  int i,j;
  int len=100; /* default matrix dimension; overridable via argv[1] */

  /* NOTE(review): atoi() result is unchecked — non-numeric or huge argv[1]
   * yields 0 or overflows the stack-allocated VLA below. Intentional for a
   * benchmark; do not "fix" without consulting the suite conventions. */
  if (argc>1)
    len = atoi(argv[1]);

  int n=len, m=len;

  double b[n][m]; /* VLA on the stack: len*len*8 bytes */

  /* Initialization: both levels carry a parallel-for; the inner one is a
   * nested parallel region (inactive by default unless nesting is enabled). */
  #pragma omp parallel for private(i, j)
  for (i=0;i<n;i++)
    #pragma omp parallel for private(j)
    for (j=0;j<m;j++)
      b[i][j] = i * m + j;

  /* Copy phase: outer i-loop sequential, inner j-loop parallel; each row
   * is copied from the row finished in the previous (serial) iteration. */
  for (i=1;i<n;i++)
    #pragma omp parallel for private(j)
    for (j=0;j<m;j++)
      b[i][j]=b[i-1][j];

  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
      printf("%lf\n",b[i][j]);

  return 0;
}
raytracing.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "math-toolkit.h" #include "primitives.h" #include "raytracing.h" #include "idx_stack.h" #include <immintrin.h> #define MAX_REFLECTION_BOUNCES 3 #define MAX_DISTANCE 1000000000000.0 #define MIN_DISTANCE 0.00001 #define SAMPLES 4 #define SQUARE(x) (x * x) #define MAX(a, b) (a > b ? a : b) #define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */ #define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */ #define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */ typedef struct _rgb { __m256d r; __m256d g; __m256d b; } rgb; static inline void COPY_POINT3v(point3v *out,const point3 in) { out->x=_mm256_set1_pd(in[0]); out->y=_mm256_set1_pd(in[1]); out->z=_mm256_set1_pd(in[2]); } static inline void COPY_RGB(point3v *out,const point3 in) { out->x=_mm256_set1_pd(in[0]); out->y=_mm256_set1_pd(in[1]); out->z=_mm256_set1_pd(in[2]); } void COPY_POINT3vv(point3v a, const point3v b) { __m256d mzero = _mm256_setzero_pd(); a->x = _mm256_add_pd(mzero, b->x); a->y = _mm256_add_pd(mzero, b->y); a->z = _mm256_add_pd(mzero, b->z); } /* @param t t distance * @return 1 means hit, otherwise 0 */ static __m256d raySphereIntersection(const point3v ray_e, const point3v ray_d, const sphere *sph, intersection *ip, __m256d *t1) { point3v l; point3v sphcen; COPY_POINT3v(sphcen, sph->center); subtract_vector(&sphcen, ray_e, &l); __m256d ms = dot_product(l, ray_d); __m256d ml2 = dot_product(l, l); point3v sphrad; COPY_POINT3v(sphrad, sph->radius); __m256d mr2 = _mm256_mul_pd(sphrad, sphrad); __m256d mzero = _mm256_setzero_pd(); #define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */ #define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */ __m256d if1 = _mm256_cmp_pd(ms, mzero, _CMP_LE_OS); __m256d if2 = _mm256_cmp_pd(ml2, mr2, _CMP_GT_OS); if1 = _mm256_and_pd(if1, if2); // if (s < 0 && l2 > r2) __m256d mm2 = _mm256_mul_pd(ms, ms); mm2 = _mm256_sub_pd(l2, ms); // mm2 = ml2 - ms * ms if2 
= _mm256_cmp_pd(mm2, mr2, _CMP_GT_OS); // if (m2 > r2) __m256d mq = _mm256_sub_pd(mr2, mm2); mq = _mm256_sqrt_pd(mq); __m256d if3 = _mm256_cmp_pd(ml2, mr2, _CMP_GT_OS); // if (l2 > r2) __m256d smiq = _mm256_sub_pd(ms, mq); // s-q value __m256d sadq = _mm256_add_pd(ms, mq); // s+q value __m256i iall1 = _mm256_set1_epi64x(-1); // iall1 = 0xFF...F n __m256d dall1 = _mm256_castsi256_pd(iall1); if1 = _mm256_or_pd(if1, if2); // if1 = if1 or if2, after will use it to load the t1 before __m256d notif1 = _mm256_xor_pd(if1, dall1); // notif1 = not ( if1 ) __m256d t1copy = _mm256_add_pd(*t1, mzero); // create a copy for *t1 smiq = _mm256_and_pd(smiq, if3); __m256d notif3 = _mm256_xor_pd(if3, dall1); // notif3 = not ( if3 ) sadq = _mm256_and_pd(sadq, notif3); __m256d newt1 = _mm256_or_pd(smiq, sadq); // the new value of *t1 if not return t1copy = _mm256_and_pd(t1copy, if1); t1notre = _mm256_and_pd(newt1, notif1); __m256d t1new = _mm256_add_pd(t1copy, t1notre); *t1 = __m256d_add_pd(mzero, t1new); multiply_vector(ray_d, *t1, ip->point); add_vector(ray_e, ip->point, ip->point); subtract_vector(ip->point, sphcen, ip->normal); normalize(ip->normal); __m256d dotres = dot_product(ip->normal, ray_d); __m256d ifdot = _mm256_cmp_pd(dotres, mzero, _CMP_GT_OS); // if dotres greater than 0 __m256d notifdot = _mm256_xor_pd(ifdot, dall1); __m256d ipcopy = _mm256_add_pd(ip->normal, mzero); ipcopy = _mm256_and_pd(ipcopy, notifdot); __m256d minus1 = _mm256_set1_pd(-1); multiply_vector(ip->normal, minus1, ip->normal); ip->normal = _mm256_and_pd(ip->normal, ifdot); ip->normal = _mm256_or_pd(ip->normal, ipcopy); } /* @return 1 means hit, otherwise 0; */ static int rayRectangularIntersection(const point3 ray_e, const point3 ray_d, rectangular *rec, intersection *ip, double *t1) { point3 e01, e03, p; subtract_vector(rec->vertices[1], rec->vertices[0], e01); subtract_vector(rec->vertices[3], rec->vertices[0], e03); cross_product(ray_d, e03, p); double det = dot_product(e01, p); /* Reject rays 
orthagonal to the normal vector. * I.e. rays parallell to the plane. */ if (det < 1e-4) return 0; double inv_det = 1.0 / det; point3 s; subtract_vector(ray_e, rec->vertices[0], s); double alpha = inv_det * dot_product(s, p); if ((alpha > 1.0) || (alpha < 0.0)) return 0; point3 q; cross_product(s, e01, q); double beta = inv_det * dot_product(ray_d, q); if ((beta > 1.0) || (beta < 0.0)) return 0; *t1 = inv_det * dot_product(e03, q); if (alpha + beta > 1.0f) { /* for the second triangle */ point3 e23, e21; subtract_vector(rec->vertices[3], rec->vertices[2], e23); subtract_vector(rec->vertices[1], rec->vertices[2], e21); cross_product(ray_d, e21, p); det = dot_product(e23, p); if (det < 1e-4) return 0; inv_det = 1.0 / det; subtract_vector(ray_e, rec->vertices[2], s); alpha = inv_det * dot_product(s, p); if (alpha < 0.0) return 0; cross_product(s, e23, q); beta = inv_det * dot_product(ray_d, q); if ((beta < 0.0) || (beta + alpha > 1.0)) return 0; *t1 = inv_det * dot_product(e21, q); } if (*t1 < 1e-4) return 0; COPY_POINT3(ip->normal, rec->normal); if (dot_product(ip->normal, ray_d)>0.0) multiply_vector(ip->normal, -1, ip->normal); multiply_vector(ray_d, *t1, ip->point); add_vector(ray_e, ip->point, ip->point); return 1; } static void localColor(color local_color, const color light_color, double diffuse, double specular, const object_fill *fill) { color ambi = { 0.1, 0.1, 0.1 }; color diff, spec, lightCo, surface; /* Local Color = ambient * surface + * light * ( kd * surface * diffuse + ks * specular) */ COPY_COLOR(diff, fill->fill_color); multiply_vector(diff, fill->Kd, diff); multiply_vector(diff, diffuse, diff); COPY_COLOR(lightCo, light_color); multiply_vectors(diff, lightCo, diff); COPY_COLOR(spec, light_color); multiply_vector(spec, fill->Ks, spec); multiply_vector(spec, specular, spec); COPY_COLOR(surface, fill->fill_color); multiply_vectors(ambi,surface, ambi); add_vector(diff, ambi, diff); add_vector(diff, spec, diff); add_vector(local_color, diff, local_color); 
} /* @param d direction of the ray into intersection * @param l direction of intersection to light * @param n surface normal */ static void compute_specular_diffuse(__m256d *diffuse, __m256d *specular, const point3v d, const point3v l, const point3v n, double phong_pow) { point3v d_copy, l_copy, middle, r; __m256d minus1 = _mm256_set1_pd(-1); __m256d two = _mm256_set1_pd(2); COPY_POINT3vv(d_copy, d); multiply_vector(d_copy, minus1, d_copy); normalize(d_copy); COPY_POINT3vv(l_copy, l); multiply_vector(l_copy, minus1, l_copy); normalize(l_copy); __m256d tmp = dotproduct(n, l_copy); multiply_vector(n, tmp, middle); multiply_vector(middle, two, middle); subtract_vector(middle, l_copy, r); normalize(r); __m256d mzero = _mm256_setzero_pd(); __m256i iall1 = _mm256_set1_epi64x(-1); // iall1 = 0xFF...F __m256d dall1 = _mm256_castsi256_pd(iall1); #define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */ __m256d dot_nl = dotproduct(n, l_copy); __m256d max = _mm256_cmp_pd(dot_nl, mzero, _CMP_GT_OS); // n2 <= 0.0 *diffuse = _mm256_and_pd(max, dot_nl); // *diffuse = MAX(0, dot_product(n, l_copy)); __m256d dot_rd = dotproduct(r, d_copy); __m256d max0rd = _mm256_cmp_pd(dot_rd, mzero, _CMP_GT_OS); __m256d powtarget = _mm256_and_pd(max0rd, dot_rd); // phong_pow should only be the value of 5 or 30 // need to think how to let pow work in avx if (5 == phong_pow) { __m256d powtar2 = _mm256_mul_pd(powtar, powtar); __m256d powtar4 = _mm256_mul_pd(powtar2, powtar2); __m256d powtar5 = _mm256_mul_pd(powtar4, powtar); *specular = powtar5; } else if (30 == phong_pow) { __m256d powtar2 = _mm256_mul_pd(powtar, powtar); __m256d powtar4 = _mm256_mul_pd(powtar2, powtar2); __m256d powtar8 = _mm256_mul_pd(powtar4, powtar4); __m256d powtar16 = _mm256_mul_pd(powtar8, powtar8); __m256d powtar24 = _mm256_mul_pd(powtar16, powtar8); __m256d powtar6 = _mm256_mul_pd(powtar4, powtar2); __m256d powtar30 = _mm256_mul_pd(powtar24, powtar6); *specular = powtar30; } } /* @param r direction of reflected ray * 
@param d direction of primary ray into intersection * @param n surface normal at intersection */ static void reflection(point3v r, const point3v d, const point3v n) { __m256d dot_dn = dot_product(d, n); __m256d tmp = _mm256_set1_pd(-2); dot_dn = _mm256_mul_pd(tmp, dot_dn); // -2.0 * dot_product(d,n) multiply_vector(n, dot_dn, r); add_vector(r, d, r); } /* reference: https://www.opengl.org/sdk/docs/man/html/refract.xhtml */ static void refraction(point3v *t, const point3v *I, const point3v *N, double n1, double n2) { __m256d n2v = _mm256_set1_pd(n2); __m256d eta = _mm256_set1_pd(n1/n2); __m256d dot_NI = dot_product(N, I); __m256d k = _mm256_set_pd(1); __m256d eta2 = _mm256_mul_pd(eta, eta); // eta2 = eta * eta __m256d dot_NI2 = _mm256_mul_pd(dot_NI, dot_NI); // dot_NI2 = dot_NI * dot_NI dot_NI2 = _mm256_sub_pd(k, dot_NI2); // dot_NI2 = 1 - dot_NI * dot_NI eta2 = _mm256_mul_pd(eta2, dot_NI2); // eta2 = eta * eta * ( 1 - dot_NI * dot_NI) k = _mm256_sub_pd(k, eta2); // k = 1 - eta * eta * ( 1 - dot_NI * dot_NI) #define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */ #define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */ __m256d mzero = _mm256_setzero_pd(); __m256i iall1 = _mm256_set1_epi64x(-1); // iall1 = 0xFF...F __m256d dall1 = _mm256_castsi256_pd(iall1); __m256d if1 = _mm256_cmp_pd(k, mzero, _CMP_LT_OS); // k < 0.0 __m256d if2 = _mm256_cmp_pd(n2v, mzero, _CMP_LE_OS); // n2 <= 0.0 __m256d ifa = _mm256_or_pd(k, n2v); // if (k < 0.0 || n2 <= 0.0) __m256d notifa = _mm256_xor_pd(ifa, dall1); point3v tmp; multiply_vector(I, eta, t); __m256d midpar = _mm256_mul_pd(eta, dot_NI); k = _mm256_sqrt_pd(k); // k = sqrt(k) midpar = _mm256_add_pd(midpar, k); multiply_vector(N, midpar, tmp); subtract_vector(t, tmp, t); t->x = _mm256_and_pd(t->x, notifa); t->y = _mm256_and_pd(t->y, notifa); t->z = _mm256_and_pd(t->z, notifa); } /* @param i direction of incoming ray, unit vector * @param r direction of refraction ray, unit vector * @param normal unit vector * 
 @param n1 refraction index
 * @param n2 refraction index
 *
 * reference: http://graphics.stanford.edu/courses/cs148-10-summer/docs/2006--degreve--reflection_refraction.pdf
 */
static double fresnel(const point3 r, const point3 l,
                      const point3 normal, double n1, double n2)
{
    /* TIR */
    /* a degenerate (shorter than unit) refraction vector means total
     * internal reflection: everything is reflected */
    if (length(l) < 0.99) return 1.0;
    double cos_theta_i = -dot_product(r, normal);
    double cos_theta_t = -dot_product(l, normal);
    /* Fresnel equations, s- and p-polarized amplitude coefficients */
    double r_vertical_root = (n1 * cos_theta_i - n2 * cos_theta_t) /
                             (n1 * cos_theta_i + n2 * cos_theta_t);
    double r_parallel_root = (n2 * cos_theta_i - n1 * cos_theta_t) /
                             (n2 * cos_theta_i + n1 * cos_theta_t);
    /* unpolarized reflectance = average of the two squared coefficients */
    return (r_vertical_root * r_vertical_root +
            r_parallel_root * r_parallel_root) / 2.0;
}

/* Find the nearest object hit by ray e + t*d within (t0, t1).
 * Exactly one of *hit_rectangular / *hit_sphere is non-NULL on a hit;
 * both are NULL when nothing was hit (return value is then unspecified).
 * @param t distance */
static intersection ray_hit_object(const point3 e, const point3 d,
                                   double t0, double t1,
                                   const rectangular_node rectangulars,
                                   rectangular_node *hit_rectangular,
                                   const sphere_node spheres,
                                   sphere_node *hit_sphere)
{
    /* set these to not hit */
    *hit_rectangular = NULL;
    *hit_sphere = NULL;

    /* start the ray slightly off the origin to avoid self-intersection */
    point3 biased_e;
    multiply_vector(d, t0, biased_e);
    add_vector(biased_e, e, biased_e);

    double nearest = t1;
    intersection result, tmpresult;

    for (rectangular_node rec = rectangulars; rec; rec = rec->next) {
        if (rayRectangularIntersection(biased_e, d, &(rec->element),
                                       &tmpresult, &t1) && (t1 < nearest)) {
            /* hit is closest so far */
            *hit_rectangular = rec;
            nearest = t1;
            result = tmpresult;
        }
    }

    /* check the spheres */
    for (sphere_node sphere = spheres; sphere; sphere = sphere->next) {
        if (raySphereIntersection(biased_e, d, &(sphere->element),
                                  &tmpresult, &t1) && (t1 < nearest)) {
            *hit_sphere = sphere;
            *hit_rectangular = NULL;
            nearest = t1;
            result = tmpresult;
        }
    }

    return result;
}

/* Build four primary-ray directions at once (one per AVX lane); i[k]/j[k]
 * are the four pixel coordinates on the (width x height) image plane.
 * @param d direction of ray
 * @param w basic vectors
 */
static void rayConstruction(point3v *d, const point3v *u, const point3v *v,
                            const point3v *w, unsigned int *i, unsigned int *j,
                            const viewpoint *view,
                            unsigned int width, unsigned int height)
{
    /* fixed film-plane extents and focal distance */
    double xmin = -0.0175;
    double ymin = -0.0175;
    double xmax = 0.0175;
    double ymax = 0.0175;
    double focal = 0.05;

    point3v u_tmp, v_tmp, w_tmp, s;

    /* scalar reference kept from the pre-AVX version:
    //double w_s = focal;
    //double u_s = xmin + ((xmax - xmin) * (float) i / (width - 1));
    //double v_s = ymax + ((ymin - ymax) * (float) j / (height - 1));
    */
    __m256d w_s =_mm256_set1_pd(focal);
    double temp[4];
    for(int k=0; k<4; k++) {
        temp[k]=xmin + ((xmax - xmin) * (float) i[k] / (width - 1));
    }
    __m256d u_s =_mm256_loadu_pd(temp);
    for(int k=0; k<4; k++) {
        temp[k]=ymax + ((ymin - ymax) * (float) j[k] / (height - 1));
    }
    __m256d v_s =_mm256_loadu_pd(temp);

    /* s = e + u_s * u + v_s * v + w_s * w */
    m_multiply_vector(u, u_s, &u_tmp);
    m_multiply_vector(v, v_s, &v_tmp);
    m_multiply_vector(w, w_s, &w_tmp);
    point3v vrp;
    COPY_POINT3v(&vrp,view->vrp);
    madd_vector(&vrp, &u_tmp, &s);
    madd_vector(&s, &v_tmp, &s);
    madd_vector(&s, &w_tmp, &s);

    /* p(t) = e + td = e + t(s - e) */
    msubtract_vector(&s, &vrp, d);
    mnormalize(d);
}

/* Derive the orthonormal camera basis (u, v, w) from the viewpoint. */
static void calculateBasisVectors(point3v *u, point3v *v, point3v *w,
                                  const viewpoint *view)
{
    /* w */
    COPY_POINT3v(w, view->vpn);
    mnormalize(w);

    /* u = (t x w) / (|t x w|) */
    point3v mvup;
    COPY_POINT3v(&mvup, view->vup);
    mcross_product(w, &mvup, u);
    mnormalize(u);

    /* v = w x u */
    mcross_product(u, w, v);
    mnormalize(v);
}

/* @brief protect color value overflow
 * Clamps each RGB component to at most 1.0. */
static void protect_color_overflow(color c)
{
    for (int i = 0; i < 3; i++)
        if (c[i] > 1.0) c[i] = 1.0;
}

/* Trace one (scalar) ray and accumulate its color into object_color.
 * Returns 1 when an object was hit, 0 otherwise (caller then uses the
 * background color).  Recurses for reflection and refraction, bounded by
 * bounces_left. */
static unsigned int ray_color(const point3 e, double t,
                              const point3 d,
                              idx_stack *stk,
                              const rectangular_node rectangulars,
                              const sphere_node spheres,
                              const light_node lights,
                              color object_color,
                              int bounces_left)
{
    rectangular_node hit_rec = NULL, light_hit_rec = NULL;
    sphere_node hit_sphere = NULL, light_hit_sphere = NULL;
    double diffuse, specular;
    point3 l, _l, r, rr;
    object_fill fill;

    color reflection_part;
    color refraction_part;
    /* might be a reflection ray, so check how many times we've bounced */
    if (bounces_left == 0) {
        SET_COLOR(object_color, 0.0, 0.0, 0.0);
        return 0;
    }

    /* check for intersection
 with a sphere or a rectangular */
    intersection ip= ray_hit_object(e, d, t, MAX_DISTANCE, rectangulars,
                                    &hit_rec, spheres, &hit_sphere);
    if (!hit_rec && !hit_sphere)
        return 0;

    /* pick the fill of the object that was hit */
    fill = hit_rec ?
           hit_rec->element.rectangular_fill :
           hit_sphere->element.sphere_fill;

    void *hit_obj = hit_rec ? (void *) hit_rec : (void *) hit_sphere;

    /* assume it is a shadow */
    SET_COLOR(object_color, 0.0, 0.0, 0.0);

    for (light_node light = lights; light; light = light->next) {
        /* calculate the intersection vector pointing at the light */
        subtract_vector(ip.point, light->element.position, l);
        multiply_vector(l, -1, _l);
        normalize(_l);
        /* check for intersection with an object. use ignore_me
         * because we don't care about this normal */
        ray_hit_object(ip.point, _l, MIN_DISTANCE, length(l),
                       rectangulars, &light_hit_rec,
                       spheres, &light_hit_sphere);
        /* the light was not block by itself(lit object) */
        if (light_hit_rec || light_hit_sphere)
            continue;

        /* NOTE(review): compute_specular_diffuse above takes __m256d* for
         * diffuse/specular and point3v vectors, but is called here with
         * double* and point3 — this file looks half-converted to AVX;
         * presumably a scalar overload exists elsewhere. TODO confirm. */
        compute_specular_diffuse(&diffuse, &specular, d, l,
                                 ip.normal, fill.phong_power);

        localColor(object_color, light->element.light_color,
                   diffuse, specular, &fill);
    }

    reflection(r, d, ip.normal);
    /* track the refraction index of the medium we are currently inside:
     * popping the stack means we are leaving the hit object */
    double idx = idx_stack_top(stk).idx, idx_pass = fill.index_of_refraction;
    if (idx_stack_top(stk).obj == hit_obj) {
        idx_stack_pop(stk);
        idx_pass = idx_stack_top(stk).idx;
    } else {
        idx_stack_element e = { .obj = hit_obj,
                                .idx = fill.index_of_refraction };
        idx_stack_push(stk, e);
    }

    refraction(rr, d, ip.normal, idx, idx_pass);
    /* Fresnel reflectance mixes reflection against refraction below */
    double R = (fill.T > 0.1) ?
               fresnel(d, rr, ip.normal, idx, idx_pass) :
               1.0;

    /* totalColor = localColor +
                    mix((1-fill.Kd) * fill.R * reflection, T * refraction, R)
     */
    if (fill.R > 0) {
        /* if we hit something, add the color */
        /* save/restore the stack top so the reflected sub-ray cannot leave
         * stale medium entries behind */
        int old_top = stk->top;
        if (ray_color(ip.point, MIN_DISTANCE, r, stk, rectangulars, spheres,
                      lights, reflection_part,
                      bounces_left - 1)) {
            multiply_vector(reflection_part, R * (1.0 - fill.Kd) * fill.R,
                            reflection_part);
            add_vector(object_color, reflection_part,
                       object_color);
        }
        stk->top = old_top;
    }
    /* calculate refraction ray */
    if ((length(rr) > 0.0) && (fill.T > 0.0) &&
            (fill.index_of_refraction > 0.0)) {
        normalize(rr);
        if (ray_color(ip.point, MIN_DISTANCE, rr, stk,rectangulars, spheres,
                      lights, refraction_part,
                      bounces_left - 1)) {
            multiply_vector(refraction_part, (1 - R) * fill.T,
                            refraction_part);
            add_vector(object_color, refraction_part,
                       object_color);
        }
    }

    protect_color_overflow(object_color);
    return 1;
}

/* Render the whole image into pixels (RGB8, row-major); four rows are
 * traced per iteration, one per AVX lane of the constructed ray packet.
 * @param background_color this is not ambient light */
void raytracing(uint8_t *pixels, color background_color,
                rectangular_node rectangulars, sphere_node spheres,
                light_node lights, const viewpoint *view,
                int width, int height)
{
    point3v u, v, w, d;
    color object_color[4];//= { 0.0, 0.0, 0.0 };
    for(int i=0; i<4; i++) {
        object_color[i][0]=0.0;
        object_color[i][1]=0.0;
        object_color[i][2]=0.0;
    }
    //rgb object_rgb;
    /* calculate u, v, w */
    calculateBasisVectors(&u, &v, &w, view);

    idx_stack stk[4];
    unsigned int i4[4],j4[4];
    /* NOTE(review): assumes SAMPLES is a perfect square (factor x factor
     * sub-pixel grid) and that height is a multiple of 4 — confirm at the
     * call site. */
    int factor = sqrt(SAMPLES);
    // #pragma omp parallel for num_threads (2) private(stk,object_color,d)
    for (int j = 0; j < height; j+=4) {
        for (int i = 0; i < width; i++) {
            double r[4] , g[4] , b[4];
            for(int ii=0; ii<4; ii++) {
                r[ii]=0.0;
                g[ii]=0.0;
                b[ii]=0.0;
            }
            /* MSAA */
            for (int s = 0; s < SAMPLES; s++) {
                idx_stack_init(&stk[0]);
                idx_stack_init(&stk[1]);
                idx_stack_init(&stk[2]);
                idx_stack_init(&stk[3]);
                /* lane k handles image row j+k at sub-sample s */
                for(int k=0; k<4; k++) {
                    i4[k]= i * factor + s / factor;
                    j4[k]= (j+k) * factor + s % factor;
                }
                rayConstruction(&d, &u, &v, &w, i4, j4, view,
                                width * factor, height * factor);
                //point3v vrp;
                //COPY_POINT3v(vrp,view->vrp);
                /* unpack the 4-wide direction back into scalar rays */
                point3 dp[4];
                double x[4];
                double y[4];
                double z[4];
                _mm256_storeu_pd(x,d.x);
                _mm256_storeu_pd(y,d.y);
                _mm256_storeu_pd(z,d.z);
                for(int ii=0; ii<4; ii++) {
                    dp[ii][0]=x[ii];
                    dp[ii][1]=y[ii];
                    dp[ii][2]=z[ii];
                }
                for(int k=0; k<4; k++) {
                    if (ray_color(view->vrp, 0.0, dp[k], &(stk[k]),
                                  rectangulars, spheres, lights,
                                  object_color[k],
                                  MAX_REFLECTION_BOUNCES)) {
                        r[k] += object_color[k][0];
                        g[k] += object_color[k][1];
                        b[k] += object_color[k][2];
                    } else {
                        r[k] += background_color[0];
                        g[k] += background_color[1];
                        b[k] += background_color[2];
                    }
                    /* NOTE(review): the pixel is (re)written on every sample
                     * iteration; only the final s iteration's value sticks,
                     * so the averaged result is correct but the earlier
                     * stores are redundant — presumably meant to sit after
                     * the sample loop. TODO confirm. */
                    pixels[((i + ((j+k) * width)) * 3) + 0] = r[k] * 255 / SAMPLES;
                    pixels[((i + ((j+k) * width)) * 3) + 1] = g[k] * 255 / SAMPLES;
                    pixels[((i + ((j+k) * width)) * 3) + 2] = b[k] * 255 / SAMPLES;
                    /* debug probe for one fixed pixel */
                    if(i==width/2&&j==200&&k==0)
                        printf("%lf %lf %lf\n",r[k],g[k],b[k]);
                }
            }
        }
    }
}
ast-dump-openmp-begin-declare-variant_6.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics

int also_before(void) {
  return 0;
}

#pragma omp begin declare variant match(implementation={vendor(ibm)})
int also_after(void) {
  return 1;
}
int also_before(void) {
  return 2;
}
#pragma omp end declare variant

int also_after(void) {
  return 0;
}

int main() {
  // Should return 0.
  return also_after() + also_before();
}

// NOTE(review): the exact line/column layout of the code above is
// load-bearing — the expectations below match AST source locations
// (e.g. line:5:5, line:24:3). Do not reflow anything above this point.

// Make sure:
//  - we see the specialization in the AST
//  - we do use the original pointers for the calls as the variants are not applicable (this is not the ibm compiler).

// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CHECK-NEXT: | |   `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(ibm)}
// CHECK-NEXT: |   `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(ibm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(ibm)}
// CHECK-NEXT: |   `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(ibm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(ibm)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1>
// CHECK-NEXT: |   `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10>
// CHECK-NEXT: |     `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(ibm)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1>
// CHECK-NEXT: |   `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10>
// CHECK-NEXT: |     `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2
// CHECK-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10>
// CHECK-NEXT: | |   `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(ibm)}
// CHECK-NEXT: |   `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(ibm)}]' 'int ({{.*}})'
// CHECK-NEXT: `-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:25:1> line:22:5 main 'int ({{.*}})'
// CHECK-NEXT:   `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:12, line:25:1>
// CHECK-NEXT:     `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:24:3, col:37>
// CHECK-NEXT:       `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <col:10, col:37> 'int' '+'
// CHECK-NEXT:         |-CallExpr [[ADDR_26:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT:         | `-ImplicitCastExpr [[ADDR_27:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT:         |   `-DeclRefExpr [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT:         `-CallExpr [[ADDR_29:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT:           `-ImplicitCastExpr [[ADDR_30:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT:             `-DeclRefExpr [[ADDR_31:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
convolutiondepthwise_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 3x3 convolution, stride 1.  One input channel maps to one
// output channel (groups == channels); each group is processed in parallel.
// The outer row loop produces two output rows per iteration (reading four
// input rows r0..r3), with a single-row tail loop after it.
static void convdw3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g * 9;

        float* outptr = out;
        float* outptr2 = outptr + outw;

        const float* img0 = bottom_blob.channel(g);

        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w * 2;
        const float* r3 = img0 + w * 3;

#if __ARM_NEON
        // one kernel row per vector; lane 3 is zeroed so a full 4-lane
        // multiply-accumulate only contributes the three real taps
        float32x4_t _k012x = vld1q_f32(kernel0);
        float32x4_t _k345x = vld1q_f32(kernel0 + 3);
        float32x4_t _k678x = vld1q_f32(kernel0 + 6);

        _k012x = vsetq_lane_f32(0.f, _k012x, 3);
        _k345x = vsetq_lane_f32(0.f, _k345x, 3);
        _k678x = vsetq_lane_f32(0.f, _k678x, 3);

        float32x4_t _bias0 = vdupq_n_f32(bias0);
#else
        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;
#endif // __ARM_NEON

        int i = 0;

        // main loop: two output rows per iteration
        for (; i + 1 < outh; i += 2)
        {
#if __ARM_NEON
#if __aarch64__
            int nn = outw >> 3;      // 8 output columns per asm iteration
            int remain = outw & 7;
#else
            int nn = outw >> 2;      // 4 output columns per asm iteration
            int remain = outw & 3;
#endif // __aarch64__
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                asm volatile(
                    "prfm       pldl1keep, [%3, #384]      \n"
                    "ld1        {v8.4s, v9.4s, v10.4s}, [%3] \n" // r0
                    "add        %3, %3, #32                \n"

                    "ext        v11.16b, v8.16b, v9.16b, #4 \n"
                    "ext        v13.16b, v9.16b, v10.16b, #4 \n"
                    "ext        v12.16b, v8.16b, v9.16b, #8 \n"
                    "ext        v14.16b, v9.16b, v10.16b, #8 \n"

                    "0:                                    \n"

                    "and        v4.16b, %17.16b, %17.16b   \n" // v4 = _bias0
                    "and        v5.16b, %17.16b, %17.16b   \n" // v5 = _bias0

                    "prfm       pldl1keep, [%6, #384]      \n"
                    "ld1        {v16.4s, v17.4s, v18.4s}, [%6] \n" // r3
                    "add        %6, %6, #32                \n"

                    "and        v6.16b, %17.16b, %17.16b   \n" // v6 = _bias0
                    "and        v7.16b, %17.16b, %17.16b   \n" // v7 = _bias0

                    "ext        v15.16b, v16.16b, v17.16b, #4 \n"

                    "fmla       v4.4s, v8.4s, %14.s[0]     \n"
                    "fmla       v5.4s, v9.4s, %14.s[0]     \n"

                    "ext        v20.16b, v17.16b, v18.16b, #4 \n"

                    "fmla       v6.4s, v16.4s, %16.s[0]    \n"
                    "fmla       v7.4s, v17.4s, %16.s[0]    \n"

                    "ext        v19.16b, v16.16b, v17.16b, #8 \n"

                    "fmla       v4.4s, v11.4s, %14.s[1]    \n"
                    "fmla       v5.4s, v13.4s, %14.s[1]    \n"

                    "ext        v21.16b, v17.16b, v18.16b, #8 \n"

                    "fmla       v6.4s, v15.4s, %16.s[1]    \n"
                    "fmla       v7.4s, v20.4s, %16.s[1]    \n"

                    "prfm       pldl1keep, [%4, #384]      \n"
                    "ld1        {v22.4s, v23.4s, v24.4s}, [%4] \n" // r1

                    "fmla       v4.4s, v12.4s, %14.s[2]    \n"
                    "fmla       v5.4s, v14.4s, %14.s[2]    \n"

                    "add        %4, %4, #32                \n"

                    "fmla       v6.4s, v19.4s, %16.s[2]    \n"
                    "fmla       v7.4s, v21.4s, %16.s[2]    \n"

                    "ext        v25.16b, v22.16b, v23.16b, #4 \n"

                    "fmla       v4.4s, v22.4s, %15.s[0]    \n"
                    "fmla       v5.4s, v23.4s, %15.s[0]    \n"

                    "ext        v27.16b, v23.16b, v24.16b, #4 \n"

                    "fmla       v6.4s, v22.4s, %14.s[0]    \n"
                    "fmla       v7.4s, v23.4s, %14.s[0]    \n"

                    "ext        v26.16b, v22.16b, v23.16b, #8 \n"

                    "fmla       v4.4s, v25.4s, %15.s[1]    \n"
                    "fmla       v5.4s, v27.4s, %15.s[1]    \n"

                    "ext        v28.16b, v23.16b, v24.16b, #8 \n"

                    "fmla       v6.4s, v25.4s, %14.s[1]    \n"
                    "fmla       v7.4s, v27.4s, %14.s[1]    \n"

                    "prfm       pldl1keep, [%5, #384]      \n"
                    "ld1        {v8.4s, v9.4s, v10.4s}, [%5] \n" // r2

                    "fmla       v4.4s, v26.4s, %15.s[2]    \n"
                    "fmla       v5.4s, v28.4s, %15.s[2]    \n"

                    "add        %5, %5, #32                \n"

                    "fmla       v6.4s, v26.4s, %14.s[2]    \n"
                    "fmla       v7.4s, v28.4s, %14.s[2]    \n"

                    "ext        v11.16b, v8.16b, v9.16b, #4 \n"

                    "fmla       v4.4s, v8.4s, %16.s[0]     \n"
                    "fmla       v5.4s, v9.4s, %16.s[0]     \n"

                    "ext        v13.16b, v9.16b, v10.16b, #4 \n"

                    "fmla       v6.4s, v8.4s, %15.s[0]     \n"
                    "fmla       v7.4s, v9.4s, %15.s[0]     \n"

                    "ext        v12.16b, v8.16b, v9.16b, #8 \n"

                    "fmla       v4.4s, v11.4s, %16.s[1]    \n"
                    "fmla       v5.4s, v13.4s, %16.s[1]    \n"

                    "ext        v14.16b, v9.16b, v10.16b, #8 \n"

                    "fmla       v6.4s, v11.4s, %15.s[1]    \n"
                    "fmla       v7.4s, v13.4s, %15.s[1]    \n"

                    "prfm       pldl1keep, [%3, #384]      \n"
                    "ld1        {v8.4s, v9.4s, v10.4s}, [%3] \n" // r0 next loop

                    "fmla       v4.4s, v12.4s, %16.s[2]    \n"
                    "fmla       v5.4s, v14.4s, %16.s[2]    \n"

                    "add        %3, %3, #32                \n"

                    "ext        v11.16b, v8.16b, v9.16b, #4 \n"

                    "fmla       v6.4s, v12.4s, %15.s[2]    \n"
                    "fmla       v7.4s, v14.4s, %15.s[2]    \n"

                    "ext        v13.16b, v9.16b, v10.16b, #4 \n"
                    "ext        v12.16b, v8.16b, v9.16b, #8 \n"

                    "st1        {v4.4s, v5.4s}, [%1], #32  \n"

                    "ext        v14.16b, v9.16b, v10.16b, #8 \n"

                    "subs       %w0, %w0, #1               \n"

                    "st1        {v6.4s, v7.4s}, [%2], #32  \n"

                    "bne        0b                         \n"
                    "sub        %3, %3, #32                \n"
                    : "=r"(nn),      // %0
                    "=r"(outptr),  // %1
                    "=r"(outptr2), // %2
                    "=r"(r0),      // %3
                    "=r"(r1),      // %4
                    "=r"(r2),      // %5
                    "=r"(r3)       // %6
                    : "0"(nn),
                    "1"(outptr),
                    "2"(outptr2),
                    "3"(r0),
                    "4"(r1),
                    "5"(r2),
                    "6"(r3),
                    "w"(_k012x), // %14
                    "w"(_k345x), // %15
                    "w"(_k678x), // %16
                    "w"(_bias0)  // %17
                    : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28");
            }
            if (remain >= 4)
            {
                remain -= 4;

                asm volatile(
                    "prfm       pldl1keep, [%2, #256]      \n"
                    "ld1        {v8.4s, v9.4s}, [%2]       \n" // r0
                    "add        %2, %2, #16                \n"

                    "and        v4.16b, %15.16b, %15.16b   \n" // v4 = _bias0
                    "and        v6.16b, %15.16b, %15.16b   \n" // v6 = _bias0

                    "prfm       pldl1keep, [%5, #256]      \n"
                    "ld1        {v16.4s, v17.4s}, [%5]     \n" // r3
                    "add        %5, %5, #16                \n"

                    "ext        v11.16b, v8.16b, v9.16b, #4 \n"
                    "ext        v15.16b, v16.16b, v17.16b, #4 \n"

                    "fmla       v4.4s, v8.4s, %12.s[0]     \n"
                    "fmla       v6.4s, v16.4s, %14.s[0]    \n"

                    "ext        v12.16b, v8.16b, v9.16b, #8 \n"
                    "ext        v19.16b, v16.16b, v17.16b, #8 \n"

                    "fmla       v4.4s, v11.4s, %12.s[1]    \n"
                    "fmla       v6.4s, v15.4s, %14.s[1]    \n"

                    "prfm       pldl1keep, [%3, #256]      \n"
                    "ld1        {v22.4s, v23.4s}, [%3]     \n" // r1

                    "fmla       v4.4s, v12.4s, %12.s[2]    \n"

                    "add        %3, %3, #16                \n"

                    "fmla       v6.4s, v19.4s, %14.s[2]    \n"

                    "ext        v25.16b, v22.16b, v23.16b, #4 \n"

                    "fmla       v4.4s, v22.4s, %13.s[0]    \n"
                    "fmla       v6.4s, v22.4s, %12.s[0]    \n"

                    "ext        v26.16b, v22.16b, v23.16b, #8 \n"

                    "fmla       v4.4s, v25.4s, %13.s[1]    \n"
                    "fmla       v6.4s, v25.4s, %12.s[1]    \n"

                    "prfm       pldl1keep, [%4, #256]      \n"
                    "ld1        {v8.4s, v9.4s}, [%4]       \n" // r2

                    "fmla       v4.4s, v26.4s, %13.s[2]    \n"

                    "add        %4, %4, #16                \n"

                    "fmla       v6.4s, v26.4s, %12.s[2]    \n"

                    "ext        v11.16b, v8.16b, v9.16b, #4 \n"

                    "fmla       v4.4s, v8.4s, %14.s[0]     \n"
                    "fmla       v6.4s, v8.4s, %13.s[0]     \n"

                    "ext        v12.16b, v8.16b, v9.16b, #8 \n"

                    "fmla       v4.4s, v11.4s, %14.s[1]    \n"
                    "fmla       v6.4s, v11.4s, %13.s[1]    \n"

                    "fmla       v4.4s, v12.4s, %14.s[2]    \n"
                    "fmla       v6.4s, v12.4s, %13.s[2]    \n"

                    "st1        {v4.4s}, [%0], #16         \n"
                    "st1        {v6.4s}, [%1], #16         \n"
                    : "=r"(outptr),  // %0
                    "=r"(outptr2), // %1
                    "=r"(r0),      // %2
                    "=r"(r1),      // %3
                    "=r"(r2),      // %4
                    "=r"(r3)       // %5
                    : "0"(outptr),
                    "1"(outptr2),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k012x), // %12
                    "w"(_k345x), // %13
                    "w"(_k678x), // %14
                    "w"(_bias0)  // %15
                    : "cc", "memory", "v4", "v6", "v8", "v9", "v11", "v12", "v15", "v16", "v17", "v18", "v19", "v22", "v23", "v25", "v26");
            }
#else
            if (nn > 0)
            {
                asm volatile(
                    "pld        [%3, #192]          \n"
                    "vld1.f32   {d18-d20}, [%3 :64] \n" // r0
                    "add        %3, #16             \n"

                    "vext.32    q11, q9, q10, #1    \n"
                    "vext.32    q12, q9, q10, #2    \n"

                    "0:                             \n"

                    "vmul.f32   q7, q9, %e14[0]     \n"

                    "vand       q13, %q17, %q17     \n" // q13 = _bias0

                    "vmul.f32   q6, q11, %e14[1]    \n"
                    "vmla.f32   q13, q12, %f14[0]   \n"

                    "pld        [%4, #192]          \n"
                    "vld1.f32   {d18-d20}, [%4]     \n" // r1
                    "add        %4, #16             \n"

                    "vmla.f32   q7, q9, %e15[0]     \n"

                    "vext.32    q11, q9, q10, #1    \n"
                    "vext.32    q12, q9, q10, #2    \n"

                    "vmla.f32   q6, q11, %e15[1]    \n"
                    "vmla.f32   q13, q12, %f15[0]   \n"

                    "vmul.f32   q8, q9, %e14[0]     \n"

                    "vand       q15, %q17, %q17     \n" // q15 = _bias0

                    "vmul.f32   q14, q11, %e14[1]   \n"
                    "vmla.f32   q15, q12, %f14[0]   \n"

                    "pld        [%5, #192]          \n"
                    "vld1.f32   {d18-d20}, [%5 :64] \n" // r2
                    "add        %5, #16             \n"

                    "vmla.f32   q7, q9, %e16[0]     \n"

                    "vext.32    q11, q9, q10, #1    \n"
                    "vext.32    q12, q9, q10, #2    \n"

                    "vmla.f32   q6, q11, %e16[1]    \n"
                    "vmla.f32   q13, q12, %f16[0]   \n"

                    "vmla.f32   q8, q9, %e15[0]     \n"
                    "vmla.f32   q14, q11, %e15[1]   \n"
                    "vmla.f32   q15, q12, %f15[0]   \n"

                    "pld        [%6, #192]          \n"
                    "vld1.f32   {d18-d20}, [%6]     \n" // r3
                    "add        %6, #16             \n"

                    "vmla.f32   q8, q9, %e16[0]     \n"

                    "vext.32    q11, q9, q10, #1    \n"
                    "vext.32    q12, q9, q10, #2    \n"

                    "vmla.f32   q14, q11, %e16[1]   \n"
                    "vmla.f32   q15, q12, %f16[0]   \n"

                    "vadd.f32   q7, q7, q6          \n"

                    "pld        [%3, #192]          \n"
                    "vld1.f32   {d18-d20}, [%3 :64] \n" // r0

                    "vadd.f32   q8, q8, q14         \n"
                    "vadd.f32   q7, q7, q13         \n"
                    "vadd.f32   q8, q8, q15         \n"

                    "vext.32    q11, q9, q10, #1    \n"
                    "vext.32    q12, q9, q10, #2    \n"

                    "add        %3, #16             \n"

                    "vst1.f32   {d14-d15}, [%1]!    \n"
                    "vst1.f32   {d16-d17}, [%2]!    \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"

                    "sub        %3, #16             \n"
                    : "=r"(nn),      // %0
                    "=r"(outptr),  // %1
                    "=r"(outptr2), // %2
                    "=r"(r0),      // %3
                    "=r"(r1),      // %4
                    "=r"(r2),      // %5
                    "=r"(r3)       // %6
                    : "0"(nn),
                    "1"(outptr),
                    "2"(outptr2),
                    "3"(r0),
                    "4"(r1),
                    "5"(r2),
                    "6"(r3),
                    "w"(_k012x), // %14
                    "w"(_k345x), // %15
                    "w"(_k678x), // %16
                    "w"(_bias0)  // %17
                    : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // scalar tail: one output column at a time, still two rows
            for (; remain > 0; remain--)
            {
#if __ARM_NEON
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r30 = vld1q_f32(r3);

                float32x4_t _sum = vmulq_f32(_r00, _k012x);
                _sum = vmlaq_f32(_sum, _r10, _k345x);
                _sum = vmlaq_f32(_sum, _r20, _k678x);

                float32x4_t _sum2 = vmulq_f32(_r10, _k012x);
                _sum2 = vmlaq_f32(_sum2, _r20, _k345x);
                _sum2 = vmlaq_f32(_sum2, _r30, _k678x);

                // kernel lane 3 is zero, so dropping the bias into lane 3
                // makes the horizontal add produce sum-of-taps + bias
                _sum = vsetq_lane_f32(bias0, _sum, 3);
                _sum2 = vsetq_lane_f32(bias0, _sum2, 3);
#if __aarch64__
                *outptr = vaddvq_f32(_sum);
                *outptr2 = vaddvq_f32(_sum2);
#else
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));

                float32x2_t _sss2 = vpadd_f32(_ss, _ss2);

                *outptr = vget_lane_f32(_sss2, 0);
                *outptr2 = vget_lane_f32(_sss2, 1);
#endif // __aarch64__
#else
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                float sum2 = bias0;
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];

                *outptr = sum;
                *outptr2 = sum2;
#endif
                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
                outptr2++;
            }

            // advance past the 2-pixel border and the extra row consumed
            // by producing two output rows
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;

            outptr += outw;
            outptr2 += outw;
        }

        // tail: single remaining output row (odd outh)
        for (; i < outh; i++)
        {
#if __ARM_NEON
#if __aarch64__
            int nn = outw >> 3;
            int remain = outw & 7;
#else
            int nn = outw >> 2;
            int remain = outw & 3;
#endif // __aarch64__
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                asm volatile(
                    "prfm       pldl1keep, [%2, #384]      \n"
                    "ld1        {v8.4s, v9.4s, v10.4s}, [%2] \n" // r0
                    "add        %2, %2, #32                \n"

                    "ext        v12.16b, v8.16b, v9.16b, #4 \n"
                    "ext        v14.16b, v9.16b, v10.16b, #4 \n"

                    "0:                                    \n"

                    "fmul       v6.4s, v8.4s, %10.s[0]     \n"

                    "and        v4.16b, %13.16b, %13.16b   \n" // v4 = _bias0

                    "fmul       v7.4s, v9.4s, %10.s[0]     \n"

                    "and        v5.16b, %13.16b, %13.16b   \n" // v5 = _bias0

                    "fmla       v4.4s, v12.4s, %10.s[1]    \n"

                    "ext        v13.16b, v8.16b, v9.16b, #8 \n"

                    "fmla       v5.4s, v14.4s, %10.s[1]    \n"

                    "ext        v15.16b, v9.16b, v10.16b, #8 \n"

                    "fmla       v6.4s, v13.4s, %10.s[2]    \n"

                    "prfm       pldl1keep, [%3, #384]      \n"
                    "ld1        {v16.4s, v17.4s, v18.4s}, [%3] \n" // r1

                    "fmla       v7.4s, v15.4s, %10.s[2]    \n"

                    "add        %3, %3, #32                \n"

                    "fmla       v4.4s, v16.4s, %11.s[0]    \n"

                    "ext        v20.16b, v16.16b, v17.16b, #4 \n"

                    "fmla       v5.4s, v17.4s, %11.s[0]    \n"

                    "ext        v22.16b, v17.16b, v18.16b, #4 \n"

                    "fmla       v6.4s, v20.4s, %11.s[1]    \n"

                    "ext        v21.16b, v16.16b, v17.16b, #8 \n"

                    "fmla       v7.4s, v22.4s, %11.s[1]    \n"

                    "ext        v23.16b, v17.16b, v18.16b, #8 \n"

                    "fmla       v4.4s, v21.4s, %11.s[2]    \n"

                    "prfm       pldl1keep, [%4, #384]      \n"
                    "ld1        {v24.4s, v25.4s, v26.4s}, [%4] \n" // r2

                    "fmla       v5.4s, v23.4s, %11.s[2]    \n"

                    "add        %4, %4, #32                \n"

                    "fmla       v6.4s, v24.4s, %12.s[0]    \n"

                    "ext        v12.16b, v24.16b, v25.16b, #4 \n"

                    "fmla       v7.4s, v25.4s, %12.s[0]    \n"

                    "ext        v14.16b, v25.16b, v26.16b, #4 \n"

                    "fmla       v4.4s, v12.4s, %12.s[1]    \n"

                    "ext        v13.16b, v24.16b, v25.16b, #8 \n"

                    "fmla       v5.4s, v14.4s, %12.s[1]    \n"

                    "ext        v15.16b, v25.16b, v26.16b, #8 \n"

                    "fmla       v6.4s, v13.4s, %12.s[2]    \n"
                    "fmla       v7.4s, v15.4s, %12.s[2]    \n"

                    "prfm       pldl1keep, [%2, #384]      \n"
                    "ld1        {v8.4s, v9.4s, v10.4s}, [%2] \n" // r0 next loop

                    "fadd       v4.4s, v4.4s, v6.4s        \n"

                    "add        %2, %2, #32                \n"

                    "fadd       v5.4s, v5.4s, v7.4s        \n"

                    "ext        v12.16b, v8.16b, v9.16b, #4 \n"
                    "ext        v14.16b, v9.16b, v10.16b, #4 \n"

                    "subs       %w0, %w0, #1               \n"

                    "st1        {v4.4s, v5.4s}, [%1], #32  \n"

                    "bne        0b                         \n"
                    "sub        %2, %2, #32                \n"
                    : "=r"(nn),     // %0
                    "=r"(outptr), // %1
                    "=r"(r0),     // %2
                    "=r"(r1),     // %3
                    "=r"(r2)      // %4
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "w"(_k012x), // %10
                    "w"(_k345x), // %11
                    "w"(_k678x), // %12
                    "w"(_bias0)  // %13
                    : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v20", "v21", "v22", "v23", "v24", "v25", "v26");
            }
            if (remain >= 4)
            {
                remain -= 4;

                asm volatile(
                    "prfm       pldl1keep, [%1, #192]      \n"
                    "ld1        {v8.4s, v9.4s}, [%1]       \n" // r0
                    "add        %1, %1, #16                \n"

                    "and        v4.16b, %11.16b, %11.16b   \n" // v4 = _bias0

                    "ext        v12.16b, v8.16b, v9.16b, #4 \n"

                    "fmul       v6.4s, v8.4s, %8.s[0]      \n"

                    "ext        v13.16b, v8.16b, v9.16b, #8 \n"

                    "fmla       v4.4s, v12.4s, %8.s[1]     \n"

                    "prfm       pldl1keep, [%2, #192]      \n"
                    "ld1        {v16.4s, v17.4s}, [%2]     \n" // r1
                    "add        %2, %2, #16                \n"

                    "fmla       v6.4s, v13.4s, %8.s[2]     \n"

                    "ext        v20.16b, v16.16b, v17.16b, #4 \n"

                    "fmla       v4.4s, v16.4s, %9.s[0]     \n"

                    "ext        v21.16b, v16.16b, v17.16b, #8 \n"

                    "fmla       v6.4s, v20.4s, %9.s[1]     \n"

                    "prfm       pldl1keep, [%3, #192]      \n"
                    "ld1        {v24.4s, v25.4s}, [%3]     \n" // r2
                    "add        %3, %3, #16                \n"

                    "fmla       v4.4s, v21.4s, %9.s[2]     \n"

                    "ext        v12.16b, v24.16b, v25.16b, #4 \n"

                    "fmla       v6.4s, v24.4s, %10.s[0]    \n"

                    "ext        v13.16b, v24.16b, v25.16b, #8 \n"

                    "fmla       v4.4s, v12.4s, %10.s[1]    \n"
                    "fmla       v6.4s, v13.4s, %10.s[2]    \n"

                    "fadd       v4.4s, v4.4s, v6.4s        \n"

                    "st1        {v4.4s}, [%0], #16         \n"
                    : "=r"(outptr), // %0
                    "=r"(r0),     // %1
                    "=r"(r1),     // %2
                    "=r"(r2)      // %3
                    : "0"(outptr),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k012x), // %8
                    "w"(_k345x), // %9
                    "w"(_k678x), // %10
                    "w"(_bias0)  // %11
                    : "cc", "memory", "v4", "v6", "v8", "v9", "v12", "v13", "v16", "v17", "v20", "v21", "v24", "v25");
            }
#else
            if (nn > 0)
            {
                asm volatile(
                    "pld        [%2, #192]          \n"
                    "vld1.f32   {d16-d18}, [%2]     \n" // r0
                    "add        %2, #16             \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "0:                             \n"

                    "vmul.f32   q7, q8, %e10[0]     \n"

                    "vand       q14, %q13, %q13     \n" // q14 = _bias0

                    "vmul.f32   q13, q10, %e10[1]   \n"
                    "vmla.f32   q14, q11, %f10[0]   \n"

                    "pld        [%3, #192]          \n"
                    "vld1.f32   {d16-d18}, [%3]     \n" // r1
                    "add        %3, #16             \n"

                    "vmla.f32   q7, q8, %e11[0]     \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "vmla.f32   q13, q10, %e11[1]   \n"
                    "vmla.f32   q14, q11, %f11[0]   \n"

                    "pld        [%4, #192]          \n"
                    "vld1.f32   {d16-d18}, [%4]     \n" // r2
                    "add        %4, #16             \n"

                    "vmla.f32   q7, q8, %e12[0]     \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "vmla.f32   q13, q10, %e12[1]   \n"
                    "vmla.f32   q14, q11, %f12[0]   \n"

                    "pld        [%2, #192]          \n"
                    "vld1.f32   {d16-d18}, [%2]     \n" // r0
                    "add        %2, #16             \n"

                    "vadd.f32   q7, q7, q13         \n"
                    "vadd.f32   q7, q7, q14         \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "vst1.f32   {d14-d15}, [%1]!    \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"

                    "sub        %2, #16             \n"
                    : "=r"(nn),     // %0
                    "=r"(outptr), // %1
                    "=r"(r0),     // %2
                    "=r"(r1),     // %3
                    "=r"(r2)      // %4
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "w"(_k012x), // %10
                    "w"(_k345x), // %11
                    "w"(_k678x), // %12
                    "w"(_bias0)  // %13
                    : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // scalar tail for the single-row case
            for (; remain > 0; remain--)
            {
#if __ARM_NEON
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r20 = vld1q_f32(r2);

                float32x4_t _sum = vmulq_f32(_r00, _k012x);
                _sum = vmlaq_f32(_sum, _r10, _k345x);
                _sum = vmlaq_f32(_sum, _r20, _k678x);

                _sum = vsetq_lane_f32(bias0, _sum, 3);
#if __aarch64__
                *outptr = vaddvq_f32(_sum);
#else
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                _ss = vpadd_f32(_ss, _ss);

                *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;
#endif
                r0++;
                r1++;
                r2++;
                outptr++;
            }

            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

static void convdw3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh =
top_blob.h; const int group = bottom_blob.c; const int tailstep = w - 2 * outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? bias[g] : 0.f; const float* kernel0 = kernel + g * 9; float* outptr = out; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; #if __ARM_NEON float32x4_t _k012x = vld1q_f32(kernel0); float32x4_t _k345x = vld1q_f32(kernel0 + 3); float32x4_t _k678x = vld1q_f32(kernel0 + 6); _k012x = vsetq_lane_f32(0.f, _k012x, 3); _k345x = vsetq_lane_f32(0.f, _k345x, 3); _k678x = vsetq_lane_f32(0.f, _k678x, 3); float32x4_t _bias0 = vdupq_n_f32(bias0); #else const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #endif // __ARM_NEON int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "and v11.16b, %13.16b, %13.16b \n" // v11 = _bias0 "0: \n" "fmul v0.4s, v2.4s, %10.s[0] \n" "fmul v10.4s, v3.4s, %10.s[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v8.4s, v9.4s}, [%2] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %10.s[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v2.4s, v3.4s}, [%3], #32 \n" "fmla v0.4s, v2.4s, %11.s[0] \n" "fmla v10.4s, v3.4s, %11.s[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v8.4s, v9.4s}, [%3] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %11.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v2.4s, v3.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %12.s[0] \n" "fmla v10.4s, v3.4s, %12.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v8.4s, v9.4s}, [%4] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %12.s[2] \n" "prfm pldl1keep, [%2, #256] 
\n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "fadd v0.4s, v0.4s, v10.4s \n" "fadd v0.4s, v0.4s, v11.4s \n" "and v11.16b, %13.16b, %13.16b \n" // v11 = _bias0 "subs %w0, %w0, #1 \n" "st1 {v0.4s}, [%1], #16 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k012x), // %10 "w"(_k345x), // %11 "w"(_k678x), // %12 "w"(_bias0) // %13 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vand q11, %q13, %q13 \n" "0: \n" "vmul.f32 q0, q2, %e10[0] \n" "vmul.f32 q10, q3, %e10[1] \n" "pld [%2, #128] \n" "vld2.f32 {d16-d17}, [%2] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f10[0] \n" "pld [%3, #256] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vmla.f32 q0, q2, %e11[0] \n" "vmla.f32 q10, q3, %e11[1] \n" "pld [%3, #128] \n" "vld2.f32 {d16-d17}, [%3] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f11[0] \n" "pld [%4, #256] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vmla.f32 q0, q2, %e12[0] \n" "vmla.f32 q10, q3, %e12[1] \n" "pld [%4, #128] \n" "vld2.f32 {d16-d17}, [%4] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f12[0] \n" "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vadd.f32 q0, q0, q10 \n" "vadd.f32 q0, q0, q11 \n" "vand q11, %q13, %q13 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k012x), // %10 "w"(_k345x), // %11 "w"(_k678x), // %12 "w"(_bias0) // %13 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k012x); _sum = vmlaq_f32(_sum, _r10, _k345x); _sum = vmlaq_f32(_sum, _r20, _k678x); _sum = vsetq_lane_f32(bias0, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
reduction-13.c
/* reduction-13.c: OpenMP testsuite case for reductions over array sections
   (runtime bounds, negative lower bounds, pointer-based sections).  Any
   miscomputed reduction aborts the program. */
char z[10] = { 0 };

/* noinline/noclone stop the compiler from specializing foo() for its single
   call site, so the array-section reductions are exercised in general form.
   p1..p7, s, t feed the section bounds; at the call site s == -1 and t == 0,
   so several sections deliberately start before the passed pointer. */
__attribute__((noinline, noclone)) void
foo (int (*x)[3][2], int *y, long w[1][2], int p1, long p2, long p3,
     int p4, int p5, long p6, short p7, int s, int t)
{
  unsigned long long a[p7 + 4];  /* VLA: target of the |-reduction */
  short b[p7];                   /* VLA: target of the max-reduction */
  int i;
  for (i = 0; i < p7 + 4; i++)
    {
      if (i < p7)
	b[i] = -6;
      a[i] = 0;
    }
  /* One reduction clause per operator, each over an array section whose
     length and offset are runtime values. */
  #pragma omp parallel for reduction(+:x[-1:p1 + 1][:p2], z[t + 2:p3]) \
      reduction(*:y[-s:p4]) reduction(|:a[s + 3:p5]) \
      reduction(&:w[s + 1:p6 - 1][t:p6]) reduction(max:b[2:])
  for (i = 0; i < 128; i++)
    {
      x[i / 64 - 1][i % 3][(i / 4) & 1] += i;
      if ((i & 15) == 1)
	y[1] *= 3;
      if ((i & 31) == 2)
	y[2] *= 7;
      if ((i & 63) == 3)
	y[3] *= 17;
      z[i / 32 + 2] += (i & 3);
      if (i < 4)
	z[i + 2] += i;
      a[i / 32 + 2] |= 1ULL << (i & 30);
      w[0][i & 1] &= ~(1L << (i / 17 * 3));
      if ((i % 23) > b[2])
	b[2] = i % 23;
      if ((i % 85) > b[3])
	b[3] = i % 85;
      if ((i % 192) > b[4])
	b[4] = i % 192;
    }
  /* Verify the | and max reductions here; the +, * and & results are
     checked against reference arrays by main(). */
  for (i = 0; i < 9; i++)
    if (a[i] != ((i < 6 && i >= 2) ? 0x55555555ULL : 0))
      __builtin_abort ();
  if (b[0] != -6 || b[1] != -6 || b[2] != 22 || b[3] != 84 || b[4] != 127)
    __builtin_abort ();
}

/* Driver: runs foo() once with known inputs and compares every reduced
   array against a precomputed reference. */
int
main ()
{
  int a[4][3][2] = {};
  static int a2[4][3][2] = {{{ 0, 0 }, { 0, 0 }, { 0, 0 }},
			    {{ 312, 381 }, { 295, 356 }, { 337, 335 }},
			    {{ 1041, 975 }, { 1016, 1085 }, { 935, 1060 }},
			    {{ 0, 0 }, { 0, 0 }, { 0, 0 }}};
  int y[5] = { 0, 1, 1, 1, 0 };
  int y2[5] = { 0, 6561, 2401, 289, 0 };
  char z2[10] = { 0, 0, 48, 49, 50, 51, 0, 0, 0, 0 };
  long w[1][2] = { ~0L, ~0L };
  /* Note &a[2] with section x[-1:...]: the section legitimately reaches
     back to a[1]. */
  foo (&a[2], y, w, 1, 3L, 4L, 3, 4, 2L, 5, -1, 0);
  if (__builtin_memcmp (a, a2, sizeof (a))
      || __builtin_memcmp (y, y2, sizeof (y))
      || __builtin_memcmp (z, z2, sizeof (z))
      || w[0][0] != ~0x249249L || w[0][1] != ~0x249249L)
    __builtin_abort ();
  return 0;
}
apm.c
/** * APPROXIMATE PATTERN MATCHING * * INF560 X2016 */ #include <string.h> #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include <unistd.h> #include <sys/time.h> #include <mpi.h> #include <omp.h> #define APM_DEBUG 0 char * read_input_file( char * filename, int * size ) { char * buf ; off_t fsize; int fd = 0 ; int n_bytes = 1 ; /* Open the text file */ fd = open( filename, O_RDONLY ) ; if ( fd == -1 ) { fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ; return NULL ; } /* Get the number of characters in the textfile */ fsize = lseek(fd, 0, SEEK_END); lseek(fd, 0, SEEK_SET); /* TODO check return of lseek */ #if APM_DEBUG printf( "File length: %lld\n", fsize ) ; #endif /* Allocate data to copy the target text */ buf = (char *)malloc( fsize * sizeof ( char ) ) ; if ( buf == NULL ) { fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n", fsize ) ; return NULL ; } n_bytes = read( fd, buf, fsize ) ; if ( n_bytes != fsize ) { fprintf( stderr, "Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n", fsize, n_bytes) ; return NULL ; } #if APM_DEBUG printf( "Number of read bytes: %d\n", n_bytes ) ; #endif *size = n_bytes ; close( fd ) ; return buf ; } #define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c))) int levenshtein(char *s1, char *s2, int len, int * column) { unsigned int x, y, lastdiag, olddiag; for (y = 1; y <= len; y++) { column[y] = y; } for (x = 1; x <= len; x++) { column[0] = x; lastdiag = x-1 ; for (y = 1; y <= len; y++) { olddiag = column[y]; column[y] = MIN3( column[y] + 1, column[y-1] + 1, lastdiag + (s1[y-1] == s2[x-1] ? 
0 : 1) ); lastdiag = olddiag; } } return(column[len]); } int main( int argc, char ** argv ) { /* MPI initialisation */ int nb_nodes; int rank; MPI_Status status; int required = MPI_THREAD_FUNNELED; int provided; MPI_Init_thread(&argc, &argv, required, &provided); MPI_Comm_size(MPI_COMM_WORLD, &nb_nodes); MPI_Comm_rank(MPI_COMM_WORLD, &rank); #if APM_DEBUG char hostname[256]; gethostname(hostname, sizeof(hostname)); printf("Process MPI rank %d of PID %d on %s ready for attach\n",rank, getpid(), hostname); #endif char ** pattern ; char * filename ; int approx_factor = 0 ; int nb_patterns = 0 ; int i, j ; char * buf ; struct timeval t1, t2; double duration ; int n_bytes ; int tmp_matches ; int * n_matches ; int * global_matches; /* Check number of arguments */ if ( argc < 4 ) { printf( "Usage: %s approximation_factor " "dna_database pattern1 pattern2 ...\n", argv[0] ) ; return 1 ; } /* Get the distance factor */ approx_factor = atoi( argv[1] ) ; /* Grab the filename containing the target text */ filename = argv[2] ; /* Get the number of patterns that the user wants to search for */ nb_patterns = argc - 3 ; /* Fill the pattern array */ pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ; if ( pattern == NULL ) { fprintf( stderr, "Unable to allocate array of pattern of size %d\n", nb_patterns ) ; return 1 ; } /* Grab the patterns */ int max_len_pattern = 0; for ( i = 0 ; i < nb_patterns ; i++ ) { int l ; l = strlen(argv[i+3]) ; if ( l <= 0 ) { fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ; return 1 ; } else if (l > max_len_pattern) { max_len_pattern = l; } pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ; if ( pattern[i] == NULL ) { fprintf( stderr, "Unable to allocate string of size %d\n", l ) ; return 1 ; } strncpy( pattern[i], argv[i+3], (l+1) ) ; } printf( "Approximate Pattern Mathing: " "looking for %d pattern(s) in file %s w/ distance of %d\n", nb_patterns, filename, approx_factor ) ; /* Allocate the array of local matches */ 
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ; if ( n_matches == NULL ) { fprintf( stderr, "Error: unable to allocate memory for %ldB\n", nb_patterns * sizeof( int ) ) ; return 1 ; } /* Allocate the array of global matches for the reductin of local n_matches*/ global_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ; if ( global_matches == NULL ) { fprintf( stderr, "Error: unable to allocate memory for %ldB\n", nb_patterns * sizeof( int ) ) ; return 1 ; } /***** * BEGIN MAIN LOOP ******/ /* Timer start */ gettimeofday(&t1, NULL); /* Since we have nb_nodes MPI process we are going to divide our textfile into nb_nodes parts while taking care that the biggest pattern have access to all it needs. rank 0 treats from 0 to n_bytes//nb_nodes - 1 + (max_len_pattern - 1) rank 1 treats from n_bytes//nb_nodes to 2*(n_bytes//size) - 1 + (max_len_pattern - 1) . rank i treat from i*(n_bytes//nb_nodes) to (i+1)*(n_bytes//size) - 1 + (max_len_pattern - 1) . rank (nb_nodes-1) treat from (nb_nodes-1)*(n_bytes//nb_nodes) to END */ /* rank 0 play the role of divider */ int part_bytes; // the number of bytes of the process part textfile MPI_Request requests[nb_nodes-1]; MPI_Status statutes[nb_nodes-1]; if (rank == 0) { /* reading input file */ buf = read_input_file( filename, &n_bytes ) ; if ( buf == NULL ) { return 1 ; } int start = 0; // start index of process int end = n_bytes/nb_nodes - 1 + (max_len_pattern - 1); // end index of process #if APM_DEBUG printf( "MPI rank 0 will treat from bytes %d to %d\n", start, end); #endif for (int i = 1; i < nb_nodes; i++) { /* Index and process part bytes */ start += (n_bytes/nb_nodes); end += (n_bytes/nb_nodes); if (i == nb_nodes - 1 || end > n_bytes) { end = n_bytes; } part_bytes = end - start + 1 ; #if APM_DEBUG printf("MPI rank %d will treat from bytes %d to %d\n",i,start,end); #endif /* Sending to each process other than 0*/ /* the part_bytes so they how much memory to allocate */ 
MPI_Send(&part_bytes,1,MPI_INTEGER,i,0,MPI_COMM_WORLD); #if APM_DEBUG printf("Rank 0 sended part_bytes : %d to rank %d\n",part_bytes,i); #endif MPI_Send(&buf[start],part_bytes,MPI_BYTE,i,1,MPI_COMM_WORLD); #if APM_DEBUG printf("Rank 0 sended a part_buffer to rank %d\n",i); #endif /* the start & end index of their part */ } // Reset part_bytes for process 0 : part_bytes = n_bytes/nb_nodes - 1 + max_len_pattern - 1; } else { // other process receive : // first : part_bytes : MPI_Recv(&part_bytes,1,MPI_INTEGER,0,0,MPI_COMM_WORLD,&status); // so they know how much to allocate buf = (char *) malloc((part_bytes+1)*sizeof(char)); if ( buf == NULL ) { fprintf( stderr, "Unable to allocate %ld byte(s) for buf array\n",part_bytes); return -1; } // secondly : part textfile : MPI_Recv(buf,part_bytes,MPI_BYTE,0,1,MPI_COMM_WORLD,&status); } for ( i = 0 ; i < nb_patterns ; i++ ) { int size_pattern = strlen(pattern[i]) ; int * column ; n_matches[i] = 0 ; tmp_matches = 0 ; #pragma omp parallel { int j_end = (rank == nb_nodes-1) ? 
part_bytes : part_bytes - size_pattern + 1; #pragma omp for schedule(guided) reduction(+:tmp_matches) for ( j = 0 ; j < j_end ; j++ ) { column = (int *)malloc( (size_pattern+1) * sizeof( int ) ) ; if ( column == NULL ) { fprintf( stderr, "Error: unable to allocate memory for column (%ldB)\n", (size_pattern+1) * sizeof( int ) ) ; exit(1); } int distance = 0 ; int size ; #if APM_DEBUG if ( j % 10000 == 0 ) { printf( "MPI rank %d : Processing byte %d (out of %d)\n",rank, j, part_bytes ) ; printf("local matches of rank %d: ",rank); for (int i = 0; i < nb_patterns; i++) { printf("%d,",n_matches[i]); } printf("\n"); } #endif size = size_pattern ; // modifying the edge case for the last MPI process if ( part_bytes - j < size_pattern ) { size = part_bytes - j ; } distance = levenshtein( pattern[i], &buf[j], size, column ) ; if ( distance <= approx_factor ) { tmp_matches = tmp_matches + 1 ; } } } free( column ); n_matches[i] = tmp_matches; } /* Sum the matches of each process with a MPI reduction */ MPI_Reduce(n_matches, global_matches, nb_patterns, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); /* Timer stop */ gettimeofday(&t2, NULL); duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6); #if APM_DEBUG printf("Rank %d finished :",rank); for ( i = 0 ; i < nb_patterns ; i++ ) { printf( "%d, ",n_matches[i] ) ; } printf("=============\n"); #endif /***** * END MAIN LOOP ******/ if (rank == 0) { printf( "APM done in %lf s\n", duration ) ; for ( i = 0 ; i < nb_patterns ; i++ ) { printf( "Number of matches for pattern <%s>: %d\n", pattern[i], global_matches[i] ) ; } } MPI_Finalize(); return 0 ; }
geo_yeefdtd.kernel_runtime.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include "local_header.h" #include "openmp_pscmc_inc.h" #include "geo_yeefdtd.kernel_inc.h" int openmp_YEE_CURL_R_init (openmp_pscmc_env * pe ,openmp_YEE_CURL_R_struct * kerstr ){ return 0 ;} void openmp_YEE_CURL_R_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_YEE_CURL_R_struct )); } int openmp_YEE_CURL_R_get_num_compute_units (openmp_YEE_CURL_R_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_YEE_CURL_R_get_xlen (){ return IDX_OPT_MAX ;} int openmp_YEE_CURL_R_exec (openmp_YEE_CURL_R_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_YEE_CURL_R_scmc_kernel ( ( kerstr )->inoutE1 , ( kerstr )->inB0 , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_YEE_CURL_R_scmc_set_parameter_inoutE1 (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inoutE1 = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_inB0 (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inB0 = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_xoffset (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xoffset = pm->d_data); } int 
openmp_YEE_CURL_R_scmc_set_parameter_yoffset (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yoffset = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_zoffset (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zoffset = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_y_cpu_core (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_numvec (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_XLEN (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_YLEN (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_ZLEN (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_ovlp (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_xblock (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_yblock (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_zblock (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_num_ele (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_YEE_CURL_R_scmc_set_parameter_DT (openmp_YEE_CURL_R_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_GEO_YEE_CURL_L_init (openmp_pscmc_env * pe ,openmp_GEO_YEE_CURL_L_struct * kerstr ){ return 
0 ;} void openmp_GEO_YEE_CURL_L_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_GEO_YEE_CURL_L_struct )); } int openmp_GEO_YEE_CURL_L_get_num_compute_units (openmp_GEO_YEE_CURL_L_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_GEO_YEE_CURL_L_get_xlen (){ return IDX_OPT_MAX ;} int openmp_GEO_YEE_CURL_L_exec (openmp_GEO_YEE_CURL_L_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_GEO_YEE_CURL_L_scmc_kernel ( ( kerstr )->inoutE1 , ( kerstr )->inB0 , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->x0)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_GEO_YEE_CURL_L_scmc_set_parameter_inoutE1 (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inoutE1 = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_inB0 (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inB0 = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_xoffset (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xoffset = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_yoffset (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm 
){ ( ( kerstr )->yoffset = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_zoffset (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zoffset = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_y_cpu_core (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_numvec (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_XLEN (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_YLEN (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_ZLEN (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_ovlp (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_xblock (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_yblock (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_zblock (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_num_ele (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DT (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DELTA_Z (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem 
* pm ){ ( ( kerstr )->DELTA_Z = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DELTA_Y (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_Y = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DELTA_X (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_X = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_x0 (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->x0 = pm->d_data); }
gemm_x_dia_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/* Multi-vector SpMM for a DIA-format sparse matrix:
 *   Y := alpha * A * X + beta * Y
 * where A is mat (DIA storage), X is a dense block of `columns` right-hand
 * sides with leading dimension ldx, and Y has leading dimension ldy.
 * index2(r, c, ld) is presumably the row-major flat index r*ld + c, and
 * alpha_mul/alpha_madde the project's typed multiply / multiply-accumulate
 * macros -- TODO confirm against alphasparse/util. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat,
                           const ALPHA_Number *x, const ALPHA_INT columns,
                           const ALPHA_INT ldx, const ALPHA_Number beta,
                           ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* Phase 1: scale every row of Y by beta, parallel over rows. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < mat->rows; ++r)
    {
        ALPHA_Number *Y = &y[index2(r, 0, ldy)];
        for (ALPHA_INT c = 0; c < columns; c++)
            alpha_mul(Y[c], Y[c], beta);
    }

    /* Phase 2: accumulate alpha * A * X.  Threads partition the RHS columns
     * via cross_block_low/high into disjoint slices [bcl, bch), so no two
     * threads ever write the same element of Y.  The implicit barrier at the
     * end of phase 1 orders the beta-scaling before the accumulation. */
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT bcl = cross_block_low(tid, num_threads, columns);
        ALPHA_INT bch = cross_block_high(tid, num_threads, columns);
        /* Every thread walks all stored diagonals; distance[di] < 0 is a
         * sub-diagonal, > 0 a super-diagonal. */
        for (ALPHA_INT di = 0; di < mat->ndiag; ++di)
        {
            ALPHA_INT d = mat->distance[di];
            ALPHA_INT ars = alpha_max(0, -d); /* first row the diagonal touches */
            ALPHA_INT acs = alpha_max(0, d);  /* first column it touches */
            ALPHA_INT an = alpha_min(mat->rows - ars, mat->cols - acs); /* its length */
            for (ALPHA_INT i = 0; i < an; ++i)
            {
                ALPHA_INT ar = ars + i;
                ALPHA_INT ac = acs + i;
                ALPHA_Number *Y = &y[index2(ar, 0, ldy)];
                const ALPHA_Number *X = &x[index2(ac, 0, ldx)];
                ALPHA_Number val;
                /* val = alpha * A(ar, ac); values are stored diagonal-major
                 * with leading dimension mat->lval. */
                alpha_mul(val, mat->values[index2(di, ar, mat->lval)], alpha);
                for (ALPHA_INT bc = bcl; bc < bch; ++bc)
                {
                    /* Y[ar][bc] += val * X[ac][bc] for this thread's columns. */
                    alpha_madde(Y[bc], val, X[bc]);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
pfem_2_monolithic_slip_strategy.h
#ifndef KRATOS_PFEM2_MONOLITHIC_SLIP_STRATEGY_H #define KRATOS_PFEM2_MONOLITHIC_SLIP_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/implicit_solving_strategy.h" //#include "custom_elements/fractional_step.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h" #include "solving_strategies/strategies/residualbased_linear_strategy.h" #include "custom_utilities/solver_settings.h" namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template<class TSparseSpace, class TDenseSpace, class TLinearSolver > class PFEM2MonolithicSlipStrategy : public ImplicitSolvingStrategy<TSparseSpace,TDenseSpace,TLinearSolver> { public: ///@name Type Definitions ///@{ /// Counted pointer of FSStrategy typedef boost::shared_ptr< FSStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer; typedef ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename 
ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef SolverSettings<TSparseSpace,TDenseSpace,TLinearSolver> SolverSettingsType; ///@} ///@name Life Cycle ///@{ PFEM2MonolithicSlipStrategy(ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector): BaseType(rModelPart,false), mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { InitializeStrategy(rSolverConfig,PredictorCorrector); } PFEM2MonolithicSlipStrategy(ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector, const Kratos::Variable<int>& PeriodicVar): BaseType(rModelPart,false), mrPeriodicIdVar(PeriodicVar) { InitializeStrategy(rSolverConfig,PredictorCorrector); } SolvingStrategyPython(self.model_part, self.time_scheme, self.monolithic_linear_solver, self.conv_criteria, CalculateReactionFlag, ReformDofSetAtEachStep, MoveMeshFlag) self.monolithic_solver.SetMaximumIterations(self.maximum_nonlin_iterations) PFEM2MonolithicSlipStrategy(ModelPart& rModelPart, /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/ typename TLinearSolver::Pointer pLinearSolver, bool ReformDofSet = true, double Tol = 0.01, int MaxIterations = 3, unsigned int DomainSize = 2): BaseType(rModelPart,MoveMeshFlag), // Move Mesh flag, pass as input? mVelocityTolerance(VelTol), mPressureTolerance(PresTol), mMaxVelocityIter(MaxVelocityIterations), mMaxPressureIter(MaxPressureIterations), mDomainSize(DomainSize), mTimeOrder(TimeOrder), mPredictorCorrector(PredictorCorrector), mUseSlipConditions(true), ///@todo initialize somehow mReformDofSet(ReformDofSet), mExtraIterationSteps(), mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. 
this->Check(); bool CalculateReactions = false; bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. // Additional Typedefs typedef typename Kratos::VariableComponent<Kratos::VectorComponentAdaptor<Kratos::array_1d<double, 3 > > > VarComponent; typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; //initializing fractional velocity solution step typedef Scheme< TSparseSpace, TDenseSpace > SchemeType; typename SchemeType::Pointer pScheme; if (mUseSlipConditions) { typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticSchemeSlip< TSparseSpace, TDenseSpace > (mDomainSize,mDomainSize)); pScheme.swap(Temp); } else { typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ()); pScheme.swap(Temp); } //CONSTRUCTION OF VELOCITY // BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); // BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverSlip<TSparseSpace, TDenseSpace, TLinearSolver, VarComponent > (pNewVelocityLinearSolver, this->mDomainSize, VELOCITY_X, VELOCITY_Y, VELOCITY_Z)); // this->mpMomentumStrategy = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pVelocityLinearSolver, vel_build, CalculateReactions, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, 
pVelocityLinearSolver, CalculateReactions, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel( BaseType::GetEchoLevel() ); // BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer( // //new ResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>(pPressureLinearSolver)); // new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); // this->mpPressureStrategy = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pPressureLinearSolver, pressure_build, CalculateReactions, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pPressureLinearSolver, CalculateReactions, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy->SetEchoLevel( BaseType::GetEchoLevel() ); if (mUseSlipConditions) { #pragma omp parallel { ModelPart::ConditionIterator CondBegin; ModelPart::ConditionIterator CondEnd; OpenMPUtils::PartitionedIterators(rModelPart.Conditions(),CondBegin,CondEnd); for (ModelPart::ConditionIterator itCond = CondBegin; itCond != CondEnd; ++itCond) { const double FlagValue = itCond->GetValue(IS_STRUCTURE); itCond->Set(SLIP); if (FlagValue != 0.0) { Condition::GeometryType& rGeom = itCond->GetGeometry(); for (unsigned int i = 0; i < rGeom.PointsNumber(); ++i) { rGeom[i].SetLock(); rGeom[i].SetValue(IS_STRUCTURE,FlagValue); rGeom[i].Set(SLIP); rGeom[i].UnSetLock(); } } } } rModelPart.GetCommunicator().AssembleNonHistoricalData(IS_STRUCTURE); rModelPart.GetCommunicator().SynchronizeOrNodalFlags(SLIP); } KRATOS_CATCH(""); } /// Destructor. 
virtual ~FSStrategy(){} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ virtual int Check() { KRATOS_TRY; // Check elements and conditions in the model part int ierr = BaseType::Check(); if (ierr != 0) return ierr; if(DELTA_TIME.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error,"DELTA_TIME Key is 0. Check that the application was correctly registered.",""); if(BDF_COEFFICIENTS.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error,"BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.",""); ModelPart& rModelPart = BaseType::GetModelPart(); if ( mTimeOrder == 2 && rModelPart.GetBufferSize() < 3 ) KRATOS_THROW_ERROR(std::invalid_argument,"Buffer size too small for fractional step strategy (BDF2), needed 3, got ",rModelPart.GetBufferSize()); if ( mTimeOrder == 1 && rModelPart.GetBufferSize() < 2 ) KRATOS_THROW_ERROR(std::invalid_argument,"Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ",rModelPart.GetBufferSize()); const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); for ( ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl ) { ierr = itEl->Check(rCurrentProcessInfo); if (ierr != 0) break; } for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) { ierr = itCond->Check(rCurrentProcessInfo); if (ierr != 0) break; } return ierr; KRATOS_CATCH(""); } virtual double Solve() { // Initialize BDF2 coefficients ModelPart& rModelPart = BaseType::GetModelPart(); this->SetTimeCoefficients(rModelPart.GetProcessInfo()); double NormDp = 0.0; if (mPredictorCorrector) { bool Converged = false; // Iterative solution for pressure for(unsigned int it = 0; it < mMaxPressureIter; ++it) { if ( BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Pressure iteration " << it << std::endl; NormDp = this->SolveStep(); Converged = 
this->CheckPressureConvergence(NormDp); if ( Converged ) { if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Predictor-corrector converged in " << it+1 << " iterations." << std::endl; break; } } if (!Converged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Predictor-correctior iterations did not converge." << std::endl; } else { // Solve for fractional step velocity, then update pressure once NormDp = this->SolveStep(); } if (mReformDofSet) this->Clear(); return NormDp; } virtual void CalculateReactions() { ModelPart& rModelPart = BaseType::GetModelPart(); ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); // Set fractional step index to the momentum equation step int OriginalStep = rCurrentProcessInfo[FRACTIONAL_STEP]; rCurrentProcessInfo.SetValue(FRACTIONAL_STEP,1); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); const array_1d<double,3> Zero(3,0.0); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { itNode->FastGetSolutionStepValue(REACTION) = Zero; } } #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd); LocalSystemVectorType RHS_Contribution; LocalSystemMatrixType LHS_Contribution; for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { // Build local system itElem->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo); Element::GeometryType& rGeom = itElem->GetGeometry(); unsigned int NumNodes = rGeom.PointsNumber(); unsigned int index = 0; for (unsigned int i = 0; i < NumNodes; i++) { rGeom[i].SetLock(); array_1d<double,3>& rReaction = rGeom[i].FastGetSolutionStepValue(REACTION); for (unsigned int d = 0; d < mDomainSize; ++d) rReaction[d] -= 
RHS_Contribution[index++]; rGeom[i].UnSetLock(); } } } rModelPart.GetCommunicator().AssembleCurrentData(REACTION); // Reset original fractional step index rCurrentProcessInfo.SetValue(FRACTIONAL_STEP,OriginalStep); } virtual void AddIterationStep(Process::Pointer pNewStep) { mExtraIterationSteps.push_back(pNewStep); } virtual void ClearExtraIterationSteps() { mExtraIterationSteps.clear(); } virtual void Clear() { mpMomentumStrategy->Clear(); mpPressureStrategy->Clear(); } ///@} ///@name Access ///@{ virtual void SetEchoLevel(int Level) { BaseType::SetEchoLevel(Level); int StrategyLevel = Level > 0 ? Level - 1 : 0; mpMomentumStrategy->SetEchoLevel(StrategyLevel); mpPressureStrategy->SetEchoLevel(StrategyLevel); } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "FSStrategy" ; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const {rOStream << "FSStrategy";} /// Print object's data. virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected Life Cycle ///@{ ///@} ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /// Calculate the coefficients for time iteration. /** * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables. 
*/ void SetTimeCoefficients(ProcessInfo& rCurrentProcessInfo) { KRATOS_TRY; if (mTimeOrder == 2) { //calculate the BDF coefficients double Dt = rCurrentProcessInfo[DELTA_TIME]; double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME]; double Rho = OldDt / Dt; double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho); Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(3, false); BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant) BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant) BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant) } else if (mTimeOrder == 1) { double Dt = rCurrentProcessInfo[DELTA_TIME]; double TimeCoeff = 1.0 / Dt; Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(2, false); BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt) BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt) } KRATOS_CATCH(""); } double SolveStep() { ModelPart& rModelPart = BaseType::GetModelPart(); // 1. 
Fractional step momentum iteration rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1); bool Converged = false; int Rank = rModelPart.GetCommunicator().MyPID(); for(unsigned int it = 0; it < mMaxVelocityIter; ++it) { if ( BaseType::GetEchoLevel() > 1 && Rank == 0) std::cout << "Momentum iteration " << it << std::endl; // build momentum system and solve for fractional step velocity increment rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1); double NormDv = mpMomentumStrategy->Solve(); // // Compute projections (for stabilization) // rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,4); // this->ComputeSplitOssProjections(rModelPart); // // Additional steps // Moved to end of step // for (std::vector<Process::Pointer>::iterator iExtraSteps = mExtraIterationSteps.begin(); // iExtraSteps != mExtraIterationSteps.end(); ++iExtraSteps) // (*iExtraSteps)->Execute(); // Check convergence Converged = this->CheckFractionalStepConvergence(NormDv); if (Converged) { if ( BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Fractional velocity converged in " << it+1 << " iterations." << std::endl; break; } } if (!Converged && BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Fractional velocity iterations did not converge." << std::endl; // Compute projections (for stabilization) rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,4); this->ComputeSplitOssProjections(rModelPart); // 2. 
Pressure solution (store pressure variation in PRESSURE_OLD_IT) rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,5); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { const double OldPress = itNode->FastGetSolutionStepValue(PRESSURE); itNode->FastGetSolutionStepValue(PRESSURE_OLD_IT) = -OldPress; } } if (BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Calculating Pressure." << std::endl; double NormDp = mpPressureStrategy->Solve(); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) itNode->FastGetSolutionStepValue(PRESSURE_OLD_IT) += itNode->FastGetSolutionStepValue(PRESSURE); } // 3. Compute end-of-step velocity if (BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Updating Velocity." 
<< std::endl; rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,6); this->CalculateEndOfStepVelocity(); // Additional steps for (std::vector<Process::Pointer>::iterator iExtraSteps = mExtraIterationSteps.begin(); iExtraSteps != mExtraIterationSteps.end(); ++iExtraSteps) (*iExtraSteps)->Execute(); return NormDp; } bool CheckFractionalStepConvergence(const double NormDv) { ModelPart& rModelPart = BaseType::GetModelPart(); double NormV = 0.00; #pragma omp parallel reduction(+:NormV) { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const array_1d<double,3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY); for (unsigned int d = 0; d < 3; ++d) NormV += Vel[d] * Vel[d]; } } BaseType::GetModelPart().GetCommunicator().SumAll(NormV); NormV = sqrt(NormV); if (NormV == 0.0) NormV = 1.00; double Ratio = NormDv / NormV; if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Fractional velocity relative error: " << Ratio << std::endl; if (Ratio < mVelocityTolerance) { return true; } else return false; } bool CheckPressureConvergence(const double NormDp) { ModelPart& rModelPart = BaseType::GetModelPart(); double NormP = 0.00; #pragma omp parallel reduction(+:NormP) { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double Pr = itNode->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } } BaseType::GetModelPart().GetCommunicator().SumAll(NormP); NormP = sqrt(NormP); if (NormP == 0.0) NormP = 1.00; double Ratio = NormDp / NormP; if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Pressure relative error: " << Ratio << std::endl; if (Ratio < 
mPressureTolerance) { return true; } else return false; } void ComputeSplitOssProjections(ModelPart& rModelPart) { const array_1d<double,3> Zero(3,0.0); array_1d<double,3> Out(3,0.0); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { itNode->FastGetSolutionStepValue(CONV_PROJ) = Zero; itNode->FastGetSolutionStepValue(PRESS_PROJ) = Zero; itNode->FastGetSolutionStepValue(DIVPROJ) = 0.0; itNode->FastGetSolutionStepValue(NODAL_AREA) = 0.0; } } #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd); for ( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem ) { itElem->Calculate(CONV_PROJ,Out,rModelPart.GetProcessInfo()); } } rModelPart.GetCommunicator().AssembleCurrentData(CONV_PROJ); rModelPart.GetCommunicator().AssembleCurrentData(PRESS_PROJ); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); // If there are periodic conditions, add contributions from both sides to the periodic nodes this->PeriodicConditionProjectionCorrection(rModelPart); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA); itNode->FastGetSolutionStepValue(CONV_PROJ) /= NodalArea; itNode->FastGetSolutionStepValue(PRESS_PROJ) /= NodalArea; itNode->FastGetSolutionStepValue(DIVPROJ) /= NodalArea; } } } void CalculateEndOfStepVelocity() { ModelPart& rModelPart = BaseType::GetModelPart(); const array_1d<double,3> Zero(3,0.0); 
array_1d<double,3> Out(3,0.0); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { itNode->FastGetSolutionStepValue(FRACT_VEL) = Zero; } } #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd); for ( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem ) { itElem->Calculate(VELOCITY,Out,rModelPart.GetProcessInfo()); } } rModelPart.GetCommunicator().AssembleCurrentData(FRACT_VEL); this->PeriodicConditionVelocityCorrection(rModelPart); // Force the end of step velocity to verify slip conditions in the model if (mUseSlipConditions) this->EnforceSlipCondition(SLIP); if (mDomainSize > 2) { #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA); if ( ! itNode->IsFixed(VELOCITY_X) ) itNode->FastGetSolutionStepValue(VELOCITY_X) += itNode->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea; if ( ! itNode->IsFixed(VELOCITY_Y) ) itNode->FastGetSolutionStepValue(VELOCITY_Y) += itNode->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea; if ( ! 
itNode->IsFixed(VELOCITY_Z) ) itNode->FastGetSolutionStepValue(VELOCITY_Z) += itNode->FastGetSolutionStepValue(FRACT_VEL_Z) / NodalArea; } } } else { #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA); if ( ! itNode->IsFixed(VELOCITY_X) ) itNode->FastGetSolutionStepValue(VELOCITY_X) += itNode->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea; if ( ! itNode->IsFixed(VELOCITY_Y) ) itNode->FastGetSolutionStepValue(VELOCITY_Y) += itNode->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea; } } } } /** * @brief Substract wall-normal component of velocity update to ensure that the final velocity satisfies slip conditions. * @param rSlipWallFlag If Node.Is(rSlipWallFlag) the node is in the wall. */ void EnforceSlipCondition(const Kratos::Flags& rSlipWallFlag) { ModelPart& rModelPart = BaseType::GetModelPart(); #pragma omp parallel { ModelPart::NodeIterator NodeBegin; // = rModelPart.NodesBegin(); ModelPart::NodeIterator NodeEnd; // = rModelPart.NodesEnd(); OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd); for ( ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode ) { if ( itNode->Is(rSlipWallFlag) ) { const array_1d<double,3>& rNormal = itNode->FastGetSolutionStepValue(NORMAL); array_1d<double,3>& rDeltaVelocity = itNode->FastGetSolutionStepValue(FRACT_VEL); double Proj = rNormal[0] * rDeltaVelocity[0]; double Norm = rNormal[0] * rNormal[0]; for (unsigned int d = 1; d < mDomainSize; ++d) { Proj += rNormal[d] * rDeltaVelocity[d]; Norm += rNormal[d] * rNormal[d]; } Proj /= Norm; rDeltaVelocity -= Proj * rNormal; } } } } /** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on * both sides of the 
boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n * 2- The non-historical containers are added across processes, transmiting the right value from the condition owner to all partitions.\n * 3- The value on all periodic nodes is replaced by the one received in step 2. */ void PeriodicConditionProjectionCorrection(ModelPart& rModelPart) { if (mrPeriodicIdVar.Key() != 0) { int GlobalNodesNum = rModelPart.GetCommunicator().LocalMesh().Nodes().size(); rModelPart.GetCommunicator().SumAll(GlobalNodesNum); for (typename ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); itCond++ ) { ModelPart::ConditionType::GeometryType& rGeom = itCond->GetGeometry(); if (rGeom.PointsNumber() == 2) { Node<3>& rNode0 = rGeom[0]; int Node0Pair = rNode0.FastGetSolutionStepValue(mrPeriodicIdVar); Node<3>& rNode1 = rGeom[1]; int Node1Pair = rNode1.FastGetSolutionStepValue(mrPeriodicIdVar); // If the nodes are marked as a periodic pair (this is to avoid acting on two-noded conditions that are not PeriodicCondition) if ( ( static_cast<int>(rNode0.Id()) == Node1Pair ) && (static_cast<int>(rNode1.Id()) == Node0Pair ) ) { double NodalArea = rNode0.FastGetSolutionStepValue(NODAL_AREA) + rNode1.FastGetSolutionStepValue(NODAL_AREA); array_1d<double,3> ConvProj = rNode0.FastGetSolutionStepValue(CONV_PROJ) + rNode1.FastGetSolutionStepValue(CONV_PROJ); array_1d<double,3> PressProj = rNode0.FastGetSolutionStepValue(PRESS_PROJ) + rNode1.FastGetSolutionStepValue(PRESS_PROJ); double DivProj = rNode0.FastGetSolutionStepValue(DIVPROJ) + rNode1.FastGetSolutionStepValue(DIVPROJ); rNode0.GetValue(NODAL_AREA) = NodalArea; rNode0.GetValue(CONV_PROJ) = ConvProj; rNode0.GetValue(PRESS_PROJ) = PressProj; rNode0.GetValue(DIVPROJ) = DivProj; rNode1.GetValue(NODAL_AREA) = NodalArea; 
rNode1.GetValue(CONV_PROJ) = ConvProj; rNode1.GetValue(PRESS_PROJ) = PressProj; rNode1.GetValue(DIVPROJ) = DivProj; } } else if (rGeom.PointsNumber() == 4 && rGeom[0].FastGetSolutionStepValue(mrPeriodicIdVar) > GlobalNodesNum) { double NodalArea = rGeom[0].FastGetSolutionStepValue(NODAL_AREA); array_1d<double,3> ConvProj = rGeom[0].FastGetSolutionStepValue(CONV_PROJ); array_1d<double,3> PressProj = rGeom[0].FastGetSolutionStepValue(PRESS_PROJ); double DivProj = rGeom[0].FastGetSolutionStepValue(DIVPROJ); for (unsigned int i = 1; i < 4; i++) { NodalArea += rGeom[i].FastGetSolutionStepValue(NODAL_AREA); ConvProj += rGeom[i].FastGetSolutionStepValue(CONV_PROJ); PressProj += rGeom[i].FastGetSolutionStepValue(PRESS_PROJ); DivProj += rGeom[i].FastGetSolutionStepValue(DIVPROJ); } for (unsigned int i = 0; i < 4; i++) { rGeom[i].GetValue(NODAL_AREA) = NodalArea; rGeom[i].GetValue(CONV_PROJ) = ConvProj; rGeom[i].GetValue(PRESS_PROJ) = PressProj; rGeom[i].GetValue(DIVPROJ) = DivProj; } } } rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA); rModelPart.GetCommunicator().AssembleNonHistoricalData(CONV_PROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(PRESS_PROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ); for (typename ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++) { if (itNode->GetValue(NODAL_AREA) != 0.0) { itNode->FastGetSolutionStepValue(NODAL_AREA) = itNode->GetValue(NODAL_AREA); itNode->FastGetSolutionStepValue(CONV_PROJ) = itNode->GetValue(CONV_PROJ); itNode->FastGetSolutionStepValue(PRESS_PROJ) = itNode->GetValue(PRESS_PROJ); itNode->FastGetSolutionStepValue(DIVPROJ) = itNode->GetValue(DIVPROJ); // reset for next iteration itNode->GetValue(NODAL_AREA) = 0.0; itNode->GetValue(CONV_PROJ) = array_1d<double,3>(3,0.0); itNode->GetValue(PRESS_PROJ) = array_1d<double,3>(3,0.0); itNode->GetValue(DIVPROJ) = 0.0; } } } } void PeriodicConditionVelocityCorrection(ModelPart& 
rModelPart) { if (mrPeriodicIdVar.Key() != 0) { int GlobalNodesNum = rModelPart.GetCommunicator().LocalMesh().Nodes().size(); rModelPart.GetCommunicator().SumAll(GlobalNodesNum); for (typename ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); itCond++ ) { ModelPart::ConditionType::GeometryType& rGeom = itCond->GetGeometry(); if (rGeom.PointsNumber() == 2) { Node<3>& rNode0 = rGeom[0]; int Node0Pair = rNode0.FastGetSolutionStepValue(mrPeriodicIdVar); Node<3>& rNode1 = rGeom[1]; int Node1Pair = rNode1.FastGetSolutionStepValue(mrPeriodicIdVar); // If the nodes are marked as a periodic pair (this is to avoid acting on two-noded conditions that are not PeriodicCondition) if ( ( static_cast<int>(rNode0.Id()) == Node1Pair ) && (static_cast<int>(rNode1.Id()) == Node0Pair ) ) { array_1d<double,3> DeltaVel = rNode0.FastGetSolutionStepValue(FRACT_VEL) + rNode1.FastGetSolutionStepValue(FRACT_VEL); rNode0.GetValue(FRACT_VEL) = DeltaVel; rNode1.GetValue(FRACT_VEL) = DeltaVel; } } else if (rGeom.PointsNumber() == 4 && rGeom[0].FastGetSolutionStepValue(mrPeriodicIdVar) > GlobalNodesNum) { array_1d<double,3> DeltaVel = rGeom[0].FastGetSolutionStepValue(FRACT_VEL); for (unsigned int i = 1; i < 4; i++) { DeltaVel += rGeom[i].FastGetSolutionStepValue(FRACT_VEL); } for (unsigned int i = 0; i < 4; i++) { rGeom[i].GetValue(FRACT_VEL) = DeltaVel; } } } rModelPart.GetCommunicator().AssembleNonHistoricalData(FRACT_VEL); for (typename ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++) { array_1d<double,3>& rDeltaVel = itNode->GetValue(FRACT_VEL); if ( rDeltaVel[0]*rDeltaVel[0] + rDeltaVel[1]*rDeltaVel[1] + rDeltaVel[2]*rDeltaVel[2] != 0.0) { itNode->FastGetSolutionStepValue(FRACT_VEL) = itNode->GetValue(FRACT_VEL); rDeltaVel = array_1d<double,3>(3,0.0); } } } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} 
private:

    ///@name Static Member Variables
    ///@{


    ///@}
    ///@name Member Variables
    ///@{

    /// Convergence tolerance for the velocity (momentum) iterations.
    double mVelocityTolerance;

    /// Convergence tolerance for the pressure iterations.
    double mPressureTolerance;

    /// Maximum number of iterations allowed for the velocity solve.
    unsigned int mMaxVelocityIter;

    /// Maximum number of iterations allowed for the pressure solve.
    unsigned int mMaxPressureIter;

    /// Spatial dimension of the problem, read from the solver settings.
    unsigned int mDomainSize;

    /// Order of the time integration scheme, read from the solver settings.
    unsigned int mTimeOrder;

    /// Whether a predictor-corrector scheme is used.
    bool mPredictorCorrector;

    /// Whether slip (wall-law) boundary conditions are active.
    bool mUseSlipConditions;

    /// Whether the DOF set must be rebuilt each step.
    bool mReformDofSet;

    // Fractional step index.
    /* 1 : Momentum step (calculate fractional step velocity)
     * 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
     * 4 : Pressure step
     * 5 : Computation of projections
     * 6 : End of step velocity
     */
//    unsigned int mStepId;

    /// Scheme for the solution of the momentum equation
    StrategyPointerType mpMomentumStrategy;

    /// Scheme for the solution of the mass equation
    StrategyPointerType mpPressureStrategy;

    /// Extra processes (e.g. a turbulence model) executed on each iteration.
    std::vector< Process::Pointer > mExtraIterationSteps;

    /// Variable identifying periodic boundary pairs (used by periodic solvers).
    const Kratos::Variable<int>& mrPeriodicIdVar;

    ///@}
    ///@name Private Operators
    ///@{


    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief Read the solver configuration and set up the member strategies.
     *
     * Copies scalar settings (time order, domain size, echo level, ...) from
     * rSolverConfig, retrieves the velocity and pressure solution strategies
     * (throwing if either is missing), optionally registers a turbulence
     * process as an extra iteration step, and propagates the SLIP flag from
     * slip conditions to their nodes.
     *
     * @param rSolverConfig     Settings object describing the fractional step solver.
     * @param PredictorCorrector Whether to use a predictor-corrector scheme.
     */
    void InitializeStrategy(SolverSettingsType& rSolverConfig,
                            bool PredictorCorrector)
    {
        KRATOS_TRY;

        mTimeOrder = rSolverConfig.GetTimeOrder();

        // Check that input parameters are reasonable and sufficient.
        this->Check();

        ModelPart& rModelPart = this->GetModelPart();

        mDomainSize = rSolverConfig.GetDomainSize();

        mPredictorCorrector = PredictorCorrector;

        mUseSlipConditions = rSolverConfig.UseSlipConditions();

        mReformDofSet = rSolverConfig.GetReformDofSet();

        BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());

        // Initialize strategies for each step
        bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity,mpMomentumStrategy);

        if (HaveVelStrategy)
        {
            rSolverConfig.FindTolerance(SolverSettingsType::Velocity,mVelocityTolerance);
            rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter);
        }
        else
        {
            KRATOS_THROW_ERROR(std::runtime_error,"FS_Strategy error: No Velocity strategy defined in FractionalStepSettings","");
        }

        bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure,mpPressureStrategy);

        if (HavePressStrategy)
        {
            rSolverConfig.FindTolerance(SolverSettingsType::Pressure,mPressureTolerance);
            rSolverConfig.FindMaxIter(SolverSettingsType::Pressure,mMaxPressureIter);
        }
        else
        {
            KRATOS_THROW_ERROR(std::runtime_error,"FS_Strategy error: No Pressure strategy defined in FractionalStepSettings","");
        }

        // Optional turbulence model is run as an extra step on every iteration.
        Process::Pointer pTurbulenceProcess;
        bool HaveTurbulence = rSolverConfig.GetTurbulenceModel(pTurbulenceProcess);

        if (HaveTurbulence)
            mExtraIterationSteps.push_back(pTurbulenceProcess);

        // Set up nodes to use slip conditions if needed.
        if (mUseSlipConditions)
        {
#pragma omp parallel
            {
                ModelPart::ConditionIterator CondBegin;
                ModelPart::ConditionIterator CondEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Conditions(),CondBegin,CondEnd);

                for (ModelPart::ConditionIterator itCond = CondBegin; itCond != CondEnd; ++itCond)
                {
                    const bool is_slip = itCond->Is(SLIP);
                    if (is_slip)
                    {
                        Condition::GeometryType& rGeom = itCond->GetGeometry();
                        // Nodes may be shared between threads, so lock before
                        // setting the flag.
                        for (unsigned int i = 0; i < rGeom.PointsNumber(); ++i)
                        {
                            rGeom[i].SetLock();
                            rGeom[i].Set(SLIP);
                            rGeom[i].UnSetLock();
                        }
                    }
                }
            }
            // Make the SLIP flag consistent across MPI partitions.
            rModelPart.GetCommunicator().SynchronizeOrNodalFlags(SLIP);
        }

        // Check input parameters
        this->Check();

        KRATOS_CATCH("");
    }

    ///@}
    ///@name Private  Access
    ///@{


    ///@}
    ///@name Private Inquiry
    ///@{


    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    // NOTE(review): declared private with an empty body to forbid copying;
    // the missing return statement is never exercised because it cannot be
    // called from outside the class.
    FSStrategy& operator=(FSStrategy const& rOther){}

    /// Copy constructor.
    FSStrategy(FSStrategy const& rOther){}

    ///@}

}; /// Class FStepStrategy

///@}
///@name Type Definitions
///@{

///@}

///@} // addtogroup

} // namespace Kratos.

#endif // KRATOS_FS_STRATEGY_H
dc.c
/*
!-------------------------------------------------------------------------!
!                                                                         !
!          N A S   P A R A L L E L   B E N C H M A R K S   3.3            !
!                                                                         !
!                      O p e n M P   V E R S I O N                        !
!                                                                         !
!                                 D C                                     !
!                                                                         !
!-------------------------------------------------------------------------!
!                                                                         !
!   DC creates all specified data-cube views in parallel.                 !
!   Refer to NAS Technical Report 03-005 for details.                     !
!   It calculates all groupbys in a top down manner using well known      !
!   heuristics and optimizations.                                         !
!                                                                         !
!   Permission to use, copy, distribute and modify this software          !
!   for any purpose with or without fee is hereby granted.  We            !
!   request, however, that all derived work reference the NAS             !
!   Parallel Benchmarks 3.3. This software is provided "as is"            !
!   without express or implied warranty.                                  !
!                                                                         !
!   Information on NPB 3.3, including the technical report, the           !
!   original specifications, source code, results and information         !
!   on how to submit new results, is available at:                        !
!                                                                         !
!          http://www.nas.nasa.gov/Software/NPB/                          !
!                                                                         !
!   Send comments or suggestions to npb@nas.nasa.gov                      !
!                                                                         !
!         NAS Parallel Benchmarks Group                                   !
!         NASA Ames Research Center                                       !
!         Mail Stop: T27A-1                                               !
!         Moffett Field, CA 94035-1000                                    !
!                                                                         !
!         E-mail: npb@nas.nasa.gov                                        !
!         Fax: (650) 604-3957                                             !
!                                                                         !
!-------------------------------------------------------------------------!
!   Author: Michael Frumkin                                               !
!           Leonid Shabanov                                               !
!-------------------------------------------------------------------------!
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <ctype.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #include "adc.h" #include "macrodef.h" #include "npbparams.h" #ifdef UNIX #include <sys/types.h> #include <unistd.h> #define MAX_TIMERS 64 /* NPB maximum timers */ void timer_clear(int); void timer_start(int); void timer_stop(int); double timer_read(int); #endif void c_print_results( char *name, char clss, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags ); void initADCpar(ADC_PAR *par); int ParseParFile(char* parfname, ADC_PAR *par); int GenerateADC(ADC_PAR *par); void ShowADCPar(ADC_PAR *par); int32 DC(ADC_VIEW_PARS *adcpp); int Verify(long long int checksum,ADC_VIEW_PARS *adcpp); #define BlockSize 1024 int main ( int argc, char * argv[] ) { ADC_PAR *parp; ADC_VIEW_PARS *adcpp; int32 retCode; fprintf(stdout,"\n\n NAS Parallel Benchmarks (NPB3.3-OMP) - DC Benchmark\n\n" ); if(argc!=3){ fprintf(stdout," No Paramter file. 
Using compiled defaults\n"); } if(argc>3 || (argc>1 && !isdigit(argv[1][0]))){ fprintf(stderr,"Usage: <program name> <amount of memory>\n"); fprintf(stderr," <file of parameters>\n"); fprintf(stderr,"Example: bin/dc.S 1000000 DC/ADC.par\n"); fprintf(stderr,"The last argument, (a parameter file) can be skipped\n"); exit(1); } if( !(parp = (ADC_PAR*) malloc(sizeof(ADC_PAR))) ||!(adcpp = (ADC_VIEW_PARS*) malloc(sizeof(ADC_VIEW_PARS)))){ PutErrMsg("main: malloc failed") exit(1); } initADCpar(parp); parp->clss=CLASS; if(argc!=3){ parp->dim=attrnum; parp->tuplenum=input_tuples; }else if( (argc==3)&&(!ParseParFile(argv[2], parp))) { PutErrMsg("main.ParseParFile failed") exit(1); } ShowADCPar(parp); if(!GenerateADC(parp)) { PutErrMsg("main.GenerateAdc failed") exit(1); } adcpp->ndid = parp->ndid; adcpp->clss = parp->clss; adcpp->nd = parp->dim; adcpp->nm = parp->mnum; adcpp->nTasks = 1; if(argc>=2) adcpp->memoryLimit = atoi(argv[1]); else adcpp->memoryLimit = 0; if(adcpp->memoryLimit <= 0){ /* size of rb-tree with tuplenum nodes */ adcpp->memoryLimit = parp->tuplenum*(50+5*parp->dim); fprintf(stdout,"Estimated rb-tree size = %d \n", adcpp->memoryLimit); } adcpp->nInputRecs = parp->tuplenum; strcpy(adcpp->adcName, parp->filename); strcpy(adcpp->adcInpFileName, parp->filename); if((retCode=DC(adcpp))) { PutErrMsg("main.DC failed") fprintf(stderr, "main.ParRun failed: retcode = %d\n", retCode); exit(1); } if(parp) { free(parp); parp = 0; } if(adcpp) { free(adcpp); adcpp = 0; } return 0; } int32 CloseAdcView(ADC_VIEW_CNTL *adccntl); int32 PartitionCube(ADC_VIEW_CNTL *avp); ADC_VIEW_CNTL *NewAdcViewCntl(ADC_VIEW_PARS *adcpp, uint32 pnum); int32 ComputeGivenGroupbys(ADC_VIEW_CNTL *adccntl); int32 DC(ADC_VIEW_PARS *adcpp) { int32 itsk=0; double t_total=0.0; int verified; typedef struct { int verificationFailed; uint32 totalViewTuples; uint64 totalViewSizesInBytes; uint32 totalNumberOfMadeViews; uint64 checksum; double tm_max; } PAR_VIEW_ST; PAR_VIEW_ST *pvstp; pvstp = 
(PAR_VIEW_ST*) malloc(sizeof(PAR_VIEW_ST)); pvstp->verificationFailed = 0; pvstp->totalViewTuples = 0; pvstp->totalViewSizesInBytes = 0; pvstp->totalNumberOfMadeViews = 0; pvstp->checksum = 0; #ifdef _OPENMP adcpp->nTasks=omp_get_max_threads(); fprintf(stdout,"\nNumber of available threads: %d\n", adcpp->nTasks); if (adcpp->nTasks > MAX_NUMBER_OF_TASKS) { adcpp->nTasks = MAX_NUMBER_OF_TASKS; fprintf(stdout,"Warning: Maximum number of tasks reached: %d\n", adcpp->nTasks); } #pragma omp parallel shared(pvstp) private(itsk) #endif { double tm0=0; int itimer=0; ADC_VIEW_CNTL *adccntlp; #ifdef _OPENMP itsk=omp_get_thread_num(); #endif adccntlp = NewAdcViewCntl(adcpp, itsk); if (!adccntlp) { PutErrMsg("ParRun.NewAdcViewCntl: returned NULL") adccntlp->verificationFailed=1; }else{ adccntlp->verificationFailed = 0; if (adccntlp->retCode!=0) { fprintf(stderr, "DC.NewAdcViewCntl: return code = %d\n", adccntlp->retCode); } } if (!adccntlp->verificationFailed) { if( PartitionCube(adccntlp) ) { PutErrMsg("DC.PartitionCube failed"); } timer_clear(itimer); timer_start(itimer); if( ComputeGivenGroupbys(adccntlp) ) { PutErrMsg("DC.ComputeGivenGroupbys failed"); } timer_stop(itimer); tm0 = timer_read(itimer); } #ifdef _OPENMP #pragma omp critical #endif { if(pvstp->tm_max<tm0) pvstp->tm_max=tm0; pvstp->verificationFailed += adccntlp->verificationFailed; if (!adccntlp->verificationFailed) { pvstp->totalNumberOfMadeViews += adccntlp->numberOfMadeViews; pvstp->totalViewSizesInBytes += adccntlp->totalViewFileSize; pvstp->totalViewTuples += adccntlp->totalOfViewRows; pvstp->checksum += adccntlp->totchs[0]; } } if(CloseAdcView(adccntlp)) { PutErrMsg("ParRun.CloseAdcView: is failed"); adccntlp->verificationFailed = 1; } } /* omp parallel */ t_total=pvstp->tm_max; pvstp->verificationFailed=Verify(pvstp->checksum,adcpp); verified = (pvstp->verificationFailed == -1)? -1 : (pvstp->verificationFailed == 0)? 
1 : 0; fprintf(stdout,"\n*** DC Benchmark Results:\n"); fprintf(stdout," Benchmark Time = %20.3f\n", t_total); fprintf(stdout," Input Tuples = %12d\n", (int) adcpp->nInputRecs); fprintf(stdout," Number of Views = %12d\n", (int) pvstp->totalNumberOfMadeViews); fprintf(stdout," Number of Tasks = %12d\n", (int) adcpp->nTasks); fprintf(stdout," Tuples Generated = %20.0f\n", (double) pvstp->totalViewTuples); fprintf(stdout," Tuples/s = %20.2f\n", (double) pvstp->totalViewTuples / t_total); fprintf(stdout," Checksum = %20.12e\n", (double) pvstp->checksum); if (pvstp->verificationFailed) fprintf(stdout, " Verification failed\n"); c_print_results("DC", adcpp->clss, (int)adcpp->nInputRecs, 0, 0, 1, t_total, (double) pvstp->totalViewTuples * 1.e-6 / t_total, "Tuples generated", verified, NPBVERSION, COMPILETIME, CC, CLINK, C_LIB, C_INC, CFLAGS, CLINKFLAGS); return ADC_OK; } long long checksumS=464620213; long long checksumWlo=434318; long long checksumWhi=1401796; long long checksumAlo=178042; long long checksumAhi=7141688; long long checksumBlo=700453; long long checksumBhi=9348365; int Verify(long long int checksum,ADC_VIEW_PARS *adcpp){ switch(adcpp->clss){ case 'S': if(checksum==checksumS) return 0; break; case 'W': if(checksum==checksumWlo+1000000*checksumWhi) return 0; break; case 'A': if(checksum==checksumAlo+1000000*checksumAhi) return 0; break; case 'B': if(checksum==checksumBlo+1000000*checksumBhi) return 0; break; default: return -1; /* CLASS U */ } return 1; }
pdlansy.c
/**
 *
 * @file pdlansy.c
 *
 *  PLASMA auxiliary routines
 *  PLASMA is a software package provided by Univ. of Tennessee,
 *  Univ. of California Berkeley and Univ. of Colorado Denver
 *
 * @version 2.6.0
 * @author Emmanuel Agullo
 * @author Mathieu Faverge
 * @date 2010-11-15
 * @generated d Tue Jan 7 11:45:11 2014
 *
 **/
#include <stdlib.h>
#include <math.h>
#include "common.h"

/* Address of tile (m,n) of A, offset by (i,j) elements inside the tile. */
#define A(m, n, i, j, ldt) (BLKADDR(A, double, m, n)+((j)*(ldt)+(i)))

/***************************************************************************//**
 * Static-scheduling kernel: compute the max / one / infinity / Frobenius
 * norm of a symmetric matrix A.  Each worker (PLASMA_RANK) processes a
 * strided subset of tiles, writes its partial result into work[], and the
 * partials are then combined with a binary-tree reduction synchronized via
 * ss_cond_set/ss_cond_wait.  Only rank 0 writes *result.
 * Arguments arrive packed in the context (plasma_unpack_args_7).
 **/
void plasma_pdlansy(plasma_context_t *plasma)
{
    PLASMA_enum norm;
    PLASMA_enum uplo;
    PLASMA_desc A;
    double *work;
    double *result;
    PLASMA_sequence *sequence;
    PLASMA_request *request;

    int m, n;
    int next_m;
    int next_n;
    int ldam, ldan;
    int step, lrank;
    int X, X1, X2, Y, Y1, Y2;

    double* lwork;
    double normtmp, normtmp2;
    double *scale, *sumsq;
    double scale2, sumsq2;

    plasma_unpack_args_7(norm, uplo, A, work, result, sequence, request);
    *result = 0.0;

    /* Rank 0 clears the shared scratch area: one slot per worker, or a
       (scale, sumsq) pair per worker for the Frobenius norm. */
    if (PLASMA_RANK == 0) {
        if ( norm == PlasmaFrobeniusNorm ) {
            memset(work, 0, 2*PLASMA_SIZE*sizeof(double));
        } else {
            memset(work, 0, PLASMA_SIZE*sizeof(double));
        }
    }
    ss_init(PLASMA_SIZE, 1, 0);

    switch (norm) {
    /*
     *  PlasmaMaxNorm
     */
    case PlasmaMaxNorm:
        /* Walk the lower-triangular tile indices (m,n) assigned to this
           rank: tiles are numbered column by column, strided by PLASMA_SIZE. */
        n = 0;
        m = PLASMA_RANK;
        while (m >= A.mt && n < A.nt) {
            n++;
            m = m-A.mt+n;
        }

        while (n < A.nt) {
            /* Pre-compute the next (m,n) pair for this rank. */
            next_m = m;
            next_n = n;

            next_m += PLASMA_SIZE;
            while (next_m >= A.mt && next_n < A.nt) {
                next_n++;
                next_m = next_m-A.mt+next_n;
            }

            if (m == n) {
                /* Diagonal tile: only the stored triangle is scanned. */
                X1 = m == 0 ? A.i %A.mb : 0;
                X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
                X = X2 - X1;

                ldam = BLKLDD(A, m);
                CORE_dlansy(PlasmaMaxNorm, uplo, X,
                            A(m, n, X1, X1, ldam), ldam,
                            NULL, &normtmp);
            }
            else {
                /*
                 *  PlasmaLower
                 */
                if (uplo == PlasmaLower) {
                    X1 = m == 0 ? A.i %A.mb : 0;
                    X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
                    X = X2 - X1;

                    Y1 = n == 0 ? A.j %A.nb : 0;
                    Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                    Y = Y2 - Y1;

                    ldam = BLKLDD(A, m);
                    CORE_dlange(PlasmaMaxNorm, X, Y,
                                A(m, n, X1, Y1, ldam), ldam,
                                NULL, &normtmp);
                }
                /*
                 *  PlasmaUpper
                 */
                else {
                    /* Symmetric: off-diagonal tile (m,n) is read as (n,m). */
                    X1 = n == 0 ? A.i %A.mb : 0;
                    X2 = n == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
                    X = X2 - X1;

                    Y1 = m == 0 ? A.j %A.nb : 0;
                    Y2 = m == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                    Y = Y2 - Y1;

                    ldan = BLKLDD(A, n);
                    CORE_dlange(PlasmaMaxNorm, X, Y,
                                A(n, m, X1, Y1, ldan), ldan,
                                NULL, &normtmp);
                }
            }
            if ( normtmp > work[PLASMA_RANK] )
                work[PLASMA_RANK] = normtmp;
            m = next_m;
            n = next_n;
        }
        ss_cond_set(PLASMA_RANK, 0, 1);
        break;
    /*
     *  PlasmaOneNorm / PlasmaInfNorm
     */
    case PlasmaOneNorm:
    case PlasmaInfNorm:
        /* One tile-row of column sums at a time; symmetry makes the one-
           and infinity-norms identical. */
        m = PLASMA_RANK;
        normtmp2 = 0.0;
        lwork = (double*)plasma_private_alloc(plasma, A.mb, PlasmaRealDouble);

        while (m < A.mt) {
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;

            ldam = BLKLDD(A, m);
            memset(lwork, 0, A.mb*sizeof(double));
            /*
             *  PlasmaLower
             */
            if (uplo == PlasmaLower) {
                /* Tiles left of the diagonal, the diagonal tile, then the
                   column below the diagonal read transposed. */
                for (n = 0; n < m; n++) {
                    Y1 = n == 0 ? A.j%A.nb : 0;
                    Y = A.nb - Y1;
                    CORE_dasum(PlasmaRowwise, PlasmaUpperLower, X, Y,
                               A(m, n, X1, Y1, ldam), ldam, lwork);
                }
                CORE_dasum(PlasmaRowwise, uplo, X, X,
                           A(m, m, X1, X1, ldam), ldam, lwork);
                for (n = m+1; n < A.mt; n++) {
                    Y = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                    ldan = BLKLDD(A, n);
                    CORE_dasum(PlasmaColumnwise, PlasmaUpperLower, Y, X,
                               A(n, m, 0, X1, ldan), ldan, lwork);
                }
            }
            /*
             *  PlasmaUpper
             */
            else {
                for (n = 0; n < m; n++) {
                    Y1 = n == 0 ? A.j%A.nb : 0;
                    Y = A.nb - Y1;
                    CORE_dasum(PlasmaColumnwise, PlasmaUpperLower, Y, X,
                               A(n, m, Y1, X1, A.nb), A.nb, lwork);
                }
                CORE_dasum(PlasmaRowwise, uplo, X, X,
                           A(m, m, X1, X1, ldam), ldam, lwork);
                for (n = m+1; n < A.mt; n++) {
                    Y = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                    CORE_dasum(PlasmaRowwise, PlasmaUpperLower, X, Y,
                               A(m, n, X1, 0, ldam), ldam, lwork);
                }
            }
            /* Largest row sum of this tile-row. */
            CORE_dlange(PlasmaMaxNorm, X, 1, lwork, 1, NULL, &normtmp);
            if ( normtmp > normtmp2 )
                normtmp2 = normtmp;
            m += PLASMA_SIZE;
        }
        work[PLASMA_RANK] = normtmp2;
        ss_cond_set(PLASMA_RANK, 0, 1);
        plasma_private_free(plasma, lwork);
        break;
    /*
     *  PlasmaFrobeniusNorm
     */
    case PlasmaFrobeniusNorm:
        /* Each rank accumulates a LAPACK-style (scale, sumsq) pair so that
           sqrt(sumsq)*scale does not overflow/underflow. */
        scale = work + 2 * PLASMA_RANK;
        sumsq = work + 2 * PLASMA_RANK + 1;

        *scale = 0.;
        *sumsq = 1.;

        n = 0;
        m = PLASMA_RANK;
        while (m >= A.mt && n < A.nt) {
            n++;
            m = m-A.mt+n;
        }

        while (n < A.nt) {
            next_m = m;
            next_n = n;

            next_m += PLASMA_SIZE;
            while (next_m >= A.mt && next_n < A.nt) {
                next_n++;
                next_m = next_m-A.mt+next_n;
            }

            if (m == n) {
                X1 = m == 0 ? A.i %A.mb : 0;
                X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
                X = X2 - X1;

                ldam = BLKLDD(A, m);
                CORE_dsyssq( uplo, X,
                             A(m, n, X1, X1, ldam), ldam,
                             scale, sumsq );
            }
            else {
                scale2 = 0.;
                sumsq2 = 1.;
                /*
                 *  PlasmaLower
                 */
                if (uplo == PlasmaLower) {
                    X1 = m == 0 ? A.i %A.mb : 0;
                    X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
                    X = X2 - X1;

                    Y1 = n == 0 ? A.j %A.nb : 0;
                    Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                    Y = Y2 - Y1;

                    ldam = BLKLDD(A, m);
                    CORE_dgessq( X, Y,
                                 A(m, n, X1, Y1, ldam), ldam,
                                 &scale2, &sumsq2 );
                }
                /*
                 *  PlasmaUpper
                 */
                else {
                    X1 = n == 0 ? A.i %A.mb : 0;
                    X2 = n == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
                    X = X2 - X1;

                    Y1 = m == 0 ? A.j %A.nb : 0;
                    Y2 = m == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                    Y = Y2 - Y1;

                    ldan = BLKLDD(A, n);
                    CORE_dgessq( X, Y,
                                 A(n, m, X1, Y1, ldan), ldan,
                                 &scale2, &sumsq2 );
                }
                /* Off-diagonal tiles count twice (symmetry). */
                sumsq2 *= 2.;
                /* Combine (scale2, sumsq2) into the rank-local pair. */
                if ( scale2 != 0. ){
                    if ( *scale < scale2 ) {
                        *sumsq = sumsq2 + (*sumsq) * ( *scale / scale2 ) * ( *scale / scale2 );
                        *scale = scale2;
                    } else {
                        *sumsq = *sumsq + sumsq2 * ( scale2 / *scale ) * ( scale2 / *scale );
                    }
                }
            }
            m = next_m;
            n = next_n;
        }
        ss_cond_set(PLASMA_RANK, 0, 1);
        break;
    default:;
    }

    /* Binary-tree reduction of the per-rank partial results.  A rank with
       an even local index waits for its partner (rank+step) and absorbs the
       partner's value; odd ranks publish and leave. */
    if (norm != PlasmaFrobeniusNorm) {
        step = 1;
        lrank = PLASMA_RANK;
        while ( (lrank%2 == 0) && (PLASMA_RANK+step < PLASMA_SIZE) ) {
            ss_cond_wait(PLASMA_RANK+step, 0, step);
            work[PLASMA_RANK] = max(work[PLASMA_RANK], work[PLASMA_RANK+step]);
            lrank = lrank >> 1;
            step = step << 1;
            ss_cond_set(PLASMA_RANK, 0, step);
        }
        if (PLASMA_RANK > 0) {
            while( lrank != 0 ) {
                if (lrank%2 == 1) {
                    ss_cond_set(PLASMA_RANK, 0, step);
                    lrank = 0;
                } else {
                    lrank = lrank >> 1;
                    step = step << 1;
                    ss_cond_set(PLASMA_RANK, 0, step);
                }
            }
        }

        if (PLASMA_RANK == 0)
            *result = work[0];
    }
    else {
        /* Same tree shape, but the merge combines (scale, sumsq) pairs. */
        step = 1;
        lrank = PLASMA_RANK;
        while ( (lrank%2 == 0) && (PLASMA_RANK+step < PLASMA_SIZE) ) {
            double scale1, scale2;
            double sumsq1, sumsq2;

            ss_cond_wait(PLASMA_RANK+step, 0, step);

            scale1 = work[ 2 * PLASMA_RANK ];
            sumsq1 = work[ 2 * PLASMA_RANK + 1 ];
            scale2 = work[ 2 * (PLASMA_RANK+step) ];
            sumsq2 = work[ 2 * (PLASMA_RANK+step) + 1 ];

            if ( scale2 != 0. ){
                if( scale1 < scale2 ) {
                    work[2 * PLASMA_RANK+1] = sumsq2 + (sumsq1 * (( scale1 / scale2 ) * ( scale1 / scale2 )));
                    work[2 * PLASMA_RANK ] = scale2;
                } else {
                    work[2 * PLASMA_RANK+1] = sumsq1 + (sumsq2 * (( scale2 / scale1 ) * ( scale2 / scale1 )));
                }
            }

            lrank = lrank >> 1;
            step = step << 1;
            ss_cond_set(PLASMA_RANK, 0, step);
        }
        if (PLASMA_RANK > 0) {
            while( lrank != 0 ) {
                if (lrank%2 == 1) {
                    ss_cond_set(PLASMA_RANK, 0, step);
                    lrank = 0;
                } else {
                    lrank = lrank >> 1;
                    step = step << 1;
                    ss_cond_set(PLASMA_RANK, 0, step);
                }
            }
        }

        if (PLASMA_RANK == 0)
            *result = work[0] * sqrt( work[1] );
    }
    ss_finalize();
}

/***************************************************************************//**
 * Dynamic-scheduling (QUARK) variant: the same symmetric-norm computation,
 * expressed as a graph of RT_CORE_* tasks with per-tile partial results
 * gathered into a shared lwork buffer, followed by a final reduction task
 * that writes *result.
 **/
void plasma_pdlansy_quark(PLASMA_enum norm, PLASMA_enum uplo, PLASMA_desc A,
                          double *work, double *result,
                          PLASMA_sequence *sequence, PLASMA_request *request)
{
    plasma_context_t *plasma;
    Quark_Task_Flags task_flags = Quark_Task_Flags_Initializer;
    double* lwork;
    int X, X1, X2, Y, Y1;
    int ldam;
    int m, n, k;
    int szeW, pos;
    int nbworker = 1;

    plasma = plasma_context_self();
    if (sequence->status != PLASMA_SUCCESS)
        return;
    QUARK_Task_Flag_Set(&task_flags, TASK_SEQUENCE, (intptr_t)sequence->quark_sequence);

    *result = 0.0;
    switch ( norm ) {
    /*
     *  PlasmaMaxNorm
     */
    case PlasmaMaxNorm:
        /* One partial result per stored tile of the triangle. */
        szeW = (A.mt*(A.mt+1))/2;
        pos = 0;
        lwork = (double *)plasma_shared_alloc(plasma, szeW, PlasmaRealDouble);
        /* NOTE(review): "#pragma omp register" is not standard OpenMP
           (OmpSs/runtime extension) — confirm the target compiler. */
#pragma omp register ([szeW]lwork)
        memset(lwork, 0, szeW*sizeof(double));
        for(m = 0; m < A.mt; m++) {
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            ldam = BLKLDD(A, m);
            RT_CORE_dlansy_f1(
                plasma->quark, &task_flags,
                PlasmaMaxNorm, uplo, X,
                A(m, m, X1, X1, ldam), ldam, ldam*X,
                0, &(lwork[pos]), lwork, szeW);
            pos++;
            /*
             *  PlasmaLower
             */
            if (uplo == PlasmaLower) {
                for(n = 0; n < m; n++) {
                    Y1 = n == 0 ? A.j%A.nb : 0;
                    Y = A.nb - Y1;
                    RT_CORE_dlange_f1(
                        plasma->quark, &task_flags,
                        PlasmaMaxNorm, X, Y,
                        A(m, n, X1, Y1, ldam), ldam, ldam*Y,
                        0, &(lwork[pos]), lwork, szeW);
                    pos++;
                }
            }
            /*
             *  PlasmaUpper
             */
            else {
                for(n = m+1; n < A.mt; n++) {
                    Y = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                    RT_CORE_dlange_f1(
                        plasma->quark, &task_flags,
                        PlasmaMaxNorm, X, Y,
                        A(m, n, X1, 0, ldam), ldam, ldam*Y,
                        0, &(lwork[pos]), lwork, szeW);
                    pos++;
                }
            }
        }
        /* Reduce the per-tile maxima into *result, then release lwork. */
        RT_CORE_dlange(
            plasma->quark, &task_flags,
            PlasmaMaxNorm, szeW, 1, lwork, 1,
            szeW, 0, result);
        RT_CORE_free(plasma->quark, &task_flags, lwork, szeW*sizeof(double));
        break;
    /*
     *  PlasmaOneNorm / PlasmaInfNorm
     */
    case PlasmaOneNorm:
    case PlasmaInfNorm:
        /* lwork[1..A.m] accumulates absolute row sums; lwork[0] is spare. */
        lwork = (double *)plasma_shared_alloc(plasma, (A.m+1), PlasmaRealDouble);
#pragma omp register ([A.m+1]lwork)
        memset(lwork, 0, (A.m+1)*sizeof(double));
        for(m = 0; m < A.mt; m++) {
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            ldam = BLKLDD(A, m);
            RT_CORE_dasum_f1(
                plasma->quark, &task_flags,
                PlasmaRowwise, uplo, X, X,
                A(m, m, X1, X1, ldam), ldam, ldam*X,
                &(lwork[m*A.mb+1]), A.mb,
                lwork, A.m);
            /*
             *  PlasmaLower
             */
            if (uplo == PlasmaLower) {
                /* Each off-diagonal tile contributes to both its row block
                   and (via symmetry) its column block. */
                for(n = 0; n < m; n++) {
                    Y1 = n == 0 ? A.j%A.nb : 0;
                    Y = A.nb - Y1;
                    RT_CORE_dasum_f1(
                        plasma->quark, &task_flags,
                        PlasmaRowwise, PlasmaUpperLower, X, Y,
                        A(m, n, X1, Y1, ldam), ldam, ldam*Y,
                        &(lwork[m*A.mb+1]), A.mb,
                        lwork, A.m);
                    RT_CORE_dasum_f1(
                        plasma->quark, &task_flags,
                        PlasmaColumnwise, PlasmaUpperLower, X, Y,
                        A(m, n, X1, Y1, ldam), ldam, ldam*Y,
                        &(lwork[n*A.mb+1]), A.mb,
                        lwork, A.m);
                }
            }
            /*
             *  PlasmaUpper
             */
            else {
                for(n = m+1; n < A.mt; n++) {
                    Y = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                    RT_CORE_dasum_f1(
                        plasma->quark, &task_flags,
                        PlasmaRowwise, PlasmaUpperLower, X, Y,
                        A(m, n, X1, 0, ldam), ldam, ldam*Y,
                        &(lwork[m*A.mb+1]), A.mb,
                        lwork, A.m);
                    RT_CORE_dasum_f1(
                        plasma->quark, &task_flags,
                        PlasmaColumnwise, PlasmaUpperLower, X, Y,
                        A(m, n, X1, 0, ldam), ldam, ldam*Y,
                        &(lwork[n*A.mb+1]), A.mb,
                        lwork, A.m);
                }
            }
        }
        RT_CORE_dlange(
            plasma->quark, &task_flags,
            PlasmaMaxNorm, A.m+1, 1, lwork, 1,
            A.m+1, 0, result);
        RT_CORE_free(plasma->quark, &task_flags, lwork, (A.m+1)*sizeof(double));
        break;
    /*
     *  PlasmaFrobeniusNorm
     */
    case PlasmaFrobeniusNorm:
        /* One (scale, sumsq) slot per worker plus one spare. */
        szeW = 2*(PLASMA_SIZE+1);
        lwork = (double*)plasma_shared_alloc(plasma, szeW, PlasmaRealDouble);
#pragma omp register ([szeW]lwork)
        for(m = 0; m <= PLASMA_SIZE; m++) {
            lwork[2*m ] = 0.;
            lwork[2*m+1] = 1.;
        }
        /* k cycles over the worker slots so tasks spread across them. */
        k = 0;
        for(m = 0; m < A.mt; m++) {
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            ldam = BLKLDD(A, m);
            k++; nbworker++;
            RT_CORE_dsyssq_f1(
                plasma->quark, &task_flags,
                uplo, X,
                A(m, m, X1, X1, ldam), ldam,
                lwork + 2*k, lwork + 2*k + 1,
                lwork, szeW, OUTPUT | GATHERV );
            k = k % PLASMA_SIZE;
            /*
             *  PlasmaLower
             */
            if (uplo == PlasmaLower) {
                for(n = 0; n < m; n++) {
                    Y1 = n == 0 ? A.j%A.nb : 0;
                    Y = A.nb - Y1;
                    k++; nbworker++;
                    /* NOTE(review): the same tile is submitted twice so
                       each off-diagonal entry is counted twice (symmetry);
                       confirm against the static version's `sumsq2 *= 2.`. */
                    RT_CORE_dgessq_f1(
                        plasma->quark, &task_flags,
                        X, Y,
                        A(m, n, X1, Y1, ldam), ldam,
                        lwork + 2*k, lwork + 2*k + 1,
                        lwork, szeW, OUTPUT | GATHERV );
                    RT_CORE_dgessq_f1(
                        plasma->quark, &task_flags,
                        X, Y,
                        A(m, n, X1, Y1, ldam), ldam,
                        lwork + 2*k, lwork + 2*k + 1,
                        lwork, szeW, OUTPUT | GATHERV );
                    k = k % PLASMA_SIZE;
                }
            }
            /*
             *  PlasmaUpper
             */
            else {
                for(n = m+1; n < A.mt; n++) {
                    Y = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                    k++; nbworker++;
                    RT_CORE_dgessq_f1(
                        plasma->quark, &task_flags,
                        X, Y,
                        A(m, n, X1, 0, ldam), ldam,
                        lwork + 2*k, lwork + 2*k + 1,
                        lwork, szeW, OUTPUT | GATHERV );
                    RT_CORE_dgessq_f1(
                        plasma->quark, &task_flags,
                        X, Y,
                        A(m, n, X1, 0, ldam), ldam,
                        lwork + 2*k, lwork + 2*k + 1,
                        lwork, szeW, OUTPUT | GATHERV );
                    k = k % PLASMA_SIZE;
                }
            }
        }
        /* Final reduction of all (scale, sumsq) pairs into *result. */
        RT_CORE_dplssq( plasma->quark, &task_flags, min(nbworker, PLASMA_SIZE+1), lwork, result );
        RT_CORE_free(plasma->quark, &task_flags, lwork, szeW*sizeof(double));
        /* NOTE(review): no `break` here — execution falls through to the
           empty `default:` label, which is harmless but worth tidying. */
    default:;
    }
}
ompthrcount.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Spawn an OpenMP parallel region in which every thread atomically bumps a
 * shared counter once, then report how many threads took part.
 */
int main(int argc, char* argv[])
{
    int num_threads = 0;

    printf("Test will create many threads and increment a counter...\n");

    /* Each thread contributes exactly one increment; the atomic update
       keeps the tally exact even under contention. */
#pragma omp parallel
    {
#pragma omp atomic
        num_threads += 1;
    }

    printf("Hello OpenMP World just created: %d threads!\n", num_threads);
    return 0;
}