source
stringlengths
3
92
c
stringlengths
26
2.25M
DRB016-outputdep-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* The loop in this example cannot be parallelized. This pattern has two pair of dependencies: 1. loop carried output dependence x = .. : 2. loop carried true dependence due to: .. = x; x = ..; Data race pairs: we allow two pairs to preserve the original code pattern. 1. x@73:12 vs. x@74:5 2. x@74:5 vs. x@74:5 */ #include <stdio.h> int a[100]; int main() { int len=100; int i,x=10; #pragma omp parallel for firstprivate(i ) lastprivate(i ) for (i=0;i<len;i++) { a[i] = x; x=i; } printf("x=%d",x); return 0; }
openmp_demo.c
//------------------------------------------------------------------------------ // GraphBLAS/Demo/Program/openmp_demo: example of user multithreading //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // This demo uses OpenMP, and should work if GraphBLAS is compiled to // use either OpenMP or pthreads to synchronize multiple user threadds. // If OpenMP is not available, this program will work fine without it, in a // single user thread, regardless of the thread mechanism used by GraphBLAS. #include "GraphBLAS.h" #ifdef _OPENMP #include <omp.h> #endif #if defined __INTEL_COMPILER #pragma warning (disable: 58 167 144 177 181 186 188 589 593 869 981 1418 1419 1572 1599 2259 2282 2557 2547 3280 ) #elif defined __GNUC__ #pragma GCC diagnostic ignored "-Wunknown-pragmas" #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wincompatible-pointer-types" #endif #define NTHREADS 8 #define NTRIALS 10 #define N 6 #define OK(method) \ { \ GrB_Info info = method ; \ if (! 
(info == GrB_SUCCESS || info == GrB_NO_VALUE)) \ { \ printf ("Failure (id: %d, info: %d): %s\n", \ id, info, GrB_error ( )) ; \ /* return to caller (do not use inside critical section) */ \ return (0) ; \ } \ } //------------------------------------------------------------------------------ // worker //------------------------------------------------------------------------------ int worker (GrB_Matrix *Ahandle, int id) { printf ("\n================= worker %d starts:\n", id) ; fprintf (stderr, "worker %d\n", id) ; OK (GrB_Matrix_new (Ahandle, GrB_FP64, N, N)) ; GrB_Matrix A = *Ahandle ; // worker generates an intentional error message GrB_Matrix_setElement_INT32 (A, 42, 1000+id, 1000+id) ; // print the intentional error generated when the worker started #pragma omp critical { // critical section printf ("\n----------------- worker %d intentional error:\n", id) ; printf ("%s\n", GrB_error ( )) ; } for (int hammer_hard = 0 ; hammer_hard < NTRIALS ; hammer_hard++) { for (int i = 0 ; i < N ; i++) { for (int j = 0 ; j < N ; j++) { double x = (i+1)*100000 + (j+1)*1000 + id ; OK (GrB_Matrix_setElement_FP64 (A, x, i, j)) ; } } // force completion GrB_Index nvals ; OK (GrB_Matrix_nvals (&nvals, A)) ; } // Printing is done in a critical section, just so it is not overly // jumbled. Each matrix and error will print in a single body of text, // but the order of the matrices and errors printed will be out of order // because the critical section does not enforce the order that the // threads enter. GrB_Info info2 ; #pragma omp critical { // critical section printf ("\n----------------- worker %d is done:\n", id) ; info2 = GxB_Matrix_fprint (A, "A", GxB_SHORT, stdout) ; } OK (info2) ; // worker generates an intentional error message GrB_Matrix_setElement_INT32 (A, 42, 1000+id, 1000+id) ; // print the intentional error generated when the worker started // It should be unchanged. 
#pragma omp critical { // critical section printf ("\n----------------- worker %d error should be same:\n", id) ; printf ("%s\n", GrB_error ( )) ; } return (0) ; } //------------------------------------------------------------------------------ // openmp_demo main program //------------------------------------------------------------------------------ int main (int argc, char **argv) { fprintf (stderr, "Demo: %s:\n", argv [0]) ; printf ("Demo: %s:\n", argv [0]) ; // initialize the mutex int id = -1 ; // start GraphBLAS OK (GrB_init (GrB_NONBLOCKING)) ; int nthreads ; OK (GxB_Global_Option_get (GxB_NTHREADS, &nthreads)) ; fprintf (stderr, "openmp demo, nthreads %d\n", nthreads) ; // Determine which user-threading model is being used. GxB_Thread_Model thread_safety ; GxB_Global_Option_get (GxB_THREAD_SAFETY, &thread_safety) ; printf ("GraphBLAS is using ") ; switch (thread_safety) { case GxB_THREAD_POSIX : printf ("a POSIX pthread mutex\n") ; break ; case GxB_THREAD_WINDOWS : printf ("a Windows CriticalSection\n") ; break ; case GxB_THREAD_ANSI : printf ("an ANSI C11 mtx_lock\n") ; break ; case GxB_THREAD_OPENMP : printf ("an OpenMP critical section\n") ; break ; default : // GxB_THREAD_NONE #ifdef _OPENMP printf ("(nothing! 
This will fail!)\n") ; #else printf ("nothing (OK since user program is single-threaded)\n") ; #endif break ; } printf ("to synchronize user threads.\n") ; #ifdef _OPENMP printf ("User threads in this program are OpenMP threads.\n") ; #else printf ("This user program is single threaded.\n") ; #endif GrB_Matrix Aarray [NTHREADS] ; // create the threads #pragma omp parallel for num_threads(NTHREADS) for (id = 0 ; id < NTHREADS ; id++) { worker (&Aarray [id], id) ; } // the master thread prints them again, and frees them for (int id = 0 ; id < NTHREADS ; id++) { GrB_Matrix A = Aarray [id] ; printf ("\n---- Master prints matrix %d\n", id) ; OK (GxB_Matrix_fprint (A, "A", GxB_SHORT, stdout)) ; GrB_Matrix_free (&A) ; } // print an error message printf ("\n\n---- Master thread prints an error message:\n") ; GrB_Matrix_new (NULL, GrB_FP64, 1, 1) ; printf ("Error: %s\n", GrB_error ( )) ; // finish GraphBLAS GrB_finalize ( ) ; // finish OpenMP exit (0) ; }
jda.c
#include <math.h> #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> // C99 needed #include "jda.h" #ifdef _MSC_VER #define inline __inline #endif /*! * \brief jda global constance * \param JDA_T number of stages * \param JDA_K number of carts every stage * \param JDA_LANDMARK_N number of landmarks * \param JDA_TREE_DEPTH depth of a cart * \param JDA_TREE_LEAF_N leaf number of a cart * \param JDA_CART_N number of total carts in the model * \param JDA_LANDMARK_DIM dimension of landmarks * \param JDA_LBF_N dimension of local binary feature */ #define JDA_T 5 #define JDA_K 540 #define JDA_LANDMARK_N 27 #define JDA_TREE_DEPTH 4 #define JDA_TREE_LEAF_N (1 << (JDA_TREE_DEPTH - 1)) #define JDA_TREE_NODE_N (JDA_TREE_LEAF_N - 1) #define JDA_CART_N (JDA_T*JDA_K) #define JDA_LANDMARK_DIM (2 * JDA_LANDMARK_N) #define JDA_LBF_N (JDA_K*JDA_TREE_LEAF_N) /*! * \brief A marco based generic vector type for C * \note the vector is only support for operation `insert` * * \usage * 1. define the type * JDA_VECTOR(int); * 2. define the variable * JDA_VECTOR_DEC(int) vector_of_int; * 3. malloc initial memory, no element * JDA_VECTOR_NEW(vector_of_int, size); * 4. insert an element, resize vector if needed * JDA_VECTOR_INSERT(vector_of_int, value); * 5. 
insert some elements, resize vector if needed * JDA_VECTOR_INSERT_MORE(vector_of_int, values, size) */ #define JDA_VECTOR_DEF(Type) \ struct jdaVector##Type { \ int size; \ int capacity; \ Type *data; \ } #define JDA_VECTOR(Type) \ struct jdaVector##Type #define JDA_VECTOR_NEW(v, size_) \ do { \ (v).size = 0; \ (v).capacity = 2 * size_; \ (v).data = malloc((v).capacity * sizeof((v).data[0])); \ } while (0) #define JDA_VECTOR_INSERT(v, value) \ do { \ (v).size++; \ if ((v).size > (v).capacity) { \ int capacity_new = 2 * (v).capacity; \ (v).data = realloc((v).data, capacity_new * sizeof(value)); \ (v).capacity = capacity_new; \ } \ (v).data[(v).size - 1] = (value); \ } while (0) #define JDA_VECTOR_INSERT_MORE(v, values, size_) \ do { \ int size_new; \ size_new = (v).size + size_; \ if (size_new > (v).capacity) { \ int capacity_new = 2 * (((v).capacity < size_new) ? size_new : (v).capacity); \ (v).data = realloc((v).data, capacity_new * sizeof((values)[0])); \ (v).capacity = capacity_new; \ } \ memcpy(&(v).data[(v).size], values, size_ * sizeof((values)[0])); \ (v).size = size_new; \ } while (0) #define JDA_VECTOR_RELEASE(v) \ do { \ if ((v).data) free((v).data) \ } while (0) #define JDA_VECTOR_DEFAULT_LEN 200 JDA_VECTOR_DEF(int); JDA_VECTOR_DEF(float); // data structures /*! \brief jda bbox */ typedef struct { /*! breif x, y, w, h */ int x, y, size; } jdaBBox; /*! \brief jda shape */ typedef float jdaShape[JDA_LANDMARK_DIM]; /*!\brief jda cart node */ typedef struct { /*! breif scale */ int scale; /*! breif landmark id */ int landmark1_x; int landmark2_x; /*! breif landmark offset to generate feature value */ float landmark1_offset_x; float landmark1_offset_y; float landmark2_offset_x; float landmark2_offset_y; /*! \brief feature threshold */ int th; } jdaNode; /*! \brief jda cart */ typedef struct { /*! \brief nodes in this cart */ jdaNode nodes[JDA_TREE_NODE_N]; /*! \brief scores stored in the leaf nodes */ float score[JDA_TREE_LEAF_N]; /*! 
\brief score thrshold */ float th; /*! \brief mean and std apply to the score */ float mean, std; } jdaCart; /*! \brief jda cascador */ typedef struct { /*! \brief all carts in the model */ jdaCart carts[JDA_CART_N]; /*! \brief regression weights of every stage */ float ws[JDA_T][JDA_LBF_N][JDA_LANDMARK_DIM]; /*! \brief mean shape of the face */ float mean_shape[JDA_LANDMARK_DIM]; /*! \brief final score threshold */ float th; } jdaCascador; /*! \brief jda image */ typedef struct { /*! \brief width and height */ int w, h; /*! \brief step of a row in the image, usally equals to width */ int step; /*! \brief gray image data */ unsigned char *data; } jdaImage; // Internal Helpers #define JDA_IMAGE_AT(img, x, y) ((img)->data[(y)*(img)->step+(x)]) #define JDA_MAX(x, y) (((x)<(y))?(y):(x)) #define JDA_MIN(x, y) (((x)<(y))?(x):(y)) /*! * \brief create image * \param w w * \param h h * \return image */ static inline jdaImage jdaImageCreate(int w, int h) { jdaImage img; img.w = img.step = w; img.h = h; img.data = (unsigned char*)malloc(w*h*sizeof(unsigned char)); return img; } /*! * \brief release internal data buffer * \note don't release the image which borrow from others * * \param img image to free */ static inline void jdaImageRelease(jdaImage *img) { if (img->data) free(img->data); } /*! 
* \brief resize image, bilinear interpolation * \param img image * \param w w * \param h h * \return new image with size = (w, h) */ static jdaImage jdaImageResize(jdaImage img, int w, int h) { jdaImage img_ = jdaImageCreate(w, h); float x_ratio = (float)(img.w - 1) / w; float y_ratio = (float)(img.h - 1) / h; int x, y, index; int a, b, c, d; float x_diff, y_diff; int offset = 0; int i, j; for (i = 0; i < h; i++) { for (j = 0; j < w; j++) { x = (int)(x_ratio*j); y = (int)(y_ratio*i); x_diff = (x_ratio*j) - x; y_diff = (y_ratio*i) - y; index = y*img.w + x; a = img.data[index]; b = img.data[index + 1]; c = img.data[index + img.w]; d = img.data[index + img.w + 1]; img_.data[offset++] = (unsigned char)(a*(1.f - x_diff)*(1.f - y_diff) + \ b*(x_diff)*(1.f - y_diff) + \ c*(1.f - x_diff)*(y_diff) + \ d*(x_diff)*(y_diff)); } } return img_; } /*! * \brief nms * \param result jda detection result * \return merged result */ static jdaResult jdaNms(jdaResult result) { float overlap = 0.3f; int n = result.n; float *scores = result.scores; jdaBBox *bboxes = (jdaBBox*)result.bboxes; jdaShape *shapes = (jdaShape*)result.shapes; int *idx = (int*)malloc(n*sizeof(int)); bool *flag = (bool*)malloc(n*sizeof(bool)); int *area = (int*)malloc(n*sizeof(int)); int i, j; for (i = 0; i < n; i++) { idx[i] = i; flag[i] = true; area[i] = bboxes[i].size*bboxes[i].size; } // sort by score for (i = 0; i < n - 1; i++) { for (j = i + 1; j < n; j++) { if (scores[idx[i]] < scores[idx[j]]) { int temp = idx[i]; idx[i] = idx[j]; idx[j] = temp; } } } // merge for (i = 0; i < n - 1; i++) { int k1 = idx[i]; if (flag[k1] == false) continue; for (j = i + 1; j < n; j++) { int k2 = idx[j]; if (flag[k2] == false) continue; int x1 = JDA_MAX(bboxes[k1].x, bboxes[k2].x); int y1 = JDA_MAX(bboxes[k1].y, bboxes[k2].y); int x2 = JDA_MIN(bboxes[k1].x + bboxes[k1].size, bboxes[k2].x + bboxes[k2].size); int y2 = JDA_MIN(bboxes[k1].y + bboxes[k1].size, bboxes[k2].y + bboxes[k2].size); int w = JDA_MAX(0, x2 - x1); int h = 
JDA_MAX(0, y2 - y1); float ov = (float)(w*h) / (float)(area[k1] + area[k2] - w*h); if (ov > overlap) { flag[k2] = false; } } } // move jdaResult merged; JDA_VECTOR(int) merged_bboxes; JDA_VECTOR(float) merged_shapes; JDA_VECTOR(float) merged_scores; JDA_VECTOR_NEW(merged_scores, n); JDA_VECTOR_NEW(merged_bboxes, n * 3); JDA_VECTOR_NEW(merged_shapes, n * JDA_LANDMARK_DIM); for (i = 0; i < n; i++) { if (flag[i] == true) { JDA_VECTOR_INSERT(merged_scores, scores[i]); JDA_VECTOR_INSERT_MORE(merged_bboxes, (int*)&bboxes[i], 3); JDA_VECTOR_INSERT_MORE(merged_shapes, (float*)&shapes[i], JDA_LANDMARK_DIM); } } merged.n = merged_scores.size; merged.landmark_n = JDA_LANDMARK_N; merged.bboxes = merged_bboxes.data; // transfer memory merged.shapes = merged_shapes.data; // transfer memory merged.scores = merged_scores.data; // transfer memory free(flag); free(area); free(idx); // free previous result free(result.scores); free(result.shapes); free(result.bboxes); return merged; } static jdaResult jdaInternalDetect(jdaCascador *cascador, jdaImage o, jdaImage h, jdaImage q, \ float scale, float step, int min_size, int max_size, float th) { int win_size = 24; // fixed initial window size max_size = JDA_MIN(max_size, o.w); max_size = JDA_MIN(max_size, o.h); JDA_VECTOR(int) bboxes; JDA_VECTOR(float) shapes; JDA_VECTOR(float) scores; JDA_VECTOR_NEW(scores, JDA_VECTOR_DEFAULT_LEN); JDA_VECTOR_NEW(bboxes, JDA_VECTOR_DEFAULT_LEN * 3); JDA_VECTOR_NEW(shapes, JDA_VECTOR_DEFAULT_LEN * JDA_LANDMARK_DIM); while (win_size < min_size) win_size *= scale; for (; win_size <= max_size; win_size *= scale) { const int step = (int)(win_size*0.1f); const int x_max = o.w - win_size; const int y_max = o.h - win_size; int x, y; for (y = 0; y <= y_max; y += step) { for (x = 0; x <= x_max; x += step) { jdaImage ps[3]; const float r = 1.f / sqrtf(2.f); ps[0].w = ps[0].h = win_size; ps[0].step = o.step; ps[0].data = &o.data[y*o.step + x]; // borrow memory int h_x = (int)(x*r); int h_y = (int)(y*r); ps[1].w = 
ps[1].h = win_size; ps[1].step = h.step; ps[1].data = &h.data[h_y*h.step + h_x]; // borrow memory int q_x = x / 2; int q_y = y / 2; ps[2].w = ps[2].h = win_size; ps[2].step = q.step; ps[2].data = &q.data[q_y*q.step + q_x]; // borrow memory // validate jdaShape shape; float score = 0.f; int lbf[JDA_K]; jdaCart *cart = cascador->carts; memcpy(shape, cascador->mean_shape, JDA_LANDMARK_DIM*sizeof(float)); int t, k, i; // stages for (t = 0; t < JDA_T; t++) { // carts for (k = 0; k < JDA_K; k++) { // nodes int node_idx = 0; for (i = 0; i < JDA_TREE_DEPTH - 1; i++) { jdaNode *node = &cart->nodes[node_idx]; int landmark1 = node->landmark1_x; int landmark2 = node->landmark2_x; float x1 = shape[landmark1] + node->landmark1_offset_x; float y1 = shape[landmark1 + 1] + node->landmark1_offset_y; float x2 = shape[landmark2] + node->landmark2_offset_x; float y2 = shape[landmark2 + 1] + node->landmark2_offset_y; jdaImage *p = ps + node->scale; int x1_ = (int)(x1*p->w); int y1_ = (int)(y1*p->w); int x2_ = (int)(x2*p->w); int y2_ = (int)(y2*p->w); if (x1_ < 0) x1_ = 0; else if (x1_ >= p->w) x1_ = p->w - 1; if (x2_ < 0) x2_ = 0; else if (x2_ >= p->w) x2_ = p->w - 1; if (y1_ < 0) y1_ = 0; else if (y1_ >= p->w) y1_ = p->w - 1; if (y2_ < 0) y2_ = 0; else if (y2_ >= p->w) y2_ = p->w - 1; int feature = (int)JDA_IMAGE_AT(p, x1_, y1_) - (int)JDA_IMAGE_AT(p, x2_, y2_); if (feature <= node->th) node_idx = 2 * node_idx + 1; else node_idx = 2 * node_idx + 2; } int leaf_idx = node_idx - JDA_TREE_NODE_N; score += cart->score[leaf_idx]; score = (score - cart->mean) / cart->std; // not a face if (score < cart->th) goto next; lbf[k] = k*JDA_TREE_LEAF_N + leaf_idx; cart++; } // regression jdaShape *ws = cascador->ws[t]; for (k = 0; k < JDA_K; k++) { float *w = ws[lbf[k]]; for (i = 0; i < JDA_LANDMARK_DIM; i += 2) { shape[i] += w[i]; shape[i + 1] += w[i + 1]; } } } // final threshold if (score < th) goto next; jdaBBox bbox; bbox.x = x; bbox.y = y; bbox.size = win_size; // may use openmp #pragma omp 
critical { JDA_VECTOR_INSERT(scores, score); JDA_VECTOR_INSERT_MORE(bboxes, (int*)&bbox, 3); JDA_VECTOR_INSERT_MORE(shapes, shape, JDA_LANDMARK_DIM); } next:; } } } jdaResult result; result.n = scores.size; result.landmark_n = JDA_LANDMARK_N; result.bboxes = bboxes.data; // transfer memory result.shapes = shapes.data; // transfer memory result.scores = scores.data; // transfer memory return result; } // APIs jdaResult jdaDetect(void *cascador, unsigned char *data, int width, int height, \ float scale, float step, int min_size, int max_size, float th) { jdaImage o, h, q; o.w = o.step = width; o.h = height; o.data = data; // borrow memory float r = 1.f / sqrtf(2.f); h.w = (int)(width*r); h.h = (int)(height*r); h = jdaImageResize(o, h.w, h.h); q.w = width / 2; q.h = height / 2; q = jdaImageResize(o, q.w, q.h); min_size = JDA_MAX(min_size, 24); if (max_size <= 0) max_size = JDA_MIN(o.w, o.h); jdaResult result = jdaInternalDetect((jdaCascador*)cascador, o, h, q, scale, \ step, min_size, max_size, th); jdaResult merged = jdaNms(result); int i, j; for (i = 0; i < merged.n; i++) { int x = merged.bboxes[3 * i]; int y = merged.bboxes[3 * i + 1]; int size = merged.bboxes[3 * i + 2]; float *shape = &merged.shapes[i*JDA_LANDMARK_DIM]; for (j = 0; j < JDA_LANDMARK_N; j++) { shape[2 * j] = shape[2 * j] * size + x; shape[2 * j + 1] = shape[2 * j + 1] * size + y; } } jdaImageRelease(&h); jdaImageRelease(&q); return merged; } /*! 
* \brief serialize model from JDA * \note JDA dump data type is double */ void *jdaCascadorCreateDouble(const char *model) { FILE *fin = fopen(model, "rb"); if (!fin) return NULL; jdaCascador *cascador = (jdaCascador*)malloc(sizeof(jdaCascador)); if (!cascador) { fclose(fin); return NULL; } int i4; double f8; int t, k, i, j; // meta fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); // mean shape for (i = 0; i < JDA_LANDMARK_DIM; i++) { fread(&f8, sizeof(double), 1, fin); cascador->mean_shape[i] = (float)f8; } // carts for (t = 0; t < JDA_T; t++) { for (k = 0; k < JDA_K; k++) { jdaCart *cart = &cascador->carts[t*JDA_K + k]; // feature for (i = 0; i < JDA_TREE_NODE_N; i++) { jdaNode *node = &cart->nodes[i]; fread(&i4, sizeof(int), 1, fin); node->scale = i4; fread(&i4, sizeof(int), 1, fin); node->landmark1_x = i4 << 1; fread(&i4, sizeof(int), 1, fin); node->landmark2_x = i4 << 1; fread(&f8, sizeof(double), 1, fin); node->landmark1_offset_x = (float)f8; fread(&f8, sizeof(double), 1, fin); node->landmark1_offset_y = (float)f8; fread(&f8, sizeof(double), 1, fin); node->landmark2_offset_x = (float)f8; fread(&f8, sizeof(double), 1, fin); node->landmark2_offset_y = (float)f8; fread(&i4, sizeof(int), 1, fin); node->th = i4; } // scores for (i = 0; i < JDA_TREE_LEAF_N; i++) { fread(&f8, sizeof(double), 1, fin); cart->score[i] = (float)f8; } // classificatio threshold fread(&f8, sizeof(double), 1, fin); cart->th = (float)f8; fread(&f8, sizeof(double), 1, fin); cart->mean = (float)f8; fread(&f8, sizeof(double), 1, fin); cart->std = (float)f8; } // global regression weight for (i = 0; i < JDA_LBF_N; i++) { for (j = 0; j < JDA_LANDMARK_DIM; j++) { fread(&f8, sizeof(double), 1, fin); cascador->ws[t][i][j] = (float)f8; } } } fread(&i4, sizeof(int), 1, fin); fclose(fin); // set final score threshold, this 
can be changed cascador->th = 0; return (void*)cascador; } void *jdaCascadorCreateFloat(const char *model) { FILE *fin = fopen(model, "rb"); if (!fin) return NULL; jdaCascador *cascador = (jdaCascador*)malloc(sizeof(jdaCascador)); if (!cascador) { fclose(fin); return NULL; } int i4; float f4; int t, k, i, j; // meta fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); fread(&i4, sizeof(int), 1, fin); // mean shape for (i = 0; i < JDA_LANDMARK_DIM; i++) { fread(&f4, sizeof(float), 1, fin); cascador->mean_shape[i] = f4; } // carts for (t = 0; t < JDA_T; t++) { for (k = 0; k < JDA_K; k++) { jdaCart *cart = &cascador->carts[t*JDA_K + k]; // feature for (i = 0; i < JDA_TREE_NODE_N; i++) { jdaNode *node = &cart->nodes[i]; fread(&i4, sizeof(int), 1, fin); node->scale = i4; fread(&i4, sizeof(int), 1, fin); node->landmark1_x = i4 << 1; fread(&i4, sizeof(int), 1, fin); node->landmark2_x = i4 << 1; fread(&f4, sizeof(float), 1, fin); node->landmark1_offset_x = f4; fread(&f4, sizeof(float), 1, fin); node->landmark1_offset_y = f4; fread(&f4, sizeof(float), 1, fin); node->landmark2_offset_x = f4; fread(&f4, sizeof(float), 1, fin); node->landmark2_offset_y = f4; fread(&i4, sizeof(int), 1, fin); node->th = i4; } // scores for (i = 0; i < JDA_TREE_LEAF_N; i++) { fread(&f4, sizeof(float), 1, fin); cart->score[i] = f4; } // classificatio threshold fread(&f4, sizeof(float), 1, fin); cart->th = f4; fread(&f4, sizeof(float), 1, fin); cart->mean = f4; fread(&f4, sizeof(float), 1, fin); cart->std = f4; } // global regression weight for (i = 0; i < JDA_LBF_N; i++) { for (j = 0; j < JDA_LANDMARK_DIM; j++) { fread(&f4, sizeof(float), 1, fin); cascador->ws[t][i][j] = f4; } } } fread(&i4, sizeof(int), 1, fin); fclose(fin); // set final score threshold, this can be changed cascador->th = 0; return (void*)cascador; } /*! 
* \brief serialize model to a binary file * \note this function serialze float data type, can reduce model size */ void jdaCascadorSerializeTo(void *cascador_, const char *model) { FILE *fout = fopen(model, "wb"); if (!fout) return; jdaCascador *cascador = (jdaCascador*)cascador_; int i4; float f4; int t, k, i, j; // meta i4 = 0; // mask fwrite(&i4, sizeof(int), 1, fout); i4 = JDA_T; fwrite(&i4, sizeof(int), 1, fout); i4 = JDA_K; fwrite(&i4, sizeof(int), 1, fout); i4 = JDA_LANDMARK_N; fwrite(&i4, sizeof(int), 1, fout); i4 = JDA_TREE_DEPTH; fwrite(&i4, sizeof(int), 1, fout); i4 = JDA_T + 1; fwrite(&i4, sizeof(int), 1, fout); i4 = -1; fwrite(&i4, sizeof(int), 1, fout); // mean shape fwrite(cascador->mean_shape, sizeof(float), JDA_LANDMARK_DIM, fout); // carts for (t = 0; t < JDA_T; t++) { for (k = 0; k < JDA_K; k++) { jdaCart *cart = &cascador->carts[t*JDA_K + k]; // feature for (i = 0; i < JDA_TREE_NODE_N; i++) { jdaNode *node = &cart->nodes[i]; i4 = node->scale; fwrite(&i4, sizeof(int), 1, fout); i4 = node->landmark1_x >> 1; fwrite(&i4, sizeof(int), 1, fout); i4 = node->landmark2_x >> 1; fwrite(&i4, sizeof(int), 1, fout); f4 = node->landmark1_offset_x; fwrite(&f4, sizeof(float), 1, fout); f4 = node->landmark1_offset_y; fwrite(&f4, sizeof(float), 1, fout); f4 = node->landmark2_offset_x; fwrite(&f4, sizeof(float), 1, fout); f4 = node->landmark2_offset_y; fwrite(&f4, sizeof(float), 1, fout); i4 = node->th; fwrite(&i4, sizeof(int), 1, fout); } // scores for (i = 0; i < JDA_TREE_LEAF_N; i++) { f4 = cart->score[i]; fwrite(&f4, sizeof(float), 1, fout); } // classificatio threshold f4 = cart->th; fwrite(&f4, sizeof(float), 1, fout); f4 = cart->mean; fwrite(&f4, sizeof(float), 1, fout); f4 = cart->std; fwrite(&f4, sizeof(float), 1, fout); } // global regression weight for (i = 0; i < JDA_LBF_N; i++) { for (j = 0; j < JDA_LANDMARK_DIM; j++) { f4 = cascador->ws[t][i][j]; fwrite(&f4, sizeof(float), 1, fout); } } } i4 = 0; // mask fwrite(&i4, sizeof(int), 1, fout); 
fclose(fout); } void jdaCascadorRelease(void *cascador) { if (cascador) free((jdaCascador*)cascador); } void jdaResultRelease(jdaResult result) { // free vector's internal buff if (result.bboxes) free(result.bboxes); if (result.shapes) free(result.shapes); if (result.scores) free(result.scores); }
trmv_x_bsr_u_hi_trans.c
#include "alphasparse/kernel.h" #ifdef _OPENMP #include<omp.h> #endif #include"alphasparse/opt.h" #include<string.h> #include <stdio.h> #include "alphasparse/util.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { ALPHA_INT bs = A->block_size; ALPHA_INT m_inner = A->rows; ALPHA_INT n_inner = A->cols; if(m_inner != n_inner) return ALPHA_SPARSE_STATUS_INVALID_VALUE; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_INT partition[thread_num + 1]; balanced_partition_row_by_nnz(A->rows_end, m_inner, thread_num, partition); ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num); #ifdef _OPENMP #pragma omp parallel num_threads(thread_num) #endif { const ALPHA_INT tid = alpha_get_thread_id(); const ALPHA_INT local_m_s = partition[tid]; const ALPHA_INT local_m_e = partition[tid + 1]; tmp[tid] = (ALPHA_Number*)malloc(sizeof(ALPHA_Number)*n_inner*bs); memset(tmp[tid], 0, sizeof(ALPHA_Number)*n_inner*bs); if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR){ for (ALPHA_INT i = local_m_s; i < local_m_e; i++){ ALPHA_INT col = i*bs; ALPHA_INT block_start = A->rows_start[i], block_end = A->rows_end[i]; ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], i) - A->col_indx; for(ALPHA_INT ai = upper_start; ai < block_end; ai++){ ALPHA_INT row = A->col_indx[ai]; ALPHA_INT m_s = row*bs; if (row == i){ for (ALPHA_INT s = 0; s < bs * bs; s = s + bs){ for(ALPHA_INT st = s + s / bs + 1; st < s+bs; st++){ alpha_madde(tmp[tid][m_s+st-s], A->values[st+ai*bs*bs], x[col+s/bs]); } } }else{ for (ALPHA_INT s = 0; s < bs*bs; s=s+bs){ for(ALPHA_INT st = s; st < s+bs; st++){ alpha_madde(tmp[tid][m_s+st-s], A->values[st+ai*bs*bs], x[col+s/bs]); } } } } } }else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR){ for (ALPHA_INT i = local_m_s; i < local_m_e; i++){ ALPHA_INT col = i*bs; ALPHA_INT block_start = A->rows_start[i], 
block_end = A->rows_end[i]; ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], i) - A->col_indx; for (ALPHA_INT ai = upper_start; ai < block_end; ai++){ ALPHA_INT row = A->col_indx[ai]; ALPHA_INT m_s = row*bs; if (row < i){ continue; }else if (row == i){ for (ALPHA_INT s = 0; s < bs*bs; s=s+bs){ for(ALPHA_INT st = s; st < s+s/bs; st++){ alpha_madde(tmp[tid][m_s+s/bs], A->values[st+ai*bs*bs], x[col+st-s]); } } }else{ for (ALPHA_INT s = 0; s < bs*bs; s=s+bs){ for(ALPHA_INT st = s; st < s+bs; st++){ alpha_madde(tmp[tid][m_s+s/bs], A->values[st+ai*bs*bs], x[col+st-s]); } } } } } } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(ALPHA_INT i = 0; i < n_inner*bs; ++i){ ALPHA_Number tmp_y; alpha_setzero(tmp_y); for(ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(tmp_y, tmp_y, tmp[j][i]); } alpha_mul(y[i], y[i], beta); alpha_madde(y[i], tmp_y, alpha); alpha_madde(y[i], x[i], alpha); } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(ALPHA_INT i = 0; i < thread_num; ++i) { free(tmp[i]); } free(tmp); return ALPHA_SPARSE_STATUS_SUCCESS; }
eppsteinPAR.h
#pragma once #ifndef BRONKERBOSCHEPPSTEINPAR_H #define BRONKERBOSCHEPPSTEINPAR_H #include "../general.h" #include <ctime> #include <gms/algorithms/preprocessing/preprocessing.h> #include "../sequential/tomita.h" #include <gms/third_party/fast_range.h> #include <gms/third_party/fast_statistics.h> #include <parallel/algorithm> #include <gms/common/papi/papiw.h> namespace BkEppsteinPar { template <class SGraph, class Set = typename SGraph::Set> std::vector<Set> mceBench(const SGraph &rgraph, const pvector<NodeId> &ordering) { #ifdef BK_COUNT BK_CLIQUE_COUNTER = 0; //initialize counter #endif auto vCount = rgraph.num_nodes(); std::vector<Set> sol = {}; PAPIW::INIT_PARALLEL(PAPI_RES_STL, PAPI_TOT_CYC); // Init PAPIW #pragma omp parallel shared(rgraph, sol, ordering) { PAPIW::START(); #pragma omp for schedule(dynamic) for (NodeId v = 0; v < vCount; v++) { auto &neigh = rgraph.out_neigh(v); Set cand = {}; Set fini = {}; Set Q(v); for (auto w : neigh) { if (ordering[w] > ordering[v]) cand.union_inplace(w); else fini.union_inplace(w); } BkTomita::expand(cand, fini, Q, sol, rgraph); } PAPIW::STOP(); } PAPIW::PRINT(); return sol; } template <const auto Order, class SGraph, class Set = typename SGraph::Set> std::vector<Set> mce(const SGraph &rgraph) { #ifdef BK_COUNT BK_CLIQUE_COUNTER = 0; //initialize counter #endif pvector<NodeId> ordering(rgraph.num_nodes()); Order(rgraph, ordering); return mceBench(rgraph, ordering); } } // namespace BkEppsteinPar #endif /*BRONKERBOSCHEPPSTEIN_H*/
SGemmR_TN.h
#pragma once #include <arm_neon.h> namespace MAI { namespace Test { template<bool rowMajor, bool transA, bool transB> void sgemm(int M, int N, int K, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc); template<bool rowMajor, bool transA, bool transB> void sgemm_op(int M, int N, int K, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc); template<> void sgemm<true, true, false>(int M, int N, int K, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc) { for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { C[m * ldc + n] *= beta; for (int k = 0; k < K; ++k) { C[m * ldc + n] += alpha * A[k * lda + m] * B[k * ldb + n]; } } } } void sgemm_rtn_tile_444( float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc) { for (int m = 0; m < 4; ++m) { for (int n = 0; n < 4; ++n) { C[m * ldc + n] *= beta; for (int k = 0; k < 4; ++k) { C[m * ldc + n] += alpha * A[k * lda + m] * B[k * ldb + n]; } } } } void sgemm_rtn_tile_444_neon( float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc) { float32x4_t va = vdupq_n_f32(alpha); float32x4_t vb = vdupq_n_f32(beta); float32x4_t a0 = vld1q_f32(A + lda * 0); float32x4_t a1 = vld1q_f32(A + lda * 1); float32x4_t a2 = vld1q_f32(A + lda * 2); float32x4_t a3 = vld1q_f32(A + lda * 3); float32x4_t b0 = vld1q_f32(B + ldb * 0); float32x4_t b1 = vld1q_f32(B + ldb * 1); float32x4_t b2 = vld1q_f32(B + ldb * 2); float32x4_t b3 = vld1q_f32(B + ldb * 3); float32x4_t c0 = vld1q_f32(C + ldc * 0); float32x4_t c1 = vld1q_f32(C + ldc * 1); float32x4_t c2 = vld1q_f32(C + ldc * 2); float32x4_t c3 = vld1q_f32(C + ldc * 3); #define SGEMM_RTN_444_MUL(index) \ c##index = vmulq_f32(c##index, vb); \ c##index = vfmaq_laneq_f32(c##index, b0, a0, index); \ c##index = vfmaq_laneq_f32(c##index, b1, a1, index); \ c##index = vfmaq_laneq_f32(c##index, b2, a2, index); \ 
c##index = vfmaq_laneq_f32(c##index, b3, a3, index); \ SGEMM_RTN_444_MUL(0); SGEMM_RTN_444_MUL(1); SGEMM_RTN_444_MUL(2); SGEMM_RTN_444_MUL(3); vst1q_f32(C + 0 * ldc, c0); vst1q_f32(C + 1 * ldc, c1); vst1q_f32(C + 2 * ldc, c2); vst1q_f32(C + 3 * ldc, c3); #undef SGEMM_RTN_444_MUL } void sgemm_rtn_tile_448_neon( float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc) { float32x4_t va = vdupq_n_f32(alpha); float32x4_t vb = vdupq_n_f32(beta); float32x4_t a0 = vld1q_f32(A + lda * 0); float32x4_t a1 = vld1q_f32(A + lda * 1); float32x4_t a2 = vld1q_f32(A + lda * 2); float32x4_t a3 = vld1q_f32(A + lda * 3); float32x4_t a4 = vld1q_f32(A + lda * 4); float32x4_t a5 = vld1q_f32(A + lda * 5); float32x4_t a6 = vld1q_f32(A + lda * 6); float32x4_t a7 = vld1q_f32(A + lda * 7); float32x4_t b0 = vld1q_f32(B + ldb * 0); float32x4_t b1 = vld1q_f32(B + ldb * 1); float32x4_t b2 = vld1q_f32(B + ldb * 2); float32x4_t b3 = vld1q_f32(B + ldb * 3); float32x4_t b4 = vld1q_f32(B + ldb * 4); float32x4_t b5 = vld1q_f32(B + ldb * 5); float32x4_t b6 = vld1q_f32(B + ldb * 6); float32x4_t b7 = vld1q_f32(B + ldb * 7); float32x4_t c0 = vld1q_f32(C + ldc * 0); float32x4_t c1 = vld1q_f32(C + ldc * 1); float32x4_t c2 = vld1q_f32(C + ldc * 2); float32x4_t c3 = vld1q_f32(C + ldc * 3); #define SGEMM_RTN_448_MUL(index) \ c##index = vmulq_f32(c##index, vb); \ c##index = vfmaq_laneq_f32(c##index, b0, a0, index); \ c##index = vfmaq_laneq_f32(c##index, b1, a1, index); \ c##index = vfmaq_laneq_f32(c##index, b2, a2, index); \ c##index = vfmaq_laneq_f32(c##index, b3, a3, index); \ c##index = vfmaq_laneq_f32(c##index, b4, a4, index); \ c##index = vfmaq_laneq_f32(c##index, b5, a5, index); \ c##index = vfmaq_laneq_f32(c##index, b6, a6, index); \ c##index = vfmaq_laneq_f32(c##index, b7, a7, index); \ SGEMM_RTN_448_MUL(0); SGEMM_RTN_448_MUL(1); SGEMM_RTN_448_MUL(2); SGEMM_RTN_448_MUL(3); vst1q_f32(C + 0 * ldc, c0); vst1q_f32(C + 1 * ldc, c1); vst1q_f32(C + 2 * ldc, c2); vst1q_f32(C + 
3 * ldc, c3); #undef SGEMM_RTN_448_MUL } void sgemm_rtn_block( int MB, int NB, int KB, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc) { constexpr int MT = 4; constexpr int NT = 4; constexpr int KT = 8; for (int k = 0; k < KB; k += KT) { for (int m = 0; m < MB; m += MT) { for (int n = 0; n < NB; n += NT) { sgemm_rtn_tile_448_neon( alpha, A + k * lda + m, lda, B + k * ldb + n, ldb, beta, C + m * ldc + n, ldc); } } } } void sgemm_rtn_block_packed_A_packed_B( int MB, int NB, int KB, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc) { constexpr int MT = 4; constexpr int NT = 4; constexpr int KT = 8; //float packedA[8 * 64]; float packedB[8 * 64]; for (int k = 0; k < KB; k += KT) { pack(packedB, B + k * ldb, ldb, KT, NT, KT, 64); for (int m = 0; m < MB; m += MT) { for (int n = 0; n < NB; n += NT) { sgemm_rtn_tile_448_neon( alpha, A + k * lda + m, lda, // B + k * ldb + n, // ldb, packedB + n * KT, NT, beta, C + m * ldc + n, ldc); } } } } template<> void sgemm_op<true, true, false>( int M, int N, int K, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc) { constexpr int MB = 64; constexpr int NB = 64; constexpr int KB = 64; int MBCount = M / MB; int NBCount = N / NB; int KBCount = K / KB; #pragma omp parallel for schedule(dynamic) collapse(2) for (int m = 0; m < MBCount; ++m) { for (int n = 0; n < NBCount; ++n) { for (int k = 0; k < KBCount; ++k) { sgemm_rtn_block_packed_A_packed_B( MB, NB, KB, alpha, A + (k * KB * lda + m * MB), lda, B + (k * KB * ldb + n * NB), ldb, beta, C + (m * MB * ldc + n * NB), ldc); } } } } } // namespace Test } // namespace MAI
fs_csr_inspector.h
#include <vector>
#include <cassert>
#include <set>

// Adds the directed edge v -> w to the dependence graph.
inline void connect(int v, int w, std::vector<std::vector<int>> &DAG) {
    DAG[v].push_back(w);
}

/*
 ****** Inspector for level set parallelization of Forward Solve CSC's
 ****** outer most loop.
*/
// For every row of the pattern (Lp/Li), pass 1 records the column indices
// strictly greater than the row, then pass 2 records those strictly smaller.
// Each OpenMP thread only appends to the rows it owns, so the two parallel
// loops are race-free and the per-row output order is deterministic.
void fs_csr_inspector(int n, int *Lp, int *Li,
                      std::vector<std::vector<int>> &DAG) {
    // Pass 1: entries above the diagonal.
#pragma omp parallel for schedule(auto)
    for (int row = 0; row < n; ++row) {
        for (int nz = Lp[row]; nz < Lp[row + 1]; ++nz) {
            if (row < Li[nz])
                DAG[row].push_back(Li[nz]);
        }
    }
    // Pass 2: entries below the diagonal.
#pragma omp parallel for schedule(auto)
    for (int row = 0; row < n; ++row) {
        for (int nz = Lp[row]; nz < Lp[row + 1]; ++nz) {
            if (row > Li[nz])
                DAG[row].push_back(Li[nz]);
        }
    }
}

/*
 ****** Inspector for level set parallelization of Forward Solve CSC's
 ****** outer most loop.
*/
// Set-based overload: identical traversal, but duplicate column indices
// collapse because the adjacency container is a std::set.
void fs_csr_inspector(int n, int *Lp, int *Li,
                      std::vector<std::set<int>> &DAG) {
    // Pass 1: entries above the diagonal.
#pragma omp parallel for schedule(auto)
    for (int row = 0; row < n; ++row) {
        for (int nz = Lp[row]; nz < Lp[row + 1]; ++nz) {
            if (row < Li[nz])
                DAG[row].insert(Li[nz]);
        }
    }
    // Pass 2: entries below the diagonal.
#pragma omp parallel for schedule(auto)
    for (int row = 0; row < n; ++row) {
        for (int nz = Lp[row]; nz < Lp[row + 1]; ++nz) {
            if (row > Li[nz])
                DAG[row].insert(Li[nz]);
        }
    }
}
omp_par_in_loop.c
// RUN: %libomp-compile-and-run // #include <stdlib.h> #include <stdio.h> #include <math.h> #include <omp.h> #define TYPE long #define MAX_ITER (TYPE)((TYPE)1000000) #define EVERY (TYPE)((TYPE)100000) int main(int argc, char* argv[]) { TYPE x = MAX_ITER; omp_set_max_active_levels(2); omp_set_num_threads(2); #pragma omp parallel for schedule(nonmonotonic:dynamic,1) for (TYPE i = 0; i < x; i++) { int tid = omp_get_thread_num(); omp_set_num_threads(1); #pragma omp parallel proc_bind(spread) { if (i % EVERY == (TYPE)0) printf("Outer thread %d at iter %ld\n", tid, i); } } printf("passed\n"); return 0; }
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H

#include "clang/AST/ASTFwd.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>

namespace clang {

class ASTContext;

//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//

/// This is a basic class for representing single OpenMP clause.
class OMPClause {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;

  /// Ending location of the clause.
  SourceLocation EndLoc;

  /// Kind of the clause.
  OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// Returns the starting location of the clause.
  SourceLocation getBeginLoc() const { return StartLoc; }

  /// Returns the ending location of the clause.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }

  /// Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }

  /// An implicit clause has no written source text, hence an invalid start
  /// location.
  bool isImplicit() const { return StartLoc.isInvalid(); }

  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;
  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();
  const_child_range children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Get the iterator range for the expressions used in the clauses. Used
  /// expressions include only the children that must be evaluated at the
  /// runtime before entering the construct.
  child_range used_children();
  // NOTE(review): the const overload delegates to children(), not to the
  // non-const used_children() -- this mirrors the non-virtual dispatch done
  // in the .cpp; confirm against upstream before "fixing".
  const_child_range used_children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Every OMPClause-derived node is an OMPClause; concrete subclasses
  /// narrow this by clause kind.
  static bool classof(const OMPClause *) { return true; }
};

/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Pre-initialization statement for the clause.
  Stmt *PreInit = nullptr;

  /// Region that captures the associated stmt.
  OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown;

protected:
  OMPClauseWithPreInit(const OMPClause *This) {
    assert(get(This) && "get is not tuned for pre-init.");
  }

  /// Set pre-initialization statement for the clause.
  void
  setPreInitStmt(Stmt *S,
                 OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) {
    PreInit = S;
    CaptureRegion = ThisRegion;
  }

public:
  /// Get pre-initialization statement for the clause.
  const Stmt *getPreInitStmt() const { return PreInit; }

  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }

  /// Get capture region for the stmt in the clause.
  OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }

  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};

/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Post-update expression for the clause.
  Expr *PostUpdate = nullptr;

protected:
  OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
    assert(get(This) && "get is not tuned for post-update.");
  }

  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }

  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }

  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};

/// This structure contains most locations needed by an OMPVarListClause.
struct OMPVarListLocTy {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Ending location of the clause.
  SourceLocation EndLoc;
  OMPVarListLocTy() = default;
  OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {}
};

/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
///
/// CRTP base: \p T is the concrete clause type, which must provide the
/// llvm::TrailingObjects storage that holds the variable list.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of variables in the list.
  unsigned NumVars;

protected:
  /// Build a clause with \a N variables
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
      : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}

  /// Fetches list of variables associated with this clause.
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }

  /// Sets the list of variables for this clause.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }

public:
  using varlist_iterator = MutableArrayRef<Expr *>::iterator;
  using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using varlist_range = llvm::iterator_range<varlist_iterator>;
  using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }

  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};

/// This represents 'allocator' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp allocate(a) allocator(omp_default_mem_alloc)
/// \endcode
/// In this example directive '#pragma omp allocate' has simple 'allocator'
/// clause with the allocator 'omp_default_mem_alloc'.
class OMPAllocatorClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression with the allocator.
  Stmt *Allocator = nullptr;

  /// Set allocator.
  void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Build 'allocator' clause with the given allocator.
  ///
  /// \param A Allocator.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Allocator(A) {}

  /// Build an empty clause.
  OMPAllocatorClause()
      : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns allocator.
  Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); }

  child_range children() { return child_range(&Allocator, &Allocator + 1); }

  const_child_range children() const {
    return const_child_range(&Allocator, &Allocator + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_allocator;
  }
};

/// This represents clause 'allocate' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// and clause 'allocate' for the variable 'a'.
class OMPAllocateClause final
    : public OMPVarListClause<OMPAllocateClause>,
      private llvm::TrailingObjects<OMPAllocateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Allocator specified in the clause, or 'nullptr' if the default one is
  /// used.
  Expr *Allocator = nullptr;
  /// Position of the ':' delimiter in the clause;
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    Expr *Allocator, SourceLocation ColonLoc,
                    SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc,
                                            LParenLoc, EndLoc, N),
        Allocator(Allocator), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPAllocateClause(unsigned N)
      : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate,
                                            SourceLocation(), SourceLocation(),
                                            SourceLocation(), N) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation LParenLoc, Expr *Allocator,
                                   SourceLocation ColonLoc,
                                   SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Returns the allocator expression or nullptr, if no allocator is
  /// specified.
  Expr *getAllocator() const { return Allocator; }

  /// Returns the location of the ':' delimiter.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAllocateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_allocate;
  }
};

/// This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause
/// with condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'if' clause.
  Stmt *Condition = nullptr;

  /// Location of ':' (if any).
  SourceLocation ColonLoc;

  /// Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown;

  /// Name modifier location.
  SourceLocation NameModifierLoc;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Set directive name modifier for the clause.
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }

  /// Set location of directive name modifier for the clause.
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }

  /// Set location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
  OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
              OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
              SourceLocation LParenLoc, SourceLocation NameModifierLoc,
              SourceLocation ColonLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond),
        ColonLoc(ColonLoc), NameModifier(NameModifier),
        NameModifierLoc(NameModifierLoc) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPIfClause()
      : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  /// Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }

  /// Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPIfClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_if;
  }
};

/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// Build 'final' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPFinalClause(Expr *Cond, Stmt *HelperCond,
                 OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                 SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPFinalClause()
      : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPFinalClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_final;
  }
};

/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'num_threads' clause.
  Stmt *NumThreads = nullptr;

  /// Set condition.
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// Build 'num_threads' clause with condition \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param HelperNumThreads Helper Number of threads for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {
    setPreInitStmt(HelperNumThreads, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumThreadsClause()
      : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of threads.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }

  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }

  const_child_range children() const {
    return const_child_range(&NumThreads, &NumThreads + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_threads;
  }
};

/// This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Safelen = nullptr;

  /// Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }

  const_child_range children() const {
    return const_child_range(&Safelen, &Safelen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_safelen;
  }
};

/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }

  const_child_range children() const {
    return const_child_range(&Simdlen, &Simdlen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_simdlen;
  }
};

/// This represents the 'sizes' clause in the '#pragma omp tile' directive.
///
/// \code
/// #pragma omp tile sizes(5,5)
/// for (int i = 0; i < 64; ++i)
///   for (int j = 0; j < 64; ++j)
/// \endcode
class OMPSizesClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPSizesClause, Expr *> {
  friend class OMPClauseReader;
  friend class llvm::TrailingObjects<OMPSizesClause, Expr *>;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of tile sizes in the clause.
  unsigned NumSizes;

  /// Build an empty clause.
  explicit OMPSizesClause(int NumSizes)
      : OMPClause(llvm::omp::OMPC_sizes, SourceLocation(), SourceLocation()),
        NumSizes(NumSizes) {}

public:
  /// Build a 'sizes' AST node.
  ///
  /// \param C         Context of the AST.
  /// \param StartLoc  Location of the 'sizes' identifier.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc    Location of ')'.
  /// \param Sizes     Content of the clause.
  static OMPSizesClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> Sizes);

  /// Build an empty 'sizes' AST node for deserialization.
  ///
  /// \param C     Context of the AST.
  /// \param NumSizes Number of items in the clause.
  static OMPSizesClause *CreateEmpty(const ASTContext &C, unsigned NumSizes);

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the number of list items.
  unsigned getNumSizes() const { return NumSizes; }

  /// Returns the tile size expressions.
  MutableArrayRef<Expr *> getSizesRefs() {
    return MutableArrayRef<Expr *>(static_cast<OMPSizesClause *>(this)
                                       ->template getTrailingObjects<Expr *>(),
                                   NumSizes);
  }
  ArrayRef<Expr *> getSizesRefs() const {
    return ArrayRef<Expr *>(static_cast<const OMPSizesClause *>(this)
                                ->template getTrailingObjects<Expr *>(),
                            NumSizes);
  }

  /// Sets the tile size expressions.
void setSizesRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumSizes); std::copy(VL.begin(), VL.end(), static_cast<OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>()); } child_range children() { MutableArrayRef<Expr *> Sizes = getSizesRefs(); return child_range(reinterpret_cast<Stmt **>(Sizes.begin()), reinterpret_cast<Stmt **>(Sizes.end())); } const_child_range children() const { ArrayRef<Expr *> Sizes = getSizesRefs(); return const_child_range(reinterpret_cast<Stmt *const *>(Sizes.begin()), reinterpret_cast<Stmt *const *>(Sizes.end())); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_sizes; } }; /// Representation of the 'full' clause of the '#pragma omp unroll' directive. /// /// \code /// #pragma omp unroll full /// for (int i = 0; i < 64; ++i) /// \endcode class OMPFullClause final : public OMPClause { friend class OMPClauseReader; /// Build an empty clause. explicit OMPFullClause() : OMPClause(llvm::omp::OMPC_full, {}, {}) {} public: /// Build an AST node for a 'full' clause. /// /// \param C Context of the AST. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. static OMPFullClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Build an empty 'full' AST node for deserialization. /// /// \param C Context of the AST. 
static OMPFullClause *CreateEmpty(const ASTContext &C); child_range children() { return {child_iterator(), child_iterator()}; } const_child_range children() const { return {const_child_iterator(), const_child_iterator()}; } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_full; } }; /// Representation of the 'partial' clause of the '#pragma omp unroll' /// directive. /// /// \code /// #pragma omp unroll partial(4) /// for (int i = start; i < end; ++i) /// \endcode class OMPPartialClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Optional argument to the clause (unroll factor). Stmt *Factor; /// Build an empty clause. explicit OMPPartialClause() : OMPClause(llvm::omp::OMPC_partial, {}, {}) {} /// Set the unroll factor. void setFactor(Expr *E) { Factor = E; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build an AST node for a 'partial' clause. /// /// \param C Context of the AST. /// \param StartLoc Location of the 'partial' identifier. /// \param LParenLoc Location of '('. /// \param EndLoc Location of ')'. /// \param Factor Clause argument. static OMPPartialClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, Expr *Factor); /// Build an empty 'partial' AST node for deserialization. /// /// \param C Context of the AST. static OMPPartialClause *CreateEmpty(const ASTContext &C); /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the argument of the clause or nullptr if not set. 
Expr *getFactor() const { return cast_or_null<Expr>(Factor); } child_range children() { return child_range(&Factor, &Factor + 1); } const_child_range children() const { return const_child_range(&Factor, &Factor + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_partial; } }; /// This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// Build an empty clause. explicit OMPCollapseClause() : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. 
SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_collapse; } }; /// This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clauses. /// /// \param K Argument of clause. void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPDefaultClause() : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::DefaultKind getDefaultKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_default; } }; /// This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'proc_bind' clause. llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. 
void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPProcBindClause() : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::ProcBindKind getProcBindKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_proc_bind; } }; /// This represents 'unified_address' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_address /// \endcode /// In this example directive '#pragma omp requires' has 'unified_address' /// clause. 
class OMPUnifiedAddressClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_address' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedAddressClause() : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_address; } }; /// This represents 'unified_shared_memory' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_shared_memory /// \endcode /// In this example directive '#pragma omp requires' has 'unified_shared_memory' /// clause. class OMPUnifiedSharedMemoryClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_shared_memory' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPUnifiedSharedMemoryClause() : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory; } }; /// This represents 'reverse_offload' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires reverse_offload /// \endcode /// In this example directive '#pragma omp requires' has 'reverse_offload' /// clause. class OMPReverseOffloadClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'reverse_offload' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {} /// Build an empty clause. OMPReverseOffloadClause() : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_reverse_offload; } }; /// This represents 'dynamic_allocators' clause in the '#pragma omp requires' /// directive. 
/// /// \code /// #pragma omp requires dynamic_allocators /// \endcode /// In this example directive '#pragma omp requires' has 'dynamic_allocators' /// clause. class OMPDynamicAllocatorsClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'dynamic_allocators' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {} /// Build an empty clause. OMPDynamicAllocatorsClause() : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators; } }; /// This represents 'atomic_default_mem_order' clause in the '#pragma omp /// requires' directive. /// /// \code /// #pragma omp requires atomic_default_mem_order(seq_cst) /// \endcode /// In this example directive '#pragma omp requires' has simple /// atomic_default_mem_order' clause with kind 'seq_cst'. class OMPAtomicDefaultMemOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '(' SourceLocation LParenLoc; /// A kind of the 'atomic_default_mem_order' clause. OpenMPAtomicDefaultMemOrderClauseKind Kind = OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. 
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order; } }; /// This represents 'schedule' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for schedule(static, 3) /// \endcode /// In this example directive '#pragma omp for' has 'schedule' clause with /// arguments 'static' and '3'. class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'schedule' clause. OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown; /// Modifiers for 'schedule' clause. enum {FIRST, SECOND, NUM_MODIFIERS}; OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS]; /// Locations of modifiers. SourceLocation ModifiersLoc[NUM_MODIFIERS]; /// Start location of the schedule ind in source code. SourceLocation KindLoc; /// Location of ',' (if any). SourceLocation CommaLoc; /// Chunk size. Expr *ChunkSize = nullptr; /// Set schedule kind. /// /// \param K Schedule kind. void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// Set the first schedule modifier. /// /// \param M Schedule modifier. void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[FIRST] = M; } /// Set the second schedule modifier. /// /// \param M Schedule modifier. void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[SECOND] = M; } /// Set location of the first schedule modifier. 
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }

  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }

  /// Set the next unset schedule modifier: fills the first slot if it is
  /// still unknown, otherwise the second (asserting the second is free).
  /// NOTE(review): method name is misspelled ("Modifer") but is kept as-is;
  /// external callers depend on this exact spelling.
  ///
  /// \param M Schedule modifier.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier.
  /// \param M2 The second modifier applied to 'schedule' clause.
/// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// Build an empty clause. explicit OMPScheduleClause() : OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getScheduleKindLoc() { return KindLoc; } /// Get the first modifier location. SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// Get the second modifier location. SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. 
const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } const_child_range children() const { auto Children = const_cast<OMPScheduleClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_schedule; } }; /// This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. class OMPOrderedClause final : public OMPClause, private llvm::TrailingObjects<OMPOrderedClause, Expr *> { friend class OMPClauseReader; friend TrailingObjects; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Real number of loops. unsigned NumberOfLoops = 0; /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {} /// Build an empty clause. explicit OMPOrderedClause(unsigned NumLoops) : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()), NumberOfLoops(NumLoops) {} /// Set the number of associated for-loops. 
void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. static OMPOrderedClause *Create(const ASTContext &C, Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Build an empty clause. static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } /// Set number of iterations for the specified loop. void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations); /// Get number of iterations for all the loops. ArrayRef<Expr *> getLoopNumIterations() const; /// Set loop counter for the specified loop. void setLoopCounter(unsigned NumLoop, Expr *Counter); /// Get loops counter for the specified loop. Expr *getLoopCounter(unsigned NumLoop); const Expr *getLoopCounter(unsigned NumLoop) const; child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_ordered; } }; /// This represents 'nowait' clause in the '#pragma omp ...' directive. 
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNowaitClause()
      : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {}

  // 'nowait' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nowait;
  }
};

/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build 'untied' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUntiedClause()
      : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {}

  // 'untied' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_untied;
  }
};

/// This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
  /// Build 'mergeable' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPMergeableClause()
      : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(),
                  SourceLocation()) {}

  // 'mergeable' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_mergeable;
  }
};

/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReadClause()
      : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {}

  // 'read' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_read;
  }
};

/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPWriteClause()
      : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {}

  // 'write' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_write;
  }
};

/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
/// Also, this class represents 'update' clause in '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) update(in)
/// \endcode
/// In this example directive '#pragma omp depobj' has 'update' clause with 'in'
/// dependence kind.
class OMPUpdateClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUpdateClause, SourceLocation,
                                    OpenMPDependClauseKind> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// true if extended version of the clause for 'depobj' directive.
  bool IsExtended = false;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<SourceLocation>) const {
    // 2 locations: for '(' and argument location.
    return IsExtended ? 2 : 0;
  }

  /// Sets the location of '(' in clause for 'depobj' directive.
  void setLParenLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<SourceLocation>() = Loc;
  }

  /// Sets the location of the argument in clause for 'depobj' directive.
  void setArgumentLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    // Argument location is the second trailing SourceLocation, after '('.
    *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
  }

  /// Sets the dependence kind for the clause for 'depobj' directive.
  void setDependencyKind(OpenMPDependClauseKind DK) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<OpenMPDependClauseKind>() = DK;
  }

  /// Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc,
                  bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc),
        IsExtended(IsExtended) {}

  /// Build an empty clause.
  OMPUpdateClause(bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()),
        IsExtended(IsExtended) {}

public:
  /// Creates clause for 'atomic' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc);

  /// Creates clause for 'depobj' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ArgumentLoc Location of the argument.
  /// \param DK Dependence kind.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation ArgumentLoc,
                                 OpenMPDependClauseKind DK,
                                 SourceLocation EndLoc);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param IsExtended true if extended clause for 'depobj' directive must be
  /// created.
  static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended);

  /// Checks if the clause is the extended clause for the 'depobj' directive.
  bool isExtended() const { return IsExtended; }

  // 'update' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Gets the location of '(' in clause for 'depobj' directive.
  SourceLocation getLParenLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<SourceLocation>();
  }

  /// Gets the location of argument in clause for 'depobj' directive.
  SourceLocation getArgumentLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *std::next(getTrailingObjects<SourceLocation>(), 1);
  }

  /// Gets the dependence kind in clause for 'depobj' directive.
  OpenMPDependClauseKind getDependencyKind() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<OpenMPDependClauseKind>();
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_update;
  }
};

/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPCaptureClause()
      : OMPClause(llvm::omp::OMPC_capture, SourceLocation(), SourceLocation()) {
  }

  // 'capture' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_capture;
  }
};

/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSeqCstClause()
      : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(), SourceLocation()) {
  }

  // 'seq_cst' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_seq_cst;
  }
};

/// This represents 'acq_rel' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acq_rel
/// \endcode
/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
class OMPAcqRelClause final : public OMPClause {
public:
  /// Build 'acq_rel' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcqRelClause()
      : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(), SourceLocation()) {
  }

  // 'acq_rel' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acq_rel;
  }
};

/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
  /// Build 'acquire' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcquireClause()
      : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(), SourceLocation()) {
  }

  // 'acquire' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acquire;
  }
};

/// This represents 'release' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush release
/// \endcode
/// In this example directive '#pragma omp flush' has 'release' clause.
class OMPReleaseClause final : public OMPClause {
public:
  /// Build 'release' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReleaseClause()
      : OMPClause(llvm::omp::OMPC_release, SourceLocation(), SourceLocation()) {
  }

  // 'release' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_release;
  }
};

/// This represents 'relaxed' clause in the '#pragma omp atomic'
/// directives.
///
/// \code
/// #pragma omp atomic relaxed
/// \endcode
/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
class OMPRelaxedClause final : public OMPClause {
public:
  /// Build 'relaxed' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPRelaxedClause()
      : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(), SourceLocation()) {
  }

  // 'relaxed' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_relaxed;
  }
};

/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc,
                                           LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), N) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  /// The private copies are tail-allocated directly after the variable list
  /// (they start at varlist_end() and have varlist_size() elements).
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  // Children are the references to the original (pre-privatization)
  // variables stored in the variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPPrivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_private;
  }
};

/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate,
                                                StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPreInit(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFirstprivateClause(unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(
            llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPreInit(this) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables. Tail-allocated directly after the variable list.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new
  /// private variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new
  /// private variables. Tail-allocated directly after the private copies.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  /// \param PrivateVL List of references to private copies with initializers.
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable
  /// is of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Unlike most clauses, 'firstprivate' reports its variable list as used
  // children as well (the original variables are read on region entry).
  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_firstprivate;
  }
};

/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment
  // operation required for lastprivate clause. This list represents private
  // variables (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment
  // operation required for lastprivate clause. This list represents original
  // variables (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Optional lastprivate kind, e.g. 'conditional', if specified by user.
  OpenMPLastprivateModifier LPKind;
  /// Optional location of the lastprivate kind, if specified by user.
  SourceLocation LPKindLoc;
  /// Optional colon location, if specified by user.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param LPKind Lastprivate kind, e.g. 'conditional'.
  /// \param LPKindLoc Location of the lastprivate kind.
  /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
                       SourceLocation LPKindLoc, SourceLocation ColonLoc,
                       unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables. This is tail-allocated array 1 (see
  /// the class comment); it starts directly after the variable list.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays,
  /// single array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions (tail-allocated array 2,
  /// directly after the private copies).
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays,
  /// single array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions (tail-allocated array 3,
  /// directly after the source expressions).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions (tail-allocated array 4,
  /// directly after the destination expressions).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

  /// Sets lastprivate kind.
  void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; }
  /// Sets location of the lastprivate kind.
  void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; }
  /// Sets colon symbol location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list
  /// represents private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list
  /// represents original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param LPKind Lastprivate kind, e.g. 'conditional'.
  /// \param LPKindLoc Location of the lastprivate kind.
  /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc,
         SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Lastprivate kind.
  OpenMPLastprivateModifier getKind() const { return LPKind; }
  /// Returns the location of the lastprivate kind.
  SourceLocation getKindLoc() const { return LPKindLoc; }
  /// Returns the location of the ':' symbol, if any.
  SourceLocation getColonLoc() const { return ColonLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }

  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  // Children are the references to the original variables in the list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPLastprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_lastprivate;
  }
};

/// This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
    : public OMPVarListClause<OMPSharedClause>,
      private llvm::TrailingObjects<OMPSharedClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc,
                                          LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPSharedClause(unsigned N)
      : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);

  // Children are the variable references themselves; the trailing Expr*
  // storage is viewed as Stmt** for the generic AST traversal.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPSharedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // 'shared' contributes no used expressions: the used-children range is
  // deliberately empty.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_shared;
  }
};

/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Reduction modifier.
  OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown;

  /// Reduction modifier location.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ModifierLoc, SourceLocation ColonLoc,
                     SourceLocation EndLoc,
                     OpenMPReductionClauseModifier Modifier, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
                                             StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
                                             SourceLocation(),
                                             SourceLocation(),
                                             SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets reduction modifier.
  void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; }

  /// Sets location of the modifier.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  // NOTE: all helper-expression lists below live in the trailing Expr*
  // storage, laid out consecutively after the variable list. Each segment is
  // varlist_size() elements long and is located by chaining off the previous
  // segment's end():
  //   vars | privates | LHSs | RHSs | reduction ops
  //        | inscan copy ops | inscan copy array temps | inscan copy elems
  // Keep the getters below consistent with this order.

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper RHS expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper copy operations for inscan reductions.
  /// The form is: Temps[i] = LHS[i];
  void setInscanCopyOps(ArrayRef<Expr *> Ops);

  /// Get the list of helper inscan copy operations.
  MutableArrayRef<Expr *> getInscanCopyOps() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyOps() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

  /// Set list of helper temp vars for inscan copy array operations.
  void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps);

  /// Get the list of helper inscan copy array temps.
  MutableArrayRef<Expr *> getInscanCopyArrayTemps() {
    return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyArrayTemps() const {
    return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size());
  }

  /// Set list of helper temp elements vars for inscan copy array operations.
  void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems);

  /// Get the list of helper inscan copy array elements.
  MutableArrayRef<Expr *> getInscanCopyArrayElems() {
    return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyArrayElems() const {
    return llvm::makeArrayRef(getInscanCopyArrayTemps().end(),
                              varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for reduction clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for reduction clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param CopyOps List of copy operations for inscan reductions:
  /// \code
  /// TempExprs = LHSExprs;
  /// \endcode
  /// \param CopyArrayTemps Temp arrays for prefix sums.
  /// \param CopyArrayElems Temp arrays for prefix sums.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc,
         OpenMPReductionClauseModifier Modifier, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps,
         ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param Modifier Reduction modifier.
  static OMPReductionClause *
  CreateEmpty(const ASTContext &C, unsigned N,
              OpenMPReductionClauseModifier Modifier);

  /// Returns modifier.
  OpenMPReductionClauseModifier getModifier() const { return Modifier; }

  /// Returns modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(),
                                   getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(),
                                   getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(),
                                   getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }
  helper_expr_const_range copy_ops() const {
    return helper_expr_const_range(getInscanCopyOps().begin(),
                                   getInscanCopyOps().end());
  }
  helper_expr_range copy_ops() {
    return helper_expr_range(getInscanCopyOps().begin(),
                             getInscanCopyOps().end());
  }
  helper_expr_const_range copy_array_temps() const {
    return helper_expr_const_range(getInscanCopyArrayTemps().begin(),
                                   getInscanCopyArrayTemps().end());
  }
  helper_expr_range copy_array_temps() {
    return helper_expr_range(getInscanCopyArrayTemps().begin(),
                             getInscanCopyArrayTemps().end());
  }
  helper_expr_const_range copy_array_elems() const {
    return helper_expr_const_range(getInscanCopyArrayElems().begin(),
                                   getInscanCopyArrayElems().end());
  }
  helper_expr_range copy_array_elems() {
    return helper_expr_range(getInscanCopyArrayElems().begin(),
                             getInscanCopyArrayElems().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // The reduction variables are also reported as used children (a non-empty
  // range, unlike e.g. 'shared' above).
  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_reduction;
  }
};

/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(
            llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            llvm::omp::OMPC_task_reduction, SourceLocation(),
            SourceLocation(), SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  // Trailing Expr* storage layout (each segment varlist_size() elements,
  // located by chaining off the previous segment's end()):
  //   vars | privates | LHSs | RHSs | reduction ops
  // Keep the getters below consistent with this order.

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause. Also, variables
  /// in these expressions are used for proper initialization of reduction
  /// copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper RHS expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for task_reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for task_reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation ColonLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(),
                                   getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(),
                                   getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(),
                                   getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_task_reduction;
  }
};

/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
// NOTE(review): interior of OMPInReductionClause; the class header and its
// data members are declared above this chunk. The helper expression lists
// live in tail-allocated storage after the variable list, in the order
// Privates, LHSExprs, RHSExprs, ReductionOps, TaskgroupDescriptors — each
// getter below locates its list as "end of the previous list", so the
// ordering of these accessors mirrors the memory layout.

/// Sets the location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent private copy of the reduction variable.
void setPrivates(ArrayRef<Expr *> Privates);

/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
  return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
  return llvm::makeArrayRef(varlist_end(), varlist_size());
}

/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent LHS expression in the final reduction
/// expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);

/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
  return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
  return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}

/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent RHS expression in the final reduction
/// expression performed by the reduction clause. Also, variables in these
/// expressions are used for proper initialization of reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);

/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getRHSExprs() {
  return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
  return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}

/// Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction call that calculates new value from source
/// helper expressions to destination helper expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);

/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
  return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
  return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}

/// Set list of helper reduction taskgroup descriptors.
// NOTE(review): the parameter is named ReductionOps although it carries
// taskgroup descriptors — presumably a copy-paste of the declaration above.
void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

/// Get the list of helper reduction taskgroup descriptors.
MutableArrayRef<Expr *> getTaskgroupDescriptors() {
  return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
}
ArrayRef<const Expr *> getTaskgroupDescriptors() const {
  return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
}

public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
/// \param LHSExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represents reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CutomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
/// \param TaskgroupDescriptors List of helper taskgroup descriptors for
/// corresponding items in parent taskgroup task_reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPInReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
       SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
       NestedNameSpecifierLoc QualifierLoc,
       const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
       ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
       ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
       Stmt *PreInit, Expr *PostUpdate);

/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }

/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
    llvm::iterator_range<helper_expr_const_iterator>;

helper_expr_const_range privates() const {
  return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
  return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
  return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
  return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
  return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
  return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
  return helper_expr_const_range(getReductionOps().begin(),
                                 getReductionOps().end());
}
helper_expr_range reduction_ops() {
  return helper_expr_range(getReductionOps().begin(),
                           getReductionOps().end());
}
helper_expr_const_range taskgroup_descriptors() const {
  return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                 getTaskgroupDescriptors().end());
}
helper_expr_range taskgroup_descriptors() {
  return helper_expr_range(getTaskgroupDescriptors().begin(),
                           getTaskgroupDescriptors().end());
}

// The clause's children are exactly the variables of the var-list; the
// tail-allocated helper lists are intentionally not exposed as children.
child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
  auto Children = const_cast<OMPInReductionClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range
used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_in_reduction;
}
};

/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;

/// Modifier of 'linear' clause.
OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;

/// Location of linear modifier if any.
SourceLocation ModifierLoc;

/// Location of ':'.
SourceLocation ColonLoc;

/// Sets the linear step for clause.
// The Step expression is stored in the first helper slot directly after
// Finals[] (see the layout comment on getPrivates() below).
void setStep(Expr *Step) { *(getFinals().end()) = Step; }

/// Sets the expression to calculate linear step for clause.
// CalcStep occupies the second helper slot after Finals[].
void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

/// Build 'linear' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                SourceLocation ColonLoc, SourceLocation EndLoc,
                unsigned NumVars)
    : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc,
                                        LParenLoc, EndLoc, NumVars),
      OMPClauseWithPostUpdate(this), Modifier(Modifier),
      ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPLinearClause(unsigned NumVars)
    : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear,
                                        SourceLocation(), SourceLocation(),
                                        SourceLocation(), NumVars),
      OMPClauseWithPostUpdate(this) {}

/// Gets the list of initial values for linear variables.
///
/// There are NumVars expressions with initial values allocated after the
/// varlist, they are followed by NumVars update expressions (used to update
/// the linear variable's value on current iteration) and they are followed by
/// NumVars final expressions (used to calculate the linear variable's
/// value after the loop body). After these lists, there are 2 helper
/// expressions - linear step and a helper to calculate it before the
/// loop body (used when the linear step is not constant):
///
/// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
/// Finals[]; Step; CalcStep; }
MutableArrayRef<Expr *> getPrivates() {
  return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
  return llvm::makeArrayRef(varlist_end(), varlist_size());
}

MutableArrayRef<Expr *> getInits() {
  return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
  return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}

/// Sets the list of update expressions for linear variables.
// NOTE(review): doc says "Sets" but these are getters — comment looks stale.
MutableArrayRef<Expr *> getUpdates() {
  return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
}
ArrayRef<const Expr *> getUpdates() const {
  return llvm::makeArrayRef(getInits().end(), varlist_size());
}

/// Sets the list of final update expressions for linear variables.
// NOTE(review): same "Sets"/getter mismatch as above.
MutableArrayRef<Expr *> getFinals() {
  return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
}
ArrayRef<const Expr *> getFinals() const {
  return llvm::makeArrayRef(getUpdates().end(), varlist_size());
}

/// Gets the list of used expressions for linear variables.
// Skips the Step and CalcStep slots (+2) that follow Finals[], and spans
// varlist_size() + 1 elements.
MutableArrayRef<Expr *> getUsedExprs() {
  return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
}
ArrayRef<const Expr *> getUsedExprs() const {
  return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
}

/// Sets the list of the copies of original linear variables.
/// \param PL List of expressions.
void setPrivates(ArrayRef<Expr *> PL);

/// Sets the list of the initial values for linear variables.
/// \param IL List of expressions.
void setInits(ArrayRef<Expr *> IL);

public:
/// Creates clause with a list of variables \a VL and a linear step
/// \a Step.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Modifier Modifier of 'linear' clause.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PL List of private copies of original variables.
/// \param IL List of initial values for the variables.
/// \param Step Linear step.
/// \param CalcStep Calculation of the linear step.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLinearClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
       OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
       SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
       ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
       Stmt *PreInit, Expr *PostUpdate);

/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

/// Set modifier.
void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }

/// Return modifier.
OpenMPLinearClauseKind getModifier() const { return Modifier; }

/// Set modifier location.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

/// Return modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }

/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }

/// Returns linear step.
Expr *getStep() { return *(getFinals().end()); }

/// Returns linear step.
const Expr *getStep() const { return *(getFinals().end()); }

/// Returns expression to calculate linear step.
Expr *getCalcStep() { return *(getFinals().end() + 1); }

/// Returns expression to calculate linear step.
const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

/// Sets the list of update expressions for linear variables.
/// \param UL List of expressions.
void setUpdates(ArrayRef<Expr *> UL);

/// Sets the list of final update expressions for linear variables.
/// \param FL List of expressions.
void setFinals(ArrayRef<Expr *> FL);

/// Sets the list of used expressions for the linear clause.
void setUsedExprs(ArrayRef<Expr *> UE);

using privates_iterator = MutableArrayRef<Expr *>::iterator;
using privates_const_iterator = ArrayRef<const Expr *>::iterator;
using privates_range = llvm::iterator_range<privates_iterator>;
using privates_const_range = llvm::iterator_range<privates_const_iterator>;

privates_range privates() {
  return privates_range(getPrivates().begin(), getPrivates().end());
}
privates_const_range privates() const {
  return privates_const_range(getPrivates().begin(), getPrivates().end());
}

using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;

inits_range inits() {
  return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
  return inits_const_range(getInits().begin(), getInits().end());
}

using updates_iterator = MutableArrayRef<Expr *>::iterator;
using updates_const_iterator = ArrayRef<const Expr *>::iterator;
using updates_range = llvm::iterator_range<updates_iterator>;
using updates_const_range = llvm::iterator_range<updates_const_iterator>;

updates_range updates() {
  return updates_range(getUpdates().begin(), getUpdates().end());
}
updates_const_range updates() const {
  return updates_const_range(getUpdates().begin(), getUpdates().end());
}

using finals_iterator = MutableArrayRef<Expr *>::iterator;
using finals_const_iterator = ArrayRef<const Expr *>::iterator;
using finals_range = llvm::iterator_range<finals_iterator>;
using finals_const_range = llvm::iterator_range<finals_const_iterator>;

finals_range finals() {
  return finals_range(getFinals().begin(), getFinals().end());
}
finals_const_range finals() const {
  return finals_const_range(getFinals().begin(), getFinals().end());
}

using used_expressions_iterator = MutableArrayRef<Expr *>::iterator;
using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator;
using used_expressions_range =
    llvm::iterator_range<used_expressions_iterator>;
using used_expressions_const_range =
    llvm::iterator_range<used_expressions_const_iterator>;

// NOTE(review): these bodies name finals_range/finals_const_range, which are
// the same underlying types as used_expressions_range/_const_range (all four
// aliases resolve identically above); consider using the matching alias names
// for readability.
used_expressions_range used_expressions() {
  return finals_range(getUsedExprs().begin(), getUsedExprs().end());
}
used_expressions_const_range used_expressions() const {
  return finals_const_range(getUsedExprs().begin(), getUsedExprs().end());
}

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
  auto Children = const_cast<OMPLinearClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

// Out-of-line: linear has real used children (defined in the .cpp).
child_range used_children();
const_child_range used_children() const {
  auto Children = const_cast<OMPLinearClause *>(this)->used_children();
  return const_child_range(Children.begin(), Children.end());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_linear;
}
};

/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;

/// Location of ':'.
SourceLocation ColonLoc;

/// Sets the alignment for clause.
// The single alignment expression is stored in the slot directly after the
// variable list.
void setAlignment(Expr *A) { *varlist_end() = A; }

/// Build 'aligned' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation ColonLoc, SourceLocation EndLoc,
                 unsigned NumVars)
    : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc,
                                         LParenLoc, EndLoc, NumVars),
      ColonLoc(ColonLoc) {}

/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPAlignedClause(unsigned NumVars)
    : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned,
                                         SourceLocation(), SourceLocation(),
                                         SourceLocation(), NumVars) {}

public:
/// Creates clause with a list of variables \a VL and alignment \a A.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param A Alignment.
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation ColonLoc,
                                SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                Expr *A);

/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }

/// Returns alignment.
Expr *getAlignment() { return *varlist_end(); }

/// Returns alignment.
const Expr *getAlignment() const { return *varlist_end(); }

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
  auto Children = const_cast<OMPAlignedClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_aligned;
}
};

/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
// Class has 3 additional tail allocated arrays:
// 1. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents sources.
// 2. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents destinations.
// 3. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of propagation of master's thread values of
// threadprivate variables to local instances of that variables in other
// implicit threads.

friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;

/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc, unsigned N)
    : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc,
                                        LParenLoc, EndLoc, N) {}

/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyinClause(unsigned N)
    : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin,
                                        SourceLocation(), SourceLocation(),
                                        SourceLocation(), N) {}

/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyin clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);

/// Get the list of helper source expressions.
// Tail-allocated layout: Sources[], then Destinations[], then
// AssignmentOps[], each of varlist_size() elements after the var-list.
MutableArrayRef<Expr *> getSourceExprs() {
  return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
  return llvm::makeArrayRef(varlist_end(), varlist_size());
}

/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyin clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);

/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
  return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
  return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}

/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

/// Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
  return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                 varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
  return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}

public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of propagation of master's thread values of
/// threadprivate variables to local instances of that variables in other
/// implicit threads.
static OMPCopyinClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
       SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
       ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
    llvm::iterator_range<helper_expr_const_iterator>;

helper_expr_const_range source_exprs() const {
  return helper_expr_const_range(getSourceExprs().begin(),
                                 getSourceExprs().end());
}
helper_expr_range source_exprs() {
  return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
  return helper_expr_const_range(getDestinationExprs().begin(),
                                 getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
  return helper_expr_range(getDestinationExprs().begin(),
                           getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
  return helper_expr_const_range(getAssignmentOps().begin(),
                                 getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
  return helper_expr_range(getAssignmentOps().begin(),
                           getAssignmentOps().end());
}

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
  auto Children = const_cast<OMPCopyinClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_copyin;
}
};

/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;

/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
    : OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate,
                                             StartLoc, LParenLoc, EndLoc,
                                             N) {}

/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyprivateClause(unsigned N)
    : OMPVarListClause<OMPCopyprivateClause>(
          llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(),
          SourceLocation(), N) {}

/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);

/// Get the list of helper source expressions.
// Tail-allocated layout mirrors OMPCopyinClause: Sources[], Destinations[],
// AssignmentOps[], each of varlist_size() elements after the var-list.
MutableArrayRef<Expr *> getSourceExprs() {
  return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
  return llvm::makeArrayRef(varlist_end(), varlist_size());
}

/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);

/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
  return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
  return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}

/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

/// Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
  return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                 varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
  return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}

public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// copyprivate clause.
static OMPCopyprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
       SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
       ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
    llvm::iterator_range<helper_expr_const_iterator>;

helper_expr_const_range source_exprs() const {
  return helper_expr_const_range(getSourceExprs().begin(),
                                 getSourceExprs().end());
}
helper_expr_range source_exprs() {
  return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
  return helper_expr_const_range(getDestinationExprs().begin(),
                                 getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
  return helper_expr_range(getDestinationExprs().begin(),
                           getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
  return helper_expr_const_range(getAssignmentOps().begin(),
                                 getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
  return helper_expr_range(getAssignmentOps().begin(),
                           getAssignmentOps().end());
}

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
  auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_copyprivate;
}
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive.
/// This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;

/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
               SourceLocation EndLoc, unsigned N)
    : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc,
                                       LParenLoc, EndLoc, N) {}

/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFlushClause(unsigned N)
    : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush,
                                       SourceLocation(), SourceLocation(),
                                       SourceLocation(), N) {}

public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                              SourceLocation LParenLoc, SourceLocation EndLoc,
                              ArrayRef<Expr *> VL);

/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
  auto Children = const_cast<OMPFlushClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_flush;
}
};

/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// depobj' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
friend class OMPClauseReader;

/// Location of '('.
SourceLocation LParenLoc;

/// Chunk size.
// NOTE(review): this field holds the depobj expression (see setDepobj /
// getDepobj); the "Chunk size" doc looks copy-pasted from another clause.
Expr *Depobj = nullptr;

/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
    : OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
      LParenLoc(LParenLoc) {}

/// Build an empty clause.
///
explicit OMPDepobjClause()
    : OMPClause(llvm::omp::OMPC_depobj, SourceLocation(),
                SourceLocation()) {}

/// Sets the depobj expression for the clause.
void setDepobj(Expr *E) { Depobj = E; }

/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Creates clause. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param Depobj depobj expression associated with the 'depobj' directive. static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, Expr *Depobj); /// Creates an empty clause. /// /// \param C AST context. static OMPDepobjClause *CreateEmpty(const ASTContext &C); /// Returns depobj expression associated with the clause. Expr *getDepobj() { return Depobj; } const Expr *getDepobj() const { return Depobj; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&Depobj), reinterpret_cast<Stmt **>(&Depobj) + 1); } const_child_range children() const { auto Children = const_cast<OMPDepobjClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_depobj; } }; /// This represents implicit clause 'depend' for the '#pragma omp task' /// directive. /// /// \code /// #pragma omp task depend(in:a,b) /// \endcode /// In this example directive '#pragma omp task' with clause 'depend' with the /// variables 'a' and 'b' with dependency 'in'. class OMPDependClause final : public OMPVarListClause<OMPDependClause>, private llvm::TrailingObjects<OMPDependClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Dependency type (one of in, out, inout). 
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; /// Dependency type location. SourceLocation DepLoc; /// Colon location. SourceLocation ColonLoc; /// Number of loops, associated with the depend clause. unsigned NumLoops = 0; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// \param NumLoops Number of loops that is associated with this depend /// clause. OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N, unsigned NumLoops) : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc, LParenLoc, EndLoc, N), NumLoops(NumLoops) {} /// Build an empty clause. /// /// \param N Number of variables. /// \param NumLoops Number of loops that is associated with this depend /// clause. explicit OMPDependClause(unsigned N, unsigned NumLoops) : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, SourceLocation(), SourceLocation(), SourceLocation(), N), NumLoops(NumLoops) {} /// Set dependency kind. void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; } /// Set dependency kind and its location. void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Sets optional dependency modifier. void setModifier(Expr *DepModifier); public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param DepKind Dependency type. /// \param DepLoc Location of the dependency type. /// \param ColonLoc Colon location. /// \param VL List of references to the variables. /// \param NumLoops Number of loops that is associated with this depend /// clause. 
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL, unsigned NumLoops); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// \param NumLoops Number of loops that is associated with this depend /// clause. static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N, unsigned NumLoops); /// Get dependency type. OpenMPDependClauseKind getDependencyKind() const { return DepKind; } /// Return optional depend modifier. Expr *getModifier(); const Expr *getModifier() const { return const_cast<OMPDependClause *>(this)->getModifier(); } /// Get dependency type location. SourceLocation getDependencyLoc() const { return DepLoc; } /// Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } /// Get number of loops associated with the clause. unsigned getNumLoops() const { return NumLoops; } /// Set the loop data for the depend clauses with 'sink|source' kind of /// dependency. void setLoopData(unsigned NumLoop, Expr *Cnt); /// Get the loop data. Expr *getLoopData(unsigned NumLoop); const Expr *getLoopData(unsigned NumLoop) const; child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPDependClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_depend; } }; /// This represents 'device' clause in the '#pragma omp ...' 
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device clause modifier.
  OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

  /// Sets modifier.
  void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; }

  /// Sets modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

public:
  /// Build 'device' clause.
  ///
  /// \param Modifier Clause modifier.
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper statement registered as the pre-init statement for
  /// \a CaptureRegion.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param ModifierLoc Modifier location.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE,
                  OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                  SourceLocation LParenLoc, SourceLocation ModifierLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
        ModifierLoc(ModifierLoc), Device(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause (used for deserialization).
  OMPDeviceClause()
      : OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// Return device number.
  /// NOTE(review): returns a non-const Expr* even from the const accessor.
  Expr *getDevice() const { return cast<Expr>(Device); }

  /// Gets modifier.
  OpenMPDeviceClauseModifier getModifier() const { return Modifier; }

  /// Gets modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  // The single child is the device-number expression.
  child_range children() { return child_range(&Device, &Device + 1); }

  const_child_range children() const {
    return const_child_range(&Device, &Device + 1);
  }

  // No "used" expressions for this clause: returns an empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_device;
  }
};

/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause (used for deserialization).
  OMPThreadsClause()
      : OMPClause(llvm::omp::OMPC_threads, SourceLocation(), SourceLocation()) {
  }

  // A simple clause: it carries no expressions, so all ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_threads;
  }
};

/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {}

  /// Build an empty clause (used for deserialization).
  OMPSIMDClause()
      : OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {}

  // A simple clause: it carries no expressions, so all ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_simd;
  }
};

/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// Class that represents a component of a mappable expression. E.g.
  /// for an expression S.a, the first component is a declaration reference
  /// expression associated with 'S' and the second is a member expression
  /// associated with the field declaration 'a'. If the expression is an array
  /// subscript it may not have any associated declaration. In that case the
  /// associated declaration is set to nullptr.
  class MappableComponent {
    /// Pair of the expression and the non-contiguous flag associated with the
    /// component.
    llvm::PointerIntPair<Expr *, 1, bool>
        AssociatedExpressionNonContiguousPr;

    /// Declaration associated with the component. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;
    // The stored declaration is canonicalized so that components referring to
    // the same entity compare equal regardless of redeclarations.
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration,
                               bool IsNonContiguous)
        : AssociatedExpressionNonContiguousPr(AssociatedExpression,
                                              IsNonContiguous),
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const {
      return AssociatedExpressionNonContiguousPr.getPointer();
    }

    bool isNonContiguous() const {
      return AssociatedExpressionNonContiguousPr.getInt();
    }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. The first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};

/// This structure contains all sizes needed by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
  /// Number of expressions listed.
  unsigned NumVars;
  /// Number of unique base declarations.
  unsigned NumUniqueDeclarations;
  /// Number of component lists.
  unsigned NumComponentLists;
  /// Total number of expression components.
  unsigned NumComponents;
  OMPMappableExprListSizeTy() = default;
  OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};

/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// Number of component lists in this clause.
  unsigned NumComponentLists;

  /// Total number of components in this clause.
  unsigned NumComponents;

  /// Whether this clause is possible to have user-defined mappers associated.
  /// It should be true for map, to, and from clauses, and false for
  /// use_device_ptr and is_device_ptr.
  const bool SupportsMapper;

  /// C++ nested name specifier for the associated user-defined mapper.
  NestedNameSpecifierLoc MapperQualifierLoc;

  /// The associated user-defined mapper identifier information.
  DeclarationNameInfo MapperIdInfo;

protected:
  /// Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  /// \param SupportsMapper Indicates whether this clause is possible to have
  /// user-defined mappers associated.
  /// \param MapperQualifierLocPtr C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
  OMPMappableExprListClause(
      OpenMPClauseKind K, const OMPVarListLocTy &Locs,
      const OMPMappableExprListSizeTy &Sizes, bool SupportsMapper = false,
      NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
      DeclarationNameInfo *MapperIdInfoPtr = nullptr)
      : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
                            Sizes.NumVars),
        NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
        NumComponentLists(Sizes.NumComponentLists),
        NumComponents(Sizes.NumComponents), SupportsMapper(SupportsMapper) {
    if (MapperQualifierLocPtr)
      MapperQualifierLoc = *MapperQualifierLocPtr;
    if (MapperIdInfoPtr)
      MapperIdInfo = *MapperIdInfoPtr;
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Set the unique declarations that are in the trailing objects of the
  /// class.
  void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
    assert(UDs.size() == NumUniqueDeclarations &&
           "Unexpected amount of unique declarations.");
    std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  MutableArrayRef<unsigned> getDeclNumListsRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  ArrayRef<unsigned> getDeclNumListsRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Set the number of lists per declaration that are in the trailing
  /// objects of the class.
  void setDeclNumLists(ArrayRef<unsigned> DNLs) {
    assert(DNLs.size() == NumUniqueDeclarations &&
           "Unexpected amount of list numbers.");
    std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  MutableArrayRef<unsigned> getComponentListSizesRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  ArrayRef<unsigned> getComponentListSizesRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Set the cumulative component lists sizes that are in the trailing
  /// objects of the class.
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }

  /// Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
  void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
  }

  /// Fill the clause information from the list of declarations and
  /// associated component lists.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");

    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      // Walk declarations and lists in lockstep; the two arrays are parallel.
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }

    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();

    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();

    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();

    auto Components = getComponentsRef();
    auto CI = Components.begin();

    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;

    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;

      // Initialize the entry.
      *UDI = D;
      ++UDI;

      *DNLI = CL.size();
      ++DNLI;

      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();

        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;

        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

  /// Set the nested name specifier of associated user-defined mapper.
  void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
    MapperQualifierLoc = NNSL;
  }

  /// Set the name of associated user-defined mapper.
  void setMapperIdInfo(DeclarationNameInfo MapperId) {
    MapperIdInfo = MapperId;
  }

  /// Get the user-defined mapper references that are in the trailing objects
  /// of the class. They are stored right after the varlist_size() variable
  /// expressions, one mapper reference per expression.
  MutableArrayRef<Expr *> getUDMapperRefs() {
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    return llvm::makeMutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Get the user-defined mappers references that are in the trailing objects
  /// of the class.
  ArrayRef<Expr *> getUDMapperRefs() const {
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    return llvm::makeArrayRef<Expr *>(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Set the user-defined mappers that are in the trailing objects of the
  /// class.
  void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
    assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
           "Unexpected number of user-defined mappers.");
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
  }

public:
  /// Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }

  /// Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }

  /// Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// Gets the nested name specifier for associated user-defined mapper.
  NestedNameSpecifierLoc getMapperQualifierLoc() const {
    return MapperQualifierLoc;
  }

  /// Gets the name info for associated user-defined mapper.
  const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }

  /// Iterator that browses the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Whether this clause is possible to have user-defined mappers associated.
    const bool SupportsMapper;

    // The user-defined mapper associated with the current declaration.
    ArrayRef<Expr *>::iterator MapperCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components, bool SupportsMapper,
        ArrayRef<Expr *> Mappers)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          SupportsMapper(SupportsMapper),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
      if (SupportsMapper)
        MapperCur = Mappers.begin();
    }

    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components, bool SupportsMapper,
        ArrayRef<Expr *> Mappers)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components,
                                         SupportsMapper, Mappers) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;

        if (SupportsMapper)
          ++MapperCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::tuple<const ValueDecl *, MappableExprComponentListRef,
               const ValueDecl *>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      const ValueDecl *Mapper = nullptr;
      if (SupportsMapper && *MapperCur)
        Mapper = cast<ValueDecl>(cast<DeclRefExpr>(*MapperCur)->getDecl());
      return std::make_tuple(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize),
          Mapper);
    }
    std::tuple<const ValueDecl *, MappableExprComponentListRef,
               const ValueDecl *>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }

      ++ListSizeCur;
      if (SupportsMapper)
        ++MapperCur;

      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef(), SupportsMapper,
        SupportsMapper ? getUDMapperRefs() : llvm::None);
  }
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()),
        SupportsMapper, llvm::None);
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef(), SupportsMapper,
        SupportsMapper ? getUDMapperRefs() : llvm::None);
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }

  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;

  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};

/// This
represents clause 'map' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target map(a,b) /// \endcode /// In this example directive '#pragma omp target' has clause 'map' /// with the variables 'a' and 'b'. class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>, private llvm::TrailingObjects< OMPMapClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { // There are varlist_size() of expressions, and varlist_size() of // user-defined mappers. return 2 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } private: /// Map-type-modifiers for the 'map' clause. OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = { OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown}; /// Location of map-type-modifiers for the 'map' clause. SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers]; /// Map type for the 'map' clause. OpenMPMapClauseKind MapType = OMPC_MAP_unknown; /// Is this an implicit map type or not. bool MapTypeIsImplicit = false; /// Location of the map type. SourceLocation MapLoc; /// Colon location. SourceLocation ColonLoc; /// Build a clause for \a NumVars listed expressions, \a /// NumUniqueDeclarations declarations, \a NumComponentLists total component /// lists, and \a NumComponents total expression components. /// /// \param MapModifiers Map-type-modifiers. 
/// \param MapModifiersLoc Locations of map-type-modifiers. /// \param MapperQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfo The identifier of associated user-defined mapper. /// \param MapType Map type. /// \param MapTypeIsImplicit Map type is inferred implicitly. /// \param MapLoc Location of the map type. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers, ArrayRef<SourceLocation> MapModifiersLoc, NestedNameSpecifierLoc MapperQualifierLoc, DeclarationNameInfo MapperIdInfo, OpenMPMapClauseKind MapType, bool MapTypeIsImplicit, SourceLocation MapLoc, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes, /*SupportsMapper=*/true, &MapperQualifierLoc, &MapperIdInfo), MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) { assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() && "Unexpected number of map type modifiers."); llvm::copy(MapModifiers, std::begin(MapTypeModifiers)); assert(llvm::array_lengthof(MapTypeModifiersLoc) == MapModifiersLoc.size() && "Unexpected number of map type modifier locations."); llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc)); } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. 
It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(), Sizes, /*SupportsMapper=*/true) {} /// Set map-type-modifier for the clause. /// /// \param I index for map-type-modifier. /// \param T map-type-modifier for the clause. void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) { assert(I < NumberOfOMPMapClauseModifiers && "Unexpected index to store map type modifier, exceeds array size."); MapTypeModifiers[I] = T; } /// Set location for the map-type-modifier. /// /// \param I index for map-type-modifier location. /// \param TLoc map-type-modifier location. void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) { assert(I < NumberOfOMPMapClauseModifiers && "Index to store map type modifier location exceeds array size."); MapTypeModifiersLoc[I] = TLoc; } /// Set type for the clause. /// /// \param T Type for the clause. void setMapType(OpenMPMapClauseKind T) { MapType = T; } /// Set type location. /// /// \param TLoc Type location. void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. 
/// \param UDMapperRefs References to user-defined mappers associated with /// expressions used in the clause. /// \param MapModifiers Map-type-modifiers. /// \param MapModifiersLoc Location of map-type-modifiers. /// \param UDMQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperId The identifier of associated user-defined mapper. /// \param Type Map type. /// \param TypeIsImplicit Map type is inferred implicitly. /// \param TypeLoc Location of the map type. static OMPMapClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs, ArrayRef<OpenMPMapModifierKind> MapModifiers, ArrayRef<SourceLocation> MapModifiersLoc, NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId, OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc); /// Creates an empty clause with the place for \a NumVars original /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists /// lists, and \a NumComponents expression components. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPMapClause *CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); /// Fetches mapping kind for the clause. OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; } /// Is this an implicit map type? /// We have to capture 'IsMapTypeImplicit' from the parser for more /// informative error messages. 
It helps distinguish map(r) from /// map(tofrom: r), which is important to print more helpful error /// messages for some target directives. bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; } /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers. /// /// \param Cnt index for map-type-modifier. OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMapClauseModifiers && "Requested modifier exceeds the total number of modifiers."); return MapTypeModifiers[Cnt]; } /// Fetches the map-type-modifier location at 'Cnt' index of array of /// modifiers' locations. /// /// \param Cnt index for map-type-modifier location. SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMapClauseModifiers && "Requested modifier location exceeds total number of modifiers."); return MapTypeModifiersLoc[Cnt]; } /// Fetches ArrayRef of map-type-modifiers. ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY { return llvm::makeArrayRef(MapTypeModifiers); } /// Fetches ArrayRef of location of map-type-modifiers. ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY { return llvm::makeArrayRef(MapTypeModifiersLoc); } /// Fetches location of clause mapping kind. SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; } /// Get colon location. 
SourceLocation getColonLoc() const { return ColonLoc; } child_range children() { return child_range( reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPMapClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom) return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { auto Children = const_cast<OMPMapClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_map; } }; /// This represents 'num_teams' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp teams num_teams(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'num_teams' /// with single expression 'n'. class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// NumTeams number. Stmt *NumTeams = nullptr; /// Set the NumTeams number. /// /// \param E NumTeams number. void setNumTeams(Expr *E) { NumTeams = E; } public: /// Build 'num_teams' clause. /// /// \param E Expression associated with this clause. /// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPNumTeamsClause() : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return NumTeams number. Expr *getNumTeams() { return cast<Expr>(NumTeams); } /// Return NumTeams number. Expr *getNumTeams() const { return cast<Expr>(NumTeams); } child_range children() { return child_range(&NumTeams, &NumTeams + 1); } const_child_range children() const { return const_child_range(&NumTeams, &NumTeams + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_teams; } }; /// This represents 'thread_limit' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp teams thread_limit(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'thread_limit' /// with single expression 'n'. class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// ThreadLimit number. Stmt *ThreadLimit = nullptr; /// Set the ThreadLimit number. /// /// \param E ThreadLimit number. void setThreadLimit(Expr *E) { ThreadLimit = E; } public: /// Build 'thread_limit' clause. /// /// \param E Expression associated with this clause. 
/// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPThreadLimitClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPThreadLimitClause() : OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return ThreadLimit number. Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); } /// Return ThreadLimit number. Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); } child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); } const_child_range children() const { return const_child_range(&ThreadLimit, &ThreadLimit + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_thread_limit; } }; /// This represents 'priority' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task priority(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'priority' with /// single expression 'n'. 
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Priority number.
  Stmt *Priority = nullptr;

  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param Priority Expression associated with this clause.
  /// \param HelperPriority Helper priority for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) {
    setPreInitStmt(HelperPriority, CaptureRegion);
  }

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(llvm::omp::OMPC_priority, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression of the clause (was: "Safe iteration space
  /// distance." — a copy-paste from the safelen clause).
  Stmt *Grainsize = nullptr;

  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion,
                     SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the grainsize expression (may be null for an empty clause).
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

  const_child_range children() const {
    return const_child_range(&Grainsize, &Grainsize + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_grainsize;
  }
};

/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNogroupClause()
      : OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(),
                  SourceLocation()) {
  }

  // 'nogroup' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nogroup;
  }
};

/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number-of-tasks expression of the clause (was: "Safe iteration space
  /// distance." — a copy-paste from the safelen clause).
  Stmt *NumTasks = nullptr;

  /// Set the num_tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper num_tasks expression for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the num_tasks expression (may be null for an empty clause).
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
  }
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause()
      : OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression (may be null for an empty clause).
  /// (Comment previously said "Returns number of threads." — a copy-paste
  /// from the num_threads clause.)
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  child_range children() { return child_range(&Hint, &Hint + 1); }

  const_child_range children() const {
    return const_child_range(&Hint, &Hint + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_hint;
  }
};

/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  // ChunkSize is stored as Expr* rather than Stmt*, so the child range is
  // produced by reinterpreting the member's address (Expr derives from Stmt).
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dist_schedule;
  }
};

/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of
/// kind 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Locations of modifiers.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind),
        KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(),
                  SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; }

  // This clause carries no expressions, so both child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_defaultmap;
  }
};

/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'to' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'to' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause.
  /// It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                       ArrayRef<SourceLocation> TheMotionModifiersLoc,
                       NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(), Sizes,
                                  /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                             ArrayRef<SourceLocation> MotionModifiersLoc,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
  /// locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  // Children are the listed expressions, kept in the trailing storage.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_to;
  }
};

/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'from' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'from' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                         ArrayRef<SourceLocation> TheMotionModifiersLoc,
                         NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(),
                                  Sizes, /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
         ArrayRef<SourceLocation> MotionModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
  /// locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  // Children are the listed expressions, kept in the trailing storage.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_from;
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause.
  /// It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // Three parallel arrays of varlist_size() expressions each: the original
    // variables, the private copies, and the initializers.
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    // The private copies are stored immediately after the variable list.
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    // The initializers are stored immediately after the private copies.
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause.
  /// It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  // Children are the listed expressions, kept in the trailing storage.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_ptr;
  }
};

/// This represents clause 'use_device_addr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_addr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_addr' with the variables 'a' and 'b'.
class OMPUseDeviceAddrClause final
    : public OMPMappableExprListClause<OMPUseDeviceAddrClause>,
      private llvm::TrailingObjects<
          OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs,
                                  const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDeviceAddrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDeviceAddrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  // Children are the listed expressions, kept in the trailing storage.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_addr;
  }
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
                                const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPIsDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  // Children are the listed expressions, kept in the trailing storage.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr;
  }
};

/// This represents clause 'nontemporal' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp simd nontemporal(a)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'nontemporal' for
/// the variable 'a'.
class OMPNontemporalClause final
    : public OMPVarListClause<OMPNontemporalClause>,
      private llvm::TrailingObjects<OMPNontemporalClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal,
                                               StartLoc, LParenLoc, EndLoc, N) {
  }

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPNontemporalClause(unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(
            llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Get the list of privatized copies if the member expression was captured
  /// by one of the privatization clauses.
  // The private copies are stored immediately after the variable list (the
  // range starts at varlist_end() and has varlist_size() elements).
  MutableArrayRef<Expr *> getPrivateRefs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateRefs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPNontemporalClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Sets the list of references to private copies created in private clauses.
  /// \param VL List of references.
  void setPrivateRefs(ArrayRef<Expr *> VL);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Iterates the private-copy expressions stored after the variable list.
  child_range private_refs() {
    return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()),
                       reinterpret_cast<Stmt **>(getPrivateRefs().end()));
  }

  const_child_range private_refs() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nontemporal;
  }
};

/// This represents 'order' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd order(concurrent)
/// \endcode
/// In this example directive '#pragma omp simd' has simple 'order'
/// clause with kind 'concurrent'.
class OMPOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'order' clause.
  OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setKind(OpenMPOrderClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'order' clause with argument \p A ('concurrent').
  ///
  /// \param A Argument of the clause ('concurrent').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPOrderClause() : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPOrderClauseKind getKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_order; } }; /// This represents the 'init' clause in '#pragma omp ...' directives. /// /// \code /// #pragma omp interop init(target:obj) /// \endcode class OMPInitClause final : public OMPVarListClause<OMPInitClause>, private llvm::TrailingObjects<OMPInitClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of interop variable. SourceLocation VarLoc; bool IsTarget = false; bool IsTargetSync = false; void setInteropVar(Expr *E) { varlist_begin()[0] = E; } void setIsTarget(bool V) { IsTarget = V; } void setIsTargetSync(bool V) { IsTargetSync = V; } /// Sets the location of the interop variable. 
  void setVarLoc(SourceLocation Loc) { VarLoc = Loc; }

  /// Build 'init' clause.
  ///
  /// \param IsTarget Uses the 'target' interop-type.
  /// \param IsTargetSync Uses the 'targetsync' interop-type.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param VarLoc Location of the interop variable.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of expressions.
  OMPInitClause(bool IsTarget, bool IsTargetSync, SourceLocation StartLoc,
                SourceLocation LParenLoc, SourceLocation VarLoc,
                SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPInitClause>(llvm::omp::OMPC_init, StartLoc,
                                        LParenLoc, EndLoc, N),
        VarLoc(VarLoc), IsTarget(IsTarget), IsTargetSync(IsTargetSync) {}

  /// Build an empty clause.
  OMPInitClause(unsigned N)
      : OMPVarListClause<OMPInitClause>(llvm::omp::OMPC_init, SourceLocation(),
                                        SourceLocation(), SourceLocation(), N) {
  }

public:
  /// Creates a fully specified clause.
  ///
  /// \param C AST context.
  /// \param InteropVar The interop variable.
  /// \param PrefExprs The list of preference expressions.
  /// \param IsTarget Uses the 'target' interop-type.
  /// \param IsTargetSync Uses the 'targetsync' interop-type.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param VarLoc Location of the interop variable.
  /// \param EndLoc Ending location of the clause.
  static OMPInitClause *Create(const ASTContext &C, Expr *InteropVar,
                               ArrayRef<Expr *> PrefExprs, bool IsTarget,
                               bool IsTargetSync, SourceLocation StartLoc,
                               SourceLocation LParenLoc, SourceLocation VarLoc,
                               SourceLocation EndLoc);

  /// Creates an empty clause with \a N expressions.
  ///
  /// \param C AST context.
  /// \param N Number of expression items.
  static OMPInitClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Returns the location of the interop variable.
  SourceLocation getVarLoc() const { return VarLoc; }

  /// Returns the interop variable.
  Expr *getInteropVar() { return varlist_begin()[0]; }
  const Expr *getInteropVar() const { return varlist_begin()[0]; }

  /// Returns true if interop-type 'target' is used.
  bool getIsTarget() const { return IsTarget; }

  /// Returns true if interop-type 'targetsync' is used.
  bool getIsTargetSync() const { return IsTargetSync; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPInitClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  using prefs_iterator = MutableArrayRef<Expr *>::iterator;
  using const_prefs_iterator = ArrayRef<const Expr *>::iterator;
  using prefs_range = llvm::iterator_range<prefs_iterator>;
  using const_prefs_range = llvm::iterator_range<const_prefs_iterator>;

  // Preference expressions start right after the interop variable (slot 0).
  prefs_range prefs() {
    return prefs_range(reinterpret_cast<Expr **>(std::next(varlist_begin())),
                       reinterpret_cast<Expr **>(varlist_end()));
  }

  const_prefs_range prefs() const {
    auto Prefs = const_cast<OMPInitClause *>(this)->prefs();
    return const_prefs_range(Prefs.begin(), Prefs.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_init;
  }
};

/// This represents the 'use' clause in '#pragma omp ...' directives.
///
/// \code
/// #pragma omp interop use(obj)
/// \endcode
class OMPUseClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Location of interop variable.
  SourceLocation VarLoc;

  /// The interop variable.
  Stmt *InteropVar = nullptr;

  /// Set the interop variable.
  void setInteropVar(Expr *E) { InteropVar = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Sets the location of the interop variable.
  void setVarLoc(SourceLocation Loc) { VarLoc = Loc; }

public:
  /// Build 'use' clause with an interop variable expression \a InteropVar.
  ///
  /// \param InteropVar The interop variable.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param VarLoc Location of the interop variable.
  /// \param EndLoc Ending location of the clause.
  OMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
               SourceLocation LParenLoc, SourceLocation VarLoc,
               SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_use, StartLoc, EndLoc), LParenLoc(LParenLoc),
        VarLoc(VarLoc), InteropVar(InteropVar) {}

  /// Build an empty clause.
  OMPUseClause()
      : OMPClause(llvm::omp::OMPC_use, SourceLocation(), SourceLocation()) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the location of the interop variable.
  SourceLocation getVarLoc() const { return VarLoc; }

  /// Returns the interop variable.
  Expr *getInteropVar() const { return cast<Expr>(InteropVar); }

  child_range children() { return child_range(&InteropVar, &InteropVar + 1); }

  const_child_range children() const {
    return const_child_range(&InteropVar, &InteropVar + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use;
  }
};

/// This represents 'destroy' clause in the '#pragma omp depobj'
/// directive or the '#pragma omp interop' directive.
///
/// \code
/// #pragma omp depobj(a) destroy
/// #pragma omp interop destroy(obj)
/// \endcode
/// In these examples directive '#pragma omp depobj' and '#pragma omp interop'
/// have a 'destroy' clause. The 'interop' directive includes an object.
class OMPDestroyClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Location of interop variable.
  SourceLocation VarLoc;

  /// The interop variable. May be null: the clause can appear without an
  /// object (see children()).
  Stmt *InteropVar = nullptr;

  /// Set the interop variable.
  void setInteropVar(Expr *E) { InteropVar = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Sets the location of the interop variable.
  void setVarLoc(SourceLocation Loc) { VarLoc = Loc; }

public:
  /// Build 'destroy' clause with an interop variable expression \a InteropVar.
  ///
  /// \param InteropVar The interop variable.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param VarLoc Location of the interop variable.
  /// \param EndLoc Ending location of the clause.
  OMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation VarLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc),
        LParenLoc(LParenLoc), VarLoc(VarLoc), InteropVar(InteropVar) {}

  /// Build 'destroy' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDestroyClause()
      : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) {
  }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the location of the interop variable.
  SourceLocation getVarLoc() const { return VarLoc; }

  /// Returns the interop variable.
  Expr *getInteropVar() const { return cast_or_null<Expr>(InteropVar); }

  // The interop variable may be absent, so only expose it as a child when it
  // is present.
  child_range children() {
    if (InteropVar)
      return child_range(&InteropVar, &InteropVar + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (InteropVar)
      return const_child_range(&InteropVar, &InteropVar + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_destroy;
  }
};

/// This represents 'novariants' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp dispatch novariants(a > 5)
/// \endcode
/// In this example directive '#pragma omp dispatch' has simple 'novariants'
/// clause with condition 'a > 5'.
class OMPNovariantsClause final : public OMPClause,
                                  public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'novariants' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'novariants' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNovariantsClause(Expr *Cond, Stmt *HelperCond,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_novariants, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNovariantsClause()
      : OMPClause(llvm::omp::OMPC_novariants, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  // Defined out-of-line.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNovariantsClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_novariants;
  }
};

/// This represents 'nocontext' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp dispatch nocontext(a > 5)
/// \endcode
/// In this example directive '#pragma omp dispatch' has simple 'nocontext'
/// clause with condition 'a > 5'.
class OMPNocontextClause final : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'nocontext' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'nocontext' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNocontextClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_nocontext, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPNocontextClause() : OMPClause(llvm::omp::OMPC_nocontext, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPNocontextClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nocontext; } }; /// This represents 'detach' clause in the '#pragma omp task' directive. /// /// \code /// #pragma omp task detach(evt) /// \endcode /// In this example directive '#pragma omp detach' has simple 'detach' clause /// with the variable 'evt'. class OMPDetachClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression of the 'detach' clause. Stmt *Evt = nullptr; /// Set condition. void setEventHandler(Expr *E) { Evt = E; } /// Sets the location of '('. 
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'detach' clause with event-handler \a Evt.
  ///
  /// \param Evt Event handler expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Evt(Evt) {}

  /// Build an empty clause.
  OMPDetachClause()
      : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns event-handler expression.
  Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); }

  child_range children() { return child_range(&Evt, &Evt + 1); }

  const_child_range children() const {
    return const_child_range(&Evt, &Evt + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_detach;
  }
};

/// This represents clause 'inclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive'
/// with the variables 'a' and 'b'.
class OMPInclusiveClause final
    : public OMPVarListClause<OMPInclusiveClause>,
      private llvm::TrailingObjects<OMPInclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
                                             StartLoc, LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInclusiveClause(unsigned N)
      : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
                                             SourceLocation(), SourceLocation(),
                                             SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  static OMPInclusiveClause *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPInclusiveClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_inclusive;
  }
};

/// This represents clause 'exclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan exclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'exclusive'
/// with the variables 'a' and 'b'.
class OMPExclusiveClause final
    : public OMPVarListClause<OMPExclusiveClause>,
      private llvm::TrailingObjects<OMPExclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
                                             StartLoc, LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPExclusiveClause(unsigned N)
      : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
                                             SourceLocation(), SourceLocation(),
                                             SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  static OMPExclusiveClause *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPExclusiveClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_exclusive;
  }
};

/// This represents clause 'uses_allocators' in the '#pragma omp target'-based
/// directives.
///
/// \code
/// #pragma omp target uses_allocators(default_allocator, my_allocator(traits))
/// \endcode
/// In this example directive '#pragma omp target' has clause 'uses_allocators'
/// with the allocators 'default_allocator' and user-defined 'my_allocator'.
class OMPUsesAllocatorsClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *,
                                    SourceLocation> {
public:
  /// Data for list of allocators.
  struct Data {
    /// Allocator.
    Expr *Allocator = nullptr;
    /// Allocator traits.
    Expr *AllocatorTraits = nullptr;
    /// Locations of '(' and ')' symbols.
    SourceLocation LParenLoc, RParenLoc;
  };

private:
  friend class OMPClauseReader;
  friend TrailingObjects;

  // Per-allocator layout of the trailing Expr* array: the allocator
  // expression followed by its traits expression.
  enum class ExprOffsets {
    Allocator,
    AllocatorTraits,
    Total,
  };

  // Per-allocator layout of the trailing SourceLocation array.
  enum class ParenLocsOffsets {
    LParen,
    RParen,
    Total,
  };

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Total number of allocators in the clause.
  unsigned NumOfAllocators = 0;

  /// Build clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of allocators associated with the clause.
  OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                          SourceLocation EndLoc, unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumOfAllocators(N) {}

  /// Build an empty clause.
  /// \param N Number of allocators associated with the clause.
  ///
  explicit OMPUsesAllocatorsClause(unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(),
                  SourceLocation()),
        NumOfAllocators(N) {}

  // Two Expr* slots (allocator + traits) are stored per allocator.
  unsigned numTrailingObjects(OverloadToken<Expr *>) const {
    return NumOfAllocators * static_cast<int>(ExprOffsets::Total);
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Sets the allocators data for the clause.
  void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data);

public:
  /// Creates clause with a list of allocators \p Data.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Data List of allocators.
  static OMPUsesAllocatorsClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data);

  /// Creates an empty clause with the place for \p N allocators.
  ///
  /// \param C AST context.
  /// \param N The number of allocators.
  static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of allocators associated with the clause.
  unsigned getNumberOfAllocators() const { return NumOfAllocators; }

  /// Returns data for the specified allocator.
  OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const;

  // Iterators
  // Both the allocator and the traits expression of each entry are children,
  // so the range spans all NumOfAllocators * ExprOffsets::Total slots.
  child_range children() {
    Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
    return child_range(Begin, Begin + NumOfAllocators *
                                          static_cast<int>(ExprOffsets::Total));
  }

  const_child_range children() const {
    Stmt *const *Begin =
        reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>());
    return const_child_range(
        Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total));
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_uses_allocators;
  }
};

/// This represents clause 'affinity' in the '#pragma omp task'-based
/// directives.
///
/// \code
/// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i])
/// \endcode
/// In this example directive '#pragma omp task' has clause 'affinity' with the
/// affinity modifier 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]'
/// and 'c[i]'.
class OMPAffinityClause final
    : public OMPVarListClause<OMPAffinityClause>,
      private llvm::TrailingObjects<OMPAffinityClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':' symbol.
  SourceLocation ColonLoc;

  /// Build clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of locators associated with the clause.
  OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc,
                                            LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
/// \param N Number of locators asssociated with the clause. /// explicit OMPAffinityClause(unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the affinity modifier for the clause, if any. void setModifier(Expr *E) { getTrailingObjects<Expr *>()[varlist_size()] = E; } /// Sets the location of ':' symbol. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a modifier a list of locator items. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param Locators List of locator items. static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Creates an empty clause with the place for \p N locator items. /// /// \param C AST context. /// \param N The number of locator items. static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets affinity modifier. Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; } Expr *getModifier() const { return getTrailingObjects<Expr *>()[varlist_size()]; } /// Gets the location of ':' symbol. SourceLocation getColonLoc() const { return ColonLoc; } // Iterators child_range children() { int Offset = getModifier() ? 
1 : 0; return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end() + Offset)); } const_child_range children() const { auto Children = const_cast<OMPAffinityClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_affinity; } }; /// This represents 'filter' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp masked filter(tid) /// \endcode /// In this example directive '#pragma omp masked' has 'filter' clause with /// thread id. class OMPFilterClause final : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Express of the 'filter' clause. Stmt *ThreadID = nullptr; /// Sets the thread identifier. void setThreadID(Expr *TID) { ThreadID = TID; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'filter' clause with thread-id \a ThreadID. /// /// \param ThreadID Thread identifier. /// \param HelperE Helper expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPFilterClause(Expr *ThreadID, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_filter, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadID(ThreadID) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. 
OMPFilterClause() : OMPClause(llvm::omp::OMPC_filter, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return thread identifier. Expr *getThreadID() { return cast<Expr>(ThreadID); } /// Return thread identifier. Expr *getThreadID() const { return cast<Expr>(ThreadID); } child_range children() { return child_range(&ThreadID, &ThreadID + 1); } const_child_range children() const { return const_child_range(&ThreadID, &ThreadID + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_filter; } }; /// This class implements a simple visitor for OMPClause /// subclasses. template<class ImplClass, template <typename> class Ptr, typename RetTy> class OMPClauseVisitorBase { public: #define PTR(CLASS) Ptr<CLASS> #define DISPATCH(CLASS) \ return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S)) #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) \ RetTy Visit##Class(PTR(Class) S) { DISPATCH(Class); } #include "llvm/Frontend/OpenMP/OMP.inc" RetTy Visit(PTR(OMPClause) S) { // Top switch clause: visit each OMPClause. switch (S->getClauseKind()) { #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) \ case llvm::omp::Clause::Enum: \ return Visit##Class(static_cast<PTR(Class)>(S)); #define CLAUSE_NO_CLASS(Enum, Str) \ case llvm::omp::Clause::Enum: \ break; #include "llvm/Frontend/OpenMP/OMP.inc" } } // Base case, ignore it. 
:) RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); } #undef PTR #undef DISPATCH }; template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>; template <class ImplClass, typename RetTy = void> class OMPClauseVisitor : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {}; template<class ImplClass, typename RetTy = void> class ConstOMPClauseVisitor : public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {}; class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> { raw_ostream &OS; const PrintingPolicy &Policy; /// Process clauses with list of variables. template <typename T> void VisitOMPClauseList(T *Node, char StartSym); /// Process motion clauses. template <typename T> void VisitOMPMotionClause(T *Node); public: OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy) : OS(OS), Policy(Policy) {} #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *S); #include "llvm/Frontend/OpenMP/OMP.inc" }; struct OMPTraitProperty { llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid; /// The raw string as we parsed it. This is needed for the `isa` trait set /// (which accepts anything) and (later) extensions. StringRef RawString; }; struct OMPTraitSelector { Expr *ScoreOrCondition = nullptr; llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid; llvm::SmallVector<OMPTraitProperty, 1> Properties; }; struct OMPTraitSet { llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid; llvm::SmallVector<OMPTraitSelector, 2> Selectors; }; /// Helper data structure representing the traits in a match clause of an /// `declare variant` or `metadirective`. The outer level is an ordered /// collection of selector sets, each with an associated kind and an ordered /// collection of selectors. A selector has a kind, an optional score/condition, /// and an ordered collection of properties. 
class OMPTraitInfo { /// Private constructor accesible only by ASTContext. OMPTraitInfo() {} friend class ASTContext; public: /// Reconstruct a (partial) OMPTraitInfo object from a mangled name. OMPTraitInfo(StringRef MangledName); /// The outermost level of selector sets. llvm::SmallVector<OMPTraitSet, 2> Sets; bool anyScoreOrCondition( llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) { return llvm::any_of(Sets, [&](OMPTraitSet &Set) { return llvm::any_of( Set.Selectors, [&](OMPTraitSelector &Selector) { return Cond(Selector.ScoreOrCondition, /* IsScore */ Selector.Kind != llvm::omp::TraitSelector::user_condition); }); }); } /// Create a variant match info object from this trait info object. While the /// former is a flat representation the actual main difference is that the /// latter uses clang::Expr to store the score/condition while the former is /// independent of clang. Thus, expressions and conditions are evaluated in /// this method. void getAsVariantMatchInfo(ASTContext &ASTCtx, llvm::omp::VariantMatchInfo &VMI) const; /// Return a string representation identifying this context selector. std::string getMangledName() const; /// Check the extension trait \p TP is active. bool isExtensionActive(llvm::omp::TraitProperty TP) { for (const OMPTraitSet &Set : Sets) { if (Set.Kind != llvm::omp::TraitSet::implementation) continue; for (const OMPTraitSelector &Selector : Set.Selectors) { if (Selector.Kind != llvm::omp::TraitSelector::implementation_extension) continue; for (const OMPTraitProperty &Property : Selector.Properties) { if (Property.Kind == TP) return true; } } } return false; } /// Print a human readable representation into \p OS. void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const; }; llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI); llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI); /// Clang specific specialization of the OMPContext to lookup target features. 
struct TargetOMPContext final : public llvm::omp::OMPContext { TargetOMPContext(ASTContext &ASTCtx, std::function<void(StringRef)> &&DiagUnknownTrait, const FunctionDecl *CurrentFunctionDecl, ArrayRef<llvm::omp::TraitProperty> ConstructTraits); virtual ~TargetOMPContext() = default; /// See llvm::omp::OMPContext::matchesISATrait bool matchesISATrait(StringRef RawString) const override; private: std::function<bool(StringRef)> FeatureValidityCheck; std::function<void(StringRef)> DiagUnknownTrait; llvm::StringMap<bool> FeatureMap; }; /// Contains data for OpenMP directives: clauses, children /// expressions/statements (helpers for codegen) and associated statement, if /// any. class OMPChildren final : private llvm::TrailingObjects<OMPChildren, OMPClause *, Stmt *> { friend TrailingObjects; friend class OMPClauseReader; friend class OMPExecutableDirective; template <typename T> friend class OMPDeclarativeDirective; /// Numbers of clauses. unsigned NumClauses = 0; /// Number of child expressions/stmts. unsigned NumChildren = 0; /// true if the directive has associated statement. bool HasAssociatedStmt = false; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. 
size_t numTrailingObjects(OverloadToken<OMPClause *>) const { return NumClauses; } OMPChildren() = delete; OMPChildren(unsigned NumClauses, unsigned NumChildren, bool HasAssociatedStmt) : NumClauses(NumClauses), NumChildren(NumChildren), HasAssociatedStmt(HasAssociatedStmt) {} static size_t size(unsigned NumClauses, bool HasAssociatedStmt, unsigned NumChildren); static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses); static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses, Stmt *S, unsigned NumChildren = 0); static OMPChildren *CreateEmpty(void *Mem, unsigned NumClauses, bool HasAssociatedStmt = false, unsigned NumChildren = 0); public: unsigned getNumClauses() const { return NumClauses; } unsigned getNumChildren() const { return NumChildren; } bool hasAssociatedStmt() const { return HasAssociatedStmt; } /// Set associated statement. void setAssociatedStmt(Stmt *S) { getTrailingObjects<Stmt *>()[NumChildren] = S; } void setChildren(ArrayRef<Stmt *> Children); /// Sets the list of variables for this clause. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// Returns statement associated with the directive. const Stmt *getAssociatedStmt() const { return const_cast<OMPChildren *>(this)->getAssociatedStmt(); } Stmt *getAssociatedStmt() { assert(HasAssociatedStmt && "Expected directive with the associated statement."); return getTrailingObjects<Stmt *>()[NumChildren]; } /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { return llvm::makeMutableArrayRef(getTrailingObjects<OMPClause *>(), NumClauses); } ArrayRef<OMPClause *> getClauses() const { return const_cast<OMPChildren *>(this)->getClauses(); } /// Returns the captured statement associated with the /// component region within the (combined) directive. /// /// \param RegionKind Component region kind. 
const CapturedStmt * getCapturedStmt(OpenMPDirectiveKind RegionKind, ArrayRef<OpenMPDirectiveKind> CaptureRegions) const { assert(llvm::any_of( CaptureRegions, [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) && "RegionKind not found in OpenMP CaptureRegions."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (auto ThisCaptureRegion : CaptureRegions) { if (ThisCaptureRegion == RegionKind) return CS; CS = cast<CapturedStmt>(CS->getCapturedStmt()); } llvm_unreachable("Incorrect RegionKind specified for directive."); } /// Get innermost captured statement for the construct. CapturedStmt * getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) { assert(hasAssociatedStmt() && "Must have associated captured statement."); assert(!CaptureRegions.empty() && "At least one captured statement must be provided."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (unsigned Level = CaptureRegions.size(); Level > 1; --Level) CS = cast<CapturedStmt>(CS->getCapturedStmt()); return CS; } const CapturedStmt * getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) const { return const_cast<OMPChildren *>(this)->getInnermostCapturedStmt( CaptureRegions); } MutableArrayRef<Stmt *> getChildren(); ArrayRef<Stmt *> getChildren() const { return const_cast<OMPChildren *>(this)->getChildren(); } Stmt *getRawStmt() { assert(HasAssociatedStmt && "Expected directive with the associated statement."); if (auto *CS = dyn_cast<CapturedStmt>(getAssociatedStmt())) { Stmt *S = nullptr; do { S = CS->getCapturedStmt(); CS = dyn_cast<CapturedStmt>(S); } while (CS); return S; } return getAssociatedStmt(); } const Stmt *getRawStmt() const { return const_cast<OMPChildren *>(this)->getRawStmt(); } Stmt::child_range getAssociatedStmtAsRange() { if (!HasAssociatedStmt) return Stmt::child_range(Stmt::child_iterator(), Stmt::child_iterator()); return Stmt::child_range(&getTrailingObjects<Stmt *>()[NumChildren], &getTrailingObjects<Stmt *>()[NumChildren + 
1]); } }; } // namespace clang #endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
parallel_matrix.c
// Scale every element of an n x n matrix by a constant and print the result,
// parallelising the per-row work with OpenMP.
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define NUM_THREADS 5

int n = 1000;  // matrix dimension; used as the VLA bound in evaluacion()

void evaluacion(int i, float mat[n][n]);

int main(int argc, char const *argv[]) {
  omp_set_num_threads(NUM_THREADS);

  // n*n floats is ~4 MB, far too large for a stack VLA (the original
  // `float mat[n][n]` local overflows typical 1-8 MB stacks).  Allocate
  // the matrix on the heap instead; the pointer-to-row type matches the
  // `float mat[n][n]` parameter of evaluacion().
  float (*mat)[n] = malloc(sizeof(float[n][n]));
  if (mat == NULL) {
    fprintf(stderr, "allocation of %d x %d matrix failed\n", n, n);
    return EXIT_FAILURE;
  }

  // Initialise each row to 0, 1, 2, ..., n-1.
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      mat[i][j] = j;
    }
  }

  double t1 = omp_get_wtime();

  // The loop variable of a worksharing loop is implicitly private and each
  // iteration touches a distinct row, so no extra data-sharing clauses are
  // needed.  Note: output from different threads may interleave.
  #pragma omp parallel for
  for (int i = 0; i < n; i++) {
    evaluacion(i, mat);
  }

  double t2 = omp_get_wtime();
  printf("t = %f \n", t2 - t1);

  free(mat);
  return 0;
}

// Print row i of mat with every element multiplied by the scalar 65.
// A newline is emitted after the tenth element (j == 9), matching the
// original formatting (the original wrote the equivalent `j*1 == 9`).
void evaluacion(int i, float mat[n][n]) {
  for (int j = 0; j < n; j++) {
    printf("%f , ", mat[i][j] * 65);
    if (j == 9) {
      printf("\n");
    }
  }
}
kernel.h
/* Calculate the damage of each node. * * nlist - An (n, local_size) array containing the neighbour lists, * a value of -1 corresponds to a broken bond. * family - An array of the initial number of neighbours for each node. * n_neigh - An array of the number of neighbours (particles bound) for each node. * damage - An array of the damage for each node. * local_cache - local (local_size) array to store the bond breakages. */ void damage_of_node( const int n, const int *__restrict__ nlist, const int *__restrict__ family, int *__restrict__ n_neigh, double *__restrict__ damage) { #pragma omp target teams num_teams((n+BS-1)/BS) thread_limit(BS) { int local_cache[BS]; #pragma omp parallel { const int local_id = omp_get_thread_num(); const int local_size = BS; const int nid = omp_get_team_num(); const int global_id = nid * local_size + local_id; if (global_id < n) { //Copy values into local memory local_cache[local_id] = nlist[global_id] != -1 ? 1 : 0; //Wait for all threads #pragma omp barrier for (int i = local_size/2; i > 0; i /= 2) { if(local_id < i) { local_cache[local_id] += local_cache[local_id + i]; } //Wait for all threads #pragma omp barrier } if (local_id == 0) { // Update damage and n_neigh int neighbours = local_cache[0]; n_neigh[nid] = neighbours; damage[nid] = 1.0 - (double) neighbours / (double) (family[nid]); } } } } }
GB_unop__identity_uint32_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_uint32_fc64 // op(A') function: GB_unop_tran__identity_uint32_fc64 // C type: uint32_t // A type: GxB_FC64_t // cast: uint32_t cij = GB_cast_to_uint32_t (creal (aij)) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint32_t z = GB_cast_to_uint32_t (creal (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = GB_cast_to_uint32_t (creal (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_uint32_fc64 ( uint32_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for 
num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t (creal (aij)) ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_uint32_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__plus_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_fp64) // A.*B function (eWiseMult): GB (_AemultB_01__plus_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__plus_fp64) // A.*B function (eWiseMult): GB (_AemultB_03__plus_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fp64) // A*D function (colscale): GB (_AxD__plus_fp64) // D*A function (rowscale): GB (_DxB__plus_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__plus_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__plus_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fp64) // C=scalar+B GB (_bind1st__plus_fp64) // C=scalar+B' GB (_bind1st_tran__plus_fp64) // C=A+scalar GB (_bind2nd__plus_fp64) // C=A'+scalar GB (_bind2nd_tran__plus_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_FP64 || GxB_NO_PLUS_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__plus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_fp64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_fp64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__plus_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__plus_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p 
= 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__plus_fc32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__plus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_08__plus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_02__plus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_04__plus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__plus_fc32)
// A*D function (colscale):         GB (_AxD__plus_fc32)
// D*A function (rowscale):         GB (_DxB__plus_fc32)
// C+=B function (dense accum):     GB (_Cdense_accumB__plus_fc32)
// C+=b function (dense accum):     GB (_Cdense_accumb__plus_fc32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__plus_fc32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__plus_fc32)
// C=scalar+B                       GB (_bind1st__plus_fc32)
// C=scalar+B'                      GB (_bind1st_tran__plus_fc32)
// C=A+scalar                       GB (_bind2nd__plus_fc32)
// C=A'+scalar                      GB (_bind2nd_tran__plus_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// A pattern?  0
// B type:   GxB_FC32_t
// B pattern?  0

// BinaryOp: cij = GB_FC32_add (aij, bij)

// The macros below configure the generic template files (#included inside each
// function) for the PLUS operator on single-precision complex values.

#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_add (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (when GB_DISABLE is true, every function below returns GrB_NO_VALUE and
// the caller falls back to the generic kernel)
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_FC32 || GxB_NO_PLUS_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // loop body supplied by the template, specialized via the macros above
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__plus_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block above already returns;
    // harmless dead code emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__plus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC32_t alpha_scalar ;
    GxB_FC32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion uses alpha/beta in place of missing entries
        alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((GxB_FC32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__plus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__plus_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__plus_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t   x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_add (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__plus_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t   y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_add (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_add (x, aij) ;            \
}

GrB_Info GB (_bind1st_tran__plus_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template use
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_add (aij, y) ;            \
}

GrB_Info GB (_bind2nd_tran__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
6830.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop { /* E := A*B */ for (i = 0; i < _PB_NI; i++) { #pragma omp target teams distribute thread_limit(256) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ for (i = 0; i < _PB_NJ; i++) { #pragma omp target teams distribute thread_limit(256) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ for (i = 0; i < _PB_NI; i++) { #pragma omp target teams distribute thread_limit(256) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
feature.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/feature.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/image-private.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a n n y E d g e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
%  edges in images.
%
%  The format of the CannyEdgeImage method is:
%
%      Image *CannyEdgeImage(const Image *image,const double radius,
%        const double sigma,const double lower_percent,
%        const double upper_percent,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the gaussian smoothing filter.
%
%    o sigma: the sigma of the gaussian smoothing filter.
%
%    o lower_percent: percentage of edge pixels in the lower threshold.
%
%    o upper_percent: percentage of edge pixels in the upper threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Per-pixel state computed by the gradient phase: gradient magnitude,
  quantized gradient orientation (0..3), the surviving intensity after
  non-maxima suppression, and (when used as a worklist entry) pixel
  coordinates.
*/
typedef struct _CannyInfo
{
  double
    magnitude,
    intensity;

  int
    orientation;

  ssize_t
    x,
    y;
} CannyInfo;

/*
  Returns MagickTrue only when (x,y) lies inside the image bounds.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x < 0) || (x >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y < 0) || (y >= (ssize_t) image->rows))
    return(MagickFalse);
  return(MagickTrue);
}

/*
  Hysteresis step: starting from the strong-edge pixel (x,y), mark it and
  flood-fill to any 8-connected neighbors whose gradient intensity is at
  least lower_threshold, marking each as an edge (QuantumRange).

  NOTE(review): row 0 of canny_cache is reused here as a LIFO worklist
  (indexed by i), overwriting the gradient data previously stored for
  image row 0 — this mirrors the upstream implementation; confirm before
  changing.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  register Quantum
    *q;

  register ssize_t
    i;

  /* Mark the seed pixel as an edge. */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (Quantum *) NULL)
    return(MagickFalse);
  *q=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /* Push the seed onto the worklist (slot 0). */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  /* i is the worklist depth; loop until the worklist drains. */
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    /* Pop the most recently pushed edge pixel. */
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    /* Examine the 8-connected neighborhood. */
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (Quantum *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            /* Unvisited and strong enough: mark and push for expansion. */
            *q=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            /*
              NOTE(review): edge.x/edge.y are advanced in place, so the
              remaining (u,v) iterations scan around the newly pushed pixel
              rather than the popped one — upstream behavior; confirm before
              changing.
            */
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}

/*
  CannyEdgeImage: (1) Gaussian blur, (2) Sobel-like 2x2 gradient with
  quantized orientation, (3) non-maxima suppression, (4) hysteresis
  thresholding via TraceEdges.  Returns a new grayscale edge image, or
  NULL on failure.
*/
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag  "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MagickPathExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise: two directional Gaussian blurs (0 and 90 degrees),
    then reduce to grayscale with no alpha.
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,
    exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception);
  /*
    Find the intensity gradient of the image: one CannyInfo per pixel.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /* Cooperative early exit: once any row fails, remaining rows no-op. */
    if (status == MagickFalse)
      continue;
    /* Read a 2-row window so the 2x2 kernel can reach (x+1,y+1). */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      register const Quantum
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /* 2x2 horizontal/vertical difference kernels. */
      static double
        Gx[2][2] =
        {
          { -1.0,  +1.0 },
          { -1.0,  +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) memset(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      /*
        Quantize the gradient direction into 4 bins (0=N/S, 1=NW/SE,
        2=E/W, 3=NE/SW) using tan(22.5) and tan(67.5) slope breakpoints.
      */
      pixel.orientation=0;
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p+=GetPixelChannels(edge_image);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be
    part of an edge; also track the global min/max surviving intensity.
  */
  progress=0;
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      /* Pick the two neighbors along the gradient direction. */
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      /* Keep only local maxima along the gradient direction. */
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        /* min/max are shared across threads; serialize the update. */
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      /* Clear the output pixel; hysteresis marks edges below. */
      *q=0;
      q+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold: seed from strong edges, grow through weak ones.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      register const Quantum
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,CannyEdgeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e F e a t u r e s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageFeatures() returns features for each channel in the image in
%  each of four directions (horizontal, vertical, left and right diagonals)
%  for the specified distance.  The features include the angular second
%  moment, contrast, correlation, sum of squares: variance, inverse difference
%  moment, sum average, sum variance, sum entropy, entropy, difference variance,%  difference entropy, information measures of correlation 1, information
%  measures of correlation 2, and maximum correlation coefficient.  You can
%  access the red channel contrast, for example, like this:
%
%      channel_features=GetImageFeatures(image,1,exception);
%      contrast=channel_features[RedPixelChannel].contrast[0];
%
%  Use MagickRelinquishMemory() to free the features buffer.
%
%  The format of the GetImageFeatures method is:
%
%      ChannelFeatures *GetImageFeatures(const Image *image,
%        const size_t distance,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o distance: the distance.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  MagickLog10() returns log10(fabs(x)), clamped so that magnitudes below
  Log10Epsilon yield log10(Log10Epsilon) rather than -infinity.  The texture
  entropy computations below apply it to cooccurrence probabilities, which
  may legitimately be zero.
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon  (1.0e-11)

  /* log10(0) is undefined; treat magnitudes below epsilon as epsilon. */
  if (fabs(x) < Log10Epsilon)
    return(log10(Log10Epsilon));
  return(log10(fabs(x)));
}

MagickExport ChannelFeatures *GetImageFeatures(const Image *image,
  const size_t distance,ExceptionInfo *exception)
{
  /* Per-direction accumulator used throughout the feature computation. */
  typedef struct _ChannelStatistics
  {
    PixelInfo
      direction[4];  /* horizontal, vertical, left and right diagonals */
  } ChannelStatistics;

  CacheView
    *image_view;

  ChannelFeatures
    *channel_features;

  ChannelStatistics
    **cooccurrence,
    correlation,
    *density_x,
    *density_xy,
    *density_y,
    entropy_x,
    entropy_xy,
    entropy_xy1,
    entropy_xy2,
    entropy_y,
    mean,
    **Q,
    *sum,
    sum_squares,
    variance;

  PixelPacket
    gray,
    *grays;

  MagickBooleanType
    status;

  register ssize_t
    i,
    r;

  size_t
    length;

  unsigned int
    number_grays;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A neighborhood of (distance+1) pixels must fit in both dimensions;
    otherwise no adjacency pairs exist and there is nothing to measure.
  */
  if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
    return((ChannelFeatures *) NULL);
  /* One slot per pixel channel; zero-filled so unused channels read as 0. */
  length=MaxPixelChannels+1UL;
  channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
    sizeof(*channel_features));
  if (channel_features == (ChannelFeatures *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_features,0,length*
    sizeof(*channel_features));
  /*
    Form grays.
*/ grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (PixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].alpha=(~0U); grays[i].black=(~0U); } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (r=0; r < (ssize_t) image->rows; r++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(GetPixelRed(image,p))].red= ScaleQuantumToMap(GetPixelRed(image,p)); grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green= ScaleQuantumToMap(GetPixelGreen(image,p)); grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue= ScaleQuantumToMap(GetPixelBlue(image,p)); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black= ScaleQuantumToMap(GetPixelBlack(image,p)); if (image->alpha_trait != UndefinedPixelTrait) grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha= ScaleQuantumToMap(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); return(channel_features); } (void) memset(&gray,0,sizeof(gray)); for (i=0; i <= (ssize_t) MaxMap; i++) { if (grays[i].red != ~0U) 
grays[gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[gray.blue++].blue=grays[i].blue; if (image->colorspace == CMYKColorspace) if (grays[i].black != ~0U) grays[gray.black++].black=grays[i].black; if (image->alpha_trait != UndefinedPixelTrait) if (grays[i].alpha != ~0U) grays[gray.alpha++].alpha=grays[i].alpha; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->colorspace == CMYKColorspace) if (gray.black > number_grays) number_grays=gray.black; if (image->alpha_trait != UndefinedPixelTrait) if (gray.alpha > number_grays) number_grays=gray.alpha; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != 
(ChannelStatistics *) NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) memset(&correlation,0,sizeof(correlation)); (void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) memset(&mean,0,sizeof(mean)); (void) memset(sum,0,number_grays*sizeof(*sum)); (void) memset(&sum_squares,0,sizeof(sum_squares)); (void) memset(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) memset(&entropy_x,0,sizeof(entropy_x)); (void) memset(&entropy_xy,0,sizeof(entropy_xy)); (void) memset(&entropy_xy1,0,sizeof(entropy_xy1)); (void) memset(&entropy_xy2,0,sizeof(entropy_xy2)); (void) memset(&entropy_y,0,sizeof(entropy_y)); (void) memset(&variance,0,sizeof(variance)); for (i=0; i < (ssize_t) number_grays; i++) { cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays, sizeof(**cooccurrence)); Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q)); if ((cooccurrence[i] == (ChannelStatistics *) NULL) || (Q[i] == (ChannelStatistics *) NULL)) break; (void) memset(cooccurrence[i],0,number_grays* sizeof(**cooccurrence)); (void) memset(Q[i],0,number_grays*sizeof(**Q)); } if (i < (ssize_t) number_grays) { for (i--; i >= 0; i--) { if (Q[i] != 
(ChannelStatistics *) NULL) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); if (cooccurrence[i] != (ChannelStatistics *) NULL) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); } Q=(ChannelStatistics **) RelinquishMagickMemory(Q); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); sum=(ChannelStatistics *) RelinquishMagickMemory(sum); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Initialize spatial dependence matrix. */ status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (r=0; r < (ssize_t) image->rows; r++) { register const Quantum *magick_restrict p; register ssize_t x; ssize_t offset, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+ 2*distance,distance+2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } p+=distance*GetPixelChannels(image);; for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < 4; i++) { switch (i) { case 0: default: { /* Horizontal adjacency. */ offset=(ssize_t) distance; break; } case 1: { /* Vertical adjacency. */ offset=(ssize_t) (image->columns+2*distance); break; } case 2: { /* Right diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)-distance); break; } case 3: { /* Left diagonal adjacency. 
*/ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p))) u++; while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p))) u++; while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p))) u++; while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p))) u++; while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].black++; cooccurrence[v][u].direction[i].black++; } if (image->alpha_trait != UndefinedPixelTrait) { u=0; v=0; while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p))) u++; while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].alpha++; cooccurrence[v][u].direction[i].alpha++; } } p+=GetPixelChannels(image); } } grays=(PixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), 
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Normalize spatial dependence matrix. */ for (i=0; i < 4; i++) { double normalize; register ssize_t y; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } normalize=PerceptibleReciprocal(normalize); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red*=normalize; cooccurrence[x][y].direction[i].green*=normalize; cooccurrence[x][y].direction[i].blue*=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].black*=normalize; if (image->alpha_trait != UndefinedPixelTrait) cooccurrence[x][y].direction[i].alpha*=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
*/ channel_features[RedPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BluePixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].black* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].alpha* cooccurrence[x][y].direction[i].alpha; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) correlation.direction[i].black+=x*y* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) correlation.direction[i].alpha+=x*y* cooccurrence[x][y].direction[i].alpha; /* Inverse Difference Moment. 
*/ channel_features[RedPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BluePixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[y+x+2].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Entropy. 
*/ channel_features[RedPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); channel_features[GreenPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); channel_features[BluePixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].alpha* MagickLog10(cooccurrence[x][y].direction[i].alpha); /* Information Measures of Correlation. */ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->alpha_trait != UndefinedPixelTrait) density_x[x].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].black+= cooccurrence[x][y].direction[i].black; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_y[y].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; 
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->colorspace == CMYKColorspace) { mean.direction[i].black+=y*sum[y].direction[i].black; sum_squares.direction[i].black+=y*y*sum[y].direction[i].black; } if (image->alpha_trait != UndefinedPixelTrait) { mean.direction[i].alpha+=y*sum[y].direction[i].alpha; sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha; } } /* Correlation: measure of linear-dependencies in the image. */ channel_features[RedPixelChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenPixelChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BluePixelChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].correlation[i]= (correlation.direction[i].black-mean.direction[i].black* mean.direction[i].black)/(sqrt(sum_squares.direction[i].black- (mean.direction[i].black*mean.direction[i].black))*sqrt( sum_squares.direction[i].black-(mean.direction[i].black* mean.direction[i].black))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].correlation[i]= (correlation.direction[i].alpha-mean.direction[i].alpha* mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha- (mean.direction[i].alpha*mean.direction[i].alpha))*sqrt( 
sum_squares.direction[i].alpha-(mean.direction[i].alpha* mean.direction[i].alpha))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. */ channel_features[RedPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].alpha; /* Sum entropy. */ channel_features[RedPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Sum variance. 
*/ channel_features[RedPixelChannel].sum_variance[i]+= (x-channel_features[RedPixelChannel].sum_entropy[i])* (x-channel_features[RedPixelChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_variance[i]+= (x-channel_features[GreenPixelChannel].sum_entropy[i])* (x-channel_features[GreenPixelChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_variance[i]+= (x-channel_features[BluePixelChannel].sum_entropy[i])* (x-channel_features[BluePixelChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_variance[i]+= (x-channel_features[BlackPixelChannel].sum_entropy[i])* (x-channel_features[BlackPixelChannel].sum_entropy[i])* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_variance[i]+= (x-channel_features[AlphaPixelChannel].sum_entropy[i])* (x-channel_features[AlphaPixelChannel].sum_entropy[i])* density_xy[x].direction[i].alpha; } } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=(y-mean.direction[i].black+1)* (y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)* (y-mean.direction[i].alpha+1)* cooccurrence[x][y].direction[i].alpha; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Information Measures of Correlation. 
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy.direction[i].alpha-= cooccurrence[x][y].direction[i].alpha*MagickLog10( cooccurrence[x][y].direction[i].alpha); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].black-=( cooccurrence[x][y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy1.direction[i].alpha-=( cooccurrence[x][y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red* density_y[y].direction[i].red)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* 
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue* density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].black-=(density_x[x].direction[i].black* density_y[y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha* density_y[y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); } } channel_features[RedPixelChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenPixelChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BluePixelChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].variance_sum_of_squares[i]= variance.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].variance_sum_of_squares[i]= variance.direction[i].alpha; } /* Compute more texture features. */ (void) memset(&variance,0,sizeof(variance)); (void) memset(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=density_xy[x].direction[i].alpha; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].black+=density_xy[x].direction[i].black* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha* density_xy[x].direction[i].alpha; /* Difference entropy. */ channel_features[RedPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Information Measures of Correlation. 
*/ entropy_x.direction[i].red-=(density_x[x].direction[i].red* MagickLog10(density_x[x].direction[i].red)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* MagickLog10(density_x[x].direction[i].green)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* MagickLog10(density_x[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].black-=(density_x[x].direction[i].black* MagickLog10(density_x[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha* MagickLog10(density_x[x].direction[i].alpha)); entropy_y.direction[i].red-=(density_y[x].direction[i].red* MagickLog10(density_y[x].direction[i].red)); entropy_y.direction[i].green-=(density_y[x].direction[i].green* MagickLog10(density_y[x].direction[i].green)); entropy_y.direction[i].blue-=(density_y[x].direction[i].blue* MagickLog10(density_y[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].black-=(density_y[x].direction[i].black* MagickLog10(density_y[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha* MagickLog10(density_y[x].direction[i].alpha)); } /* Difference variance. 
*/ channel_features[RedPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BluePixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].black)- (variance.direction[i].black*variance.direction[i].black))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].alpha)- (variance.direction[i].alpha*variance.direction[i].alpha))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? 
entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BluePixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/ (entropy_x.direction[i].black > entropy_y.direction[i].black ? entropy_x.direction[i].black : entropy_y.direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/ (entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ? entropy_x.direction[i].alpha : entropy_y.direction[i].alpha); channel_features[RedPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BluePixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black- entropy_xy.direction[i].black))))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha- entropy_xy.direction[i].alpha))))); } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t z; for (z=0; z < (ssize_t) number_grays; z++) { register ssize_t y; ChannelStatistics pixel; (void) memset(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) pixel.direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) pixel.direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } /* Maximum Correlation Coefficient. */ Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red* cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/ density_y[x].direction[i].red; Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green* cooccurrence[y][x].direction[i].green/ density_x[z].direction[i].green/density_y[x].direction[i].red; Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue* cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/ density_y[x].direction[i].blue; if (image->colorspace == CMYKColorspace) Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black* cooccurrence[y][x].direction[i].black/ density_x[z].direction[i].black/density_y[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) Q[z][y].direction[i].alpha+= cooccurrence[z][x].direction[i].alpha* cooccurrence[y][x].direction[i].alpha/ density_x[z].direction[i].alpha/ density_y[x].direction[i].alpha; } } channel_features[RedPixelChannel].contrast[i]+=z*z* pixel.direction[i].red; 
channel_features[GreenPixelChannel].contrast[i]+=z*z* pixel.direction[i].green; channel_features[BluePixelChannel].contrast[i]+=z*z* pixel.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].contrast[i]+=z*z* pixel.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].contrast[i]+=z*z* pixel.direction[i].alpha; } /* Maximum Correlation Coefficient. Future: return second largest eigenvalue of Q. */ channel_features[RedPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[BluePixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); } /* Relinquish resources. 
*/ sum=(ChannelStatistics *) RelinquishMagickMemory(sum); for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); return(channel_features); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H o u g h L i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use HoughLineImage() in conjunction with any binary edge extracted image (we % recommand Canny) to identify lines in the image. The algorithm accumulates % counts for every white pixel for every possible orientation (for angles from % 0 to 179 in 1 degree increments) and distance from the center of the image to % the corner (in 1 px increments) and stores the counts in an accumulator matrix % of angle vs distance. The size of the accumulator is 180x(diagonal/2). Next % it searches this space for peaks in counts and converts the locations of the % peaks to slope and intercept in the normal x,y input image space. Use the % slope/intercepts to find the endpoints clipped to the bounds of the image. The % lines are then drawn. The counts are a measure of the length of the lines % % The format of the HoughLineImage method is: % % Image *HoughLineImage(const Image *image,const size_t width, % const size_t height,const size_t threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width, height: find line pairs as local maxima in this neighborhood. 
% % o threshold: the line count threshold. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define BoundingBox "viewbox" DrawInfo *draw_info; Image *image; MagickBooleanType status; /* Open image. */ image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } image->columns=columns; image->rows=rows; draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL); draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/ DefaultResolution; draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/ DefaultResolution; image->columns=(size_t) (draw_info->affine.sx*image->columns); image->rows=(size_t) (draw_info->affine.sy*image->rows); status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); if (SetImageBackgroundColor(image,exception) == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Render drawing. 
*/ if (GetBlobStreamData(image) == (unsigned char *) NULL) draw_info->primitive=FileToString(image->filename,~0UL,exception); else { draw_info->primitive=(char *) AcquireMagickMemory((size_t) GetBlobSize(image)+1); if (draw_info->primitive != (char *) NULL) { (void) memcpy(draw_info->primitive,GetBlobStreamData(image), (size_t) GetBlobSize(image)); draw_info->primitive[GetBlobSize(image)]='\0'; } } (void) DrawImage(image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } MagickExport Image *HoughLineImage(const Image *image,const size_t width, const size_t height,const size_t threshold,ExceptionInfo *exception) { #define HoughLineImageTag "HoughLine/Image" CacheView *image_view; char message[MagickPathExtent], path[MagickPathExtent]; const char *artifact; double hough_height; Image *lines_image = NULL; ImageInfo *image_info; int file; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *accumulator; PointInfo center; register ssize_t y; size_t accumulator_height, accumulator_width, line_count; /* Create the accumulator. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); accumulator_width=180; hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ? image->rows : image->columns))/2.0); accumulator_height=(size_t) (2.0*hough_height); accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height, sizeof(double),exception); if (accumulator == (MatrixInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (NullMatrix(accumulator) == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Populate the accumulator. 
*/ status=MagickTrue; progress=0; center.x=(double) image->columns/2.0; center.y=(double) image->rows/2.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelIntensity(image,p) > (QuantumRange/2.0)) { register ssize_t i; for (i=0; i < 180; i++) { double count, radius; radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+ (((double) y-center.y)*sin(DegreesToRadians((double) i))); (void) GetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); count++; (void) SetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); } } p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CannyEdgeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } /* Generate line segments from accumulator. 
*/ file=AcquireUniqueFileResource(path); if (file == -1) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } (void) FormatLocaleString(message,MagickPathExtent, "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width, (double) height,(double) threshold); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "# x1,y1 x2,y2 # count angle distance\n"); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; line_count=image->columns > image->rows ? image->columns/4 : image->rows/4; if (threshold != 0) line_count=threshold; for (y=0; y < (ssize_t) accumulator_height; y++) { register ssize_t x; for (x=0; x < (ssize_t) accumulator_width; x++) { double count; (void) GetMatrixElement(accumulator,x,y,&count); if (count >= (double) line_count) { double maxima; SegmentInfo line; ssize_t v; /* Is point a local maxima? 
*/ maxima=count; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((u != 0) || (v !=0)) { (void) GetMatrixElement(accumulator,x+u,y+v,&count); if (count > maxima) { maxima=count; break; } } } if (u < (ssize_t) (width/2)) break; } (void) GetMatrixElement(accumulator,x,y,&count); if (maxima > count) continue; if ((x >= 45) && (x <= 135)) { /* y = (r-x cos(t))/sin(t) */ line.x1=0.0; line.y1=((double) (y-(accumulator_height/2.0))-((line.x1- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); line.x2=(double) image->columns; line.y2=((double) (y-(accumulator_height/2.0))-((line.x2- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); } else { /* x = (r-y cos(t))/sin(t) */ line.y1=0.0; line.x1=((double) (y-(accumulator_height/2.0))-((line.y1- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); line.y2=(double) image->rows; line.x2=((double) (y-(accumulator_height/2.0))-((line.y2- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); } (void) FormatLocaleString(message,MagickPathExtent, "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2, maxima,(double) x,(double) y); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; } } } (void) close(file); /* Render lines to image canvas. 
*/ image_info=AcquireImageInfo(); image_info->background_color=image->background_color; (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"background",artifact); artifact=GetImageArtifact(image,"fill"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"fill",artifact); artifact=GetImageArtifact(image,"stroke"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"stroke",artifact); artifact=GetImageArtifact(image,"strokewidth"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"strokewidth",artifact); lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception); artifact=GetImageArtifact(image,"hough-lines:accumulator"); if ((lines_image != (Image *) NULL) && (IsStringTrue(artifact) != MagickFalse)) { Image *accumulator_image; accumulator_image=MatrixToImage(accumulator,exception); if (accumulator_image != (Image *) NULL) AppendImageToList(&lines_image,accumulator_image); } /* Free resources. */ accumulator=DestroyMatrixInfo(accumulator); image_info=DestroyImageInfo(image_info); (void) RelinquishUniqueFileResource(path); return(GetFirstImageInList(lines_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M e a n S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MeanShiftImage() delineate arbitrarily shaped clusters in the image. For % each pixel, it visits all the pixels in the neighborhood specified by % the window centered at the pixel and excludes those that are outside the % radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those % that are within the specified color distance from the current mean, and % computes a new x,y centroid from those coordinates and a new mean. 
  This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
%      Image *MeanShiftImage(const Image *image,const size_t width,
%        const size_t height,const double color_distance,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o width, height: find pixels in this neighborhood.
%
%    o color_distance: the color distance.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations  100
#define MeanShiftImageTag  "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clone the input so the source pixels stay untouched while the converged
    means are written to the clone.
  */
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse)
    {
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  /*
    Two read-only views over the source: one for row-wise traversal, one
    (pixel_view) for the random-access window probes during the shift.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      PixelInfo
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /*
        Seed the mean with the current pixel's color and location, then
        iterate the mean-shift update until convergence or the iteration cap.
      */
      GetPixelInfo(image,&mean_pixel);
      GetPixelInfoPixel(image,p,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        PixelInfo
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetPixelInfo(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /*
          Average location and color over window pixels that lie inside the
          elliptical radius AND within color_distance of the current mean
          (distance compared in squared RGB space to avoid a sqrt).
        */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelInfo
                  pixel;

                /*
                  NOTE(review): this assignment overwrites the shared
                  `status` from inside the parallel region on every probe;
                  a failed probe is recorded but a later success clobbers
                  it — confirm this is the intended (best-effort) behavior.
                */
                status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.alpha+=pixel.alpha;
                    count++;
                  }
              }
          }
        }
        /*
          NOTE(review): if no window pixel is within color_distance of the
          drifted mean, count is 0 and gamma becomes inf (NaN mean) — the
          seed iteration always accepts the center pixel, but later
          iterations may not; consider guarding count == 0.
        */
        gamma=1.0/count;
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.alpha=gamma*sum_pixel.alpha;
        /*
          Convergence test: squared spatial shift plus squared color shift
          (color scaled to a 0..255 range via QuantumScale) under 3.0.
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      /*
        Write the converged mean into the output pixel.
      */
      SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q);
      SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q);
      SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q);
      SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mean_image);
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MeanShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
column_matrix.h
/*!
 * Copyright 2017 by Contributors
 * \file column_matrix.h
 * \brief Utility for fast column-wise access
 * \author Philip Cho
 */
#ifndef XGBOOST_COMMON_COLUMN_MATRIX_H_
#define XGBOOST_COMMON_COLUMN_MATRIX_H_

#include <limits>
#include <vector>
#include <memory>

#include "hist_util.h"

namespace xgboost {
namespace common {

class ColumnMatrix;

/*! \brief column type */
enum ColumnType {
  kDenseColumn,
  kSparseColumn
};

/*! \brief a column storage, to be used with ApplySplit. Note that each
    bin id is stored as index[i] + index_base.
    Different types of column index for each column allow
    to reduce the memory usage. */
template <typename BinIdxType>
class Column {
 public:
  Column(ColumnType type, common::Span<const BinIdxType> index,
         const uint32_t index_base)
      : type_(type),
        index_(index),
        index_base_(index_base) {}

  // Global (across all features) bin id: per-feature bin id plus this
  // feature's least bin id.
  uint32_t GetGlobalBinIdx(size_t idx) const {
    return index_base_ + static_cast<uint32_t>(index_[idx]);
  }

  // Per-feature (compressed) bin id at position idx.
  BinIdxType GetFeatureBinIdx(size_t idx) const { return index_[idx]; }

  const uint32_t GetBaseIdx() const { return index_base_; }

  common::Span<const BinIdxType> GetFeatureBinIdxPtr() const { return index_; }

  ColumnType GetType() const { return type_; }

  /* returns number of elements in column */
  size_t Size() const { return index_.size(); }

 private:
  /* type of column */
  ColumnType type_;
  /* bin indexes in range [0, max_bins - 1] */
  common::Span<const BinIdxType> index_;
  /* bin index offset for specific feature */
  const uint32_t index_base_;
};

/*! \brief a sparse column: stores the originating row index alongside each
    bin id, since only rows with a value for the feature are present. */
template <typename BinIdxType>
class SparseColumn: public Column<BinIdxType> {
 public:
  SparseColumn(ColumnType type, common::Span<const BinIdxType> index,
               uint32_t index_base, common::Span<const size_t> row_ind)
      : Column<BinIdxType>(type, index, index_base),
        row_ind_(row_ind) {}

  const size_t* GetRowData() const { return row_ind_.data(); }

  // Row index of the idx-th stored entry.
  size_t GetRowIdx(size_t idx) const {
    return row_ind_.data()[idx];
  }

 private:
  /* indexes of rows */
  common::Span<const size_t> row_ind_;
};

/*! \brief a dense column: one slot per row; missing rows are marked via a
    per-slot flag rather than being omitted. */
template <typename BinIdxType>
class DenseColumn: public Column<BinIdxType> {
 public:
  DenseColumn(ColumnType type, common::Span<const BinIdxType> index,
              uint32_t index_base,
              const std::vector<bool>::const_iterator missing_flags)
      : Column<BinIdxType>(type, index, index_base),
        missing_flags_(missing_flags) {}

  // True when row idx has no value for this feature.
  bool IsMissing(size_t idx) const { return missing_flags_[idx]; }

 private:
  /* flags for missing values in dense columns */
  std::vector<bool>::const_iterator missing_flags_;
};

/*! \brief a collection of columns, with support for construction from
    GHistIndexMatrix. */
class ColumnMatrix {
 public:
  // get number of features
  inline bst_uint GetNumFeature() const {
    return static_cast<bst_uint>(type_.size());
  }

  // construct column matrix from GHistIndexMatrix
  inline void Init(const GHistIndexMatrix& gmat,
                   double  sparse_threshold) {
    const int32_t nfeature = static_cast<int32_t>(gmat.cut.Ptrs().size() - 1);
    const size_t nrow = gmat.row_ptr.size() - 1;
    // identify type of each column
    feature_counts_.resize(nfeature);
    type_.resize(nfeature);
    std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
    // per-feature bin ids must fit in uint32_t
    uint32_t max_val = std::numeric_limits<uint32_t>::max();
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      CHECK_LE(gmat.cut.Ptrs()[fid + 1] - gmat.cut.Ptrs()[fid], max_val);
    }
    bool all_dense = gmat.IsDense();
    gmat.GetFeatureCounts(&feature_counts_[0]);
    // classify features: a feature with too few present values is stored
    // sparsely (row indices kept), the rest densely (one slot per row)
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (static_cast<double>(feature_counts_[fid])
                 < sparse_threshold * nrow) {
        type_[fid] = kSparseColumn;
        all_dense = false;
      } else {
        type_[fid] = kDenseColumn;
      }
    }
    // want to compute storage boundary for each feature
    // using variants of prefix sum scan
    feature_offsets_.resize(nfeature + 1);
    size_t accum_index_ = 0;
    feature_offsets_[0] = accum_index_;
    for (int32_t fid = 1; fid < nfeature + 1; ++fid) {
      if (type_[fid - 1] == kDenseColumn) {
        accum_index_ += static_cast<size_t>(nrow);
      } else {
        accum_index_ += feature_counts_[fid - 1];
      }
      feature_offsets_[fid] = accum_index_;
    }

    // pick the narrowest bin-id type and size the flat byte buffer
    SetTypeSize(gmat.max_num_bins);
    index_.resize(feature_offsets_[nfeature] * bins_type_size_, 0);
    if (!all_dense) {
      row_ind_.resize(feature_offsets_[nfeature]);
    }

    // store least bin id for each feature
    // NOTE(review): keeps a non-owning pointer into gmat.cut — this
    // ColumnMatrix must not outlive gmat; confirm with callers.
    index_base_ = const_cast<uint32_t*>(gmat.cut.Ptrs().data());

    const bool noMissingValues =
        NoMissingValues(gmat.row_ptr[nrow], nrow, nfeature);

    // when nothing is missing, flags can start out all-false; otherwise
    // start all-true and clear per stored entry while filling
    if (noMissingValues) {
      missing_flags_.resize(feature_offsets_[nfeature], false);
    } else {
      missing_flags_.resize(feature_offsets_[nfeature], true);
    }

    // pre-fill index_ for dense columns
    if (all_dense) {
      BinTypeSize gmat_bin_size = gmat.index.GetBinTypeSize();
      if (gmat_bin_size == kUint8BinsTypeSize) {
        SetIndexAllDense(gmat.index.data<uint8_t>(), gmat, nrow, nfeature,
                         noMissingValues);
      } else if (gmat_bin_size == kUint16BinsTypeSize) {
        SetIndexAllDense(gmat.index.data<uint16_t>(), gmat, nrow, nfeature,
                         noMissingValues);
      } else {
        CHECK_EQ(gmat_bin_size, kUint32BinsTypeSize);
        SetIndexAllDense(gmat.index.data<uint32_t>(), gmat, nrow, nfeature,
                         noMissingValues);
      }
    /* For sparse DMatrix gmat.index.getBinTypeSize() returns always
       kUint32BinsTypeSize but for ColumnMatrix we still have a chance
       to reduce the memory consumption */
    } else {
      if (bins_type_size_ == kUint8BinsTypeSize) {
        SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      } else if (bins_type_size_ == kUint16BinsTypeSize) {
        SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      } else {
        CHECK_EQ(bins_type_size_, kUint32BinsTypeSize);
        SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      }
    }
  }

  /* Set the number of bytes based on numeric limit of maximum number of
     bins provided by user.
     NOTE(review): `max_num_bins - 1` underflows if max_num_bins == 0, and
     the size_t-vs-int comparison mixes signedness — confirm callers always
     pass max_num_bins >= 1. */
  void SetTypeSize(size_t max_num_bins) {
    if ( (max_num_bins - 1) <=
         static_cast<int>(std::numeric_limits<uint8_t>::max()) ) {
      bins_type_size_ = kUint8BinsTypeSize;
    } else if ((max_num_bins - 1) <=
               static_cast<int>(std::numeric_limits<uint16_t>::max())) {
      bins_type_size_ = kUint16BinsTypeSize;
    } else {
      bins_type_size_ = kUint32BinsTypeSize;
    }
  }

  /* Fetch an individual column. This code should be used with a type switch
     to determine the type of bin id's */
  template <typename BinIdxType>
  std::unique_ptr<const Column<BinIdxType> > GetColumn(unsigned fid) const {
    // the requested width must match the stored width for the reinterpret
    // below to be valid
    CHECK_EQ(sizeof(BinIdxType), bins_type_size_);

    const size_t feature_offset = feature_offsets_[fid];  // to get right place for certain feature
    const size_t column_size = feature_offsets_[fid + 1] - feature_offset;
    common::Span<const BinIdxType> bin_index = {
        reinterpret_cast<const BinIdxType*>(
            &index_[feature_offset * bins_type_size_]),
        column_size };
    std::unique_ptr<const Column<BinIdxType> > res;
    if (type_[fid] == ColumnType::kDenseColumn) {
      std::vector<bool>::const_iterator column_iterator =
          missing_flags_.begin();
      advance(column_iterator, feature_offset);  // increment iterator to right position
      res.reset(new DenseColumn<BinIdxType>(type_[fid], bin_index,
                                            index_base_[fid],
                                            column_iterator));
    } else {
      res.reset(new SparseColumn<BinIdxType>(type_[fid], bin_index,
                                             index_base_[fid],
                                             {&row_ind_[feature_offset],
                                              column_size}));
    }
    return res;
  }

  /* Fill index_ when every column is dense; `index` points at gmat's
     row-major bin ids, which are transposed into per-feature layout. */
  template<typename T>
  inline void SetIndexAllDense(T* index, const GHistIndexMatrix& gmat,
                               const size_t nrow, const size_t nfeature,
                               const bool noMissingValues) {
    T* local_index = reinterpret_cast<T*>(&index_[0]);

    /* missing values make sense only for column with type kDenseColumn,
       and if no missing values were observed it could be handled much
       faster. */
    if (noMissingValues) {
      // each row has exactly nfeature entries, so the transpose is a pure
      // index computation and rows can be processed in parallel
      const int32_t nthread = omp_get_max_threads();  // NOLINT
      #pragma omp parallel for num_threads(nthread)
      for (omp_ulong rid = 0; rid < nrow; ++rid) {
        const size_t ibegin = rid*nfeature;
        const size_t iend = (rid+1)*nfeature;
        size_t j = 0;
        for (size_t i = ibegin; i < iend; ++i, ++j) {
          const size_t idx = feature_offsets_[j];
          local_index[idx + rid] = index[i];
        }
      }
    } else {
      /* to handle rows in all batches, sum of all batch sizes equal to
         gmat.row_ptr.size() - 1 */
      size_t rbegin = 0;
      for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
        const xgboost::Entry* data_ptr = batch.data.HostVector().data();
        const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
        const size_t batch_size = batch.Size();
        CHECK_LT(batch_size, offset_vec.size());
        for (size_t rid = 0; rid < batch_size; ++rid) {
          const size_t size = offset_vec[rid + 1] - offset_vec[rid];
          SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};

          const size_t ibegin = gmat.row_ptr[rbegin + rid];
          const size_t iend = gmat.row_ptr[rbegin + rid + 1];
          CHECK_EQ(ibegin + inst.size(), iend);
          size_t j = 0;
          size_t fid = 0;
          for (size_t i = ibegin; i < iend; ++i, ++j) {
            fid = inst[j].index;
            const size_t idx = feature_offsets_[fid];
            /* rbegin allows to store indexes from specific SparsePage
               batch */
            local_index[idx + rbegin + rid] = index[i];
            missing_flags_[idx + rbegin + rid] = false;
          }
        }
        rbegin += batch.Size();
      }
    }
  }

  /* Fill index_ (and row_ind_ for sparse columns) in the mixed dense/sparse
     case; gmat's bin ids are always uint32_t here and are narrowed to T
     after subtracting each feature's base bin id. Sequential: num_nonzeros
     bookkeeping makes the sparse fill order-dependent. */
  template<typename T>
  inline void SetIndex(uint32_t* index, const GHistIndexMatrix& gmat,
                       const size_t nrow, const size_t nfeature) {
    std::vector<size_t> num_nonzeros;
    num_nonzeros.resize(nfeature);
    std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);

    T* local_index = reinterpret_cast<T*>(&index_[0]);
    size_t rbegin = 0;
    for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
      const xgboost::Entry* data_ptr = batch.data.HostVector().data();
      const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
      const size_t batch_size = batch.Size();
      CHECK_LT(batch_size, offset_vec.size());
      for (size_t rid = 0; rid < batch_size; ++rid) {
        const size_t ibegin = gmat.row_ptr[rbegin + rid];
        const size_t iend = gmat.row_ptr[rbegin + rid + 1];
        size_t fid = 0;
        const size_t size = offset_vec[rid + 1] - offset_vec[rid];
        SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};

        CHECK_EQ(ibegin + inst.size(), iend);
        size_t j = 0;
        for (size_t i = ibegin; i < iend; ++i, ++j) {
          const uint32_t bin_id = index[i];

          fid = inst[j].index;
          if (type_[fid] == kDenseColumn) {
            // dense: slot addressed by global row id; clear missing flag
            T* begin = &local_index[feature_offsets_[fid]];
            begin[rid + rbegin] = bin_id - index_base_[fid];
            missing_flags_[feature_offsets_[fid] + rid + rbegin] = false;
          } else {
            // sparse: append bin id and remember the originating row
            T* begin = &local_index[feature_offsets_[fid]];
            begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
            row_ind_[feature_offsets_[fid] + num_nonzeros[fid]] = rid + rbegin;
            ++num_nonzeros[fid];
          }
        }
      }
      rbegin += batch.Size();
    }
  }

  const BinTypeSize GetTypeSize() const {
    return bins_type_size_;
  }

  // A fully dense matrix stores exactly n_features entries per row.
  const bool NoMissingValues(const size_t n_elements,
                             const size_t n_row, const size_t n_features) {
    return n_elements == n_features * n_row;
  }

 private:
  // flat byte buffer holding every column's bin ids back-to-back, with
  // bins_type_size_ bytes per id
  std::vector<uint8_t> index_;

  // number of present (non-missing) values per feature
  std::vector<size_t> feature_counts_;
  std::vector<ColumnType> type_;
  // row indices for sparse columns, sharing feature_offsets_'s layout
  std::vector<size_t> row_ind_;
  /* indicate where each column's index and row_ind is stored. */
  std::vector<size_t> feature_offsets_;

  // index_base_[fid]: least bin id for feature fid
  uint32_t* index_base_;
  std::vector<bool> missing_flags_;
  BinTypeSize bins_type_size_;
};

}  // namespace common
}  // namespace xgboost
#endif  // XGBOOST_COMMON_COLUMN_MATRIX_H_
ZQ_FaceDatabase.h
#ifndef _ZQ_FACE_DATABASE_H_ #define _ZQ_FACE_DATABASE_H_ #pragma once #include <vector> #include <string> #include "ZQ_FaceFeature.h" #include "ZQ_FaceRecognizerSphereFace.h" #include "ZQ_MathBase.h" #include "ZQ_MergeSort.h" #include <omp.h> namespace ZQ { class ZQ_FaceDatabase { public: class Person { public: std::vector<ZQ_FaceFeature> features; std::vector<std::string> filenames; }; friend class ZQ_FaceDatabaseMaker; private: std::vector<Person> persons; std::vector<std::string> names; public: bool Search(const std::vector<ZQ_FaceFeature>& feat, std::vector<int>& out_ids, std::vector<float>& out_scores, std::vector<std::string>& out_names, std::vector<std::string>& out_filenames, int max_num = 3, int max_thread_num = 1) const { return _find_the_best_matches(feat, *this, out_ids, out_scores, out_names, out_filenames, max_num, max_thread_num); } bool ExportSimilarityForAllPairs(const std::string& out_score_file, const std::string& out_flag_file, __int64& all_pair_num, __int64& same_pair_num, __int64& notsame_pair_num, int max_thread_num, bool quantization) const { return _export_similarity_for_all_pairs(out_score_file, out_flag_file, all_pair_num, same_pair_num, notsame_pair_num, max_thread_num, quantization); } bool SelectSubset(const std::string& out_file, int max_thread_num, int num_image_thresh = 10, float similarity_thresh = 0.5) const { return _select_subset(out_file, max_thread_num, similarity_thresh, num_image_thresh); } bool SelectSubsetDesiredNum(const std::string& out_file, int desired_person_num, int min_image_num_per_person, int max_image_num_per_person, int max_thread_num, float similarity_thresh = 0.5) const { return _select_subset_desired_num(out_file, desired_person_num, min_image_num_per_person, max_image_num_per_person, max_thread_num, similarity_thresh); } bool DetectRepeatPerson(const std::string& out_file, int max_thread_num, float similarity_thresh = 0.5) const { return _detect_repeat_person(out_file, max_thread_num, similarity_thresh); } 
bool DetectLowestPair(const std::string& out_file, int max_thread_num, float similarity_thresh = 0.5) const { return _detect_lowest_pair(out_file, max_thread_num, similarity_thresh); } void Clear() { persons.clear(); names.clear(); } bool LoadFromFileBinay(const std::string& feats_file, const std::string& names_file) { Clear(); if (!_load_feats_binary(feats_file)) { Clear(); return false; } if (!_load_names(names_file)) { Clear(); return false; } if (persons.size() != names.size()) { Clear(); return false; } return true; } bool SaveToFileBinary(const std::string& feats_file, const std::string& names_file) { if (!_check_valid()) { printf("not a valid database\n"); return false; } if (!_write_feats_binary(feats_file)) { printf("failed to save %s\n", feats_file.c_str()); return false; } if (!_write_names(names_file)) { printf("failed to save %s\n", names_file.c_str()); return false; } return true; } bool SaveToFileBinaryCompact(const std::string& feats_file, const std::string& names_file) { if (!_check_valid()) { printf("not a valid database\n"); return false; } if (!_write_feats_binary_compact(feats_file)) { printf("failed to save %s\n", feats_file.c_str()); return false; } if (!_write_names(names_file)) { printf("failed to save %s\n", names_file.c_str()); return false; } return true; } private: bool _check_valid() { int person_num = persons.size(); if (person_num == 0) return false; if (person_num != names.size()) return false; for (int i = 0; i < person_num; i++) { int feat_num = persons[i].features.size(); if (feat_num == 0) return false; if (feat_num != persons[i].filenames.size()) return false; } int feat_dim = persons[0].features[0].length; if (feat_dim == 0) return false; for (int i = 0; i < person_num; i++) { int feat_num = persons[i].features.size(); for (int j = 0; j < feat_num; j++) { if (feat_dim != persons[i].features[j].length) return false; } } return true; } bool _write_feats_binary(const std::string& file) { FILE* out = 0; if (0 != fopen_s(&out, 
file.c_str(), "wb")) return false; int person_num = persons.size(); int feat_dim = persons[0].features[0].length; if (1 != fwrite(&feat_dim, sizeof(int), 1, out)) { fclose(out); return false; } if (1 != fwrite(&person_num, sizeof(int), 1, out)) { fclose(out); return false; } char end_c = '\0'; for (int i = 0; i < person_num; i++) { int feat_num = persons[i].features.size(); if (1 != fwrite(&feat_num, sizeof(int), 1, out)) { fclose(out); return false; } for (int j = 0; j < feat_num; j++) { const char* str = persons[i].filenames[j].c_str(); int len = strlen(str) + 1; if (1 != fwrite(&len, sizeof(int), 1, out)) { fclose(out); return false; } if ((len-1) != fwrite(str, 1, len-1, out)) { fclose(out); return false; } if (1 != fwrite(&end_c, sizeof(char), 1, out)) { fclose(out); return false; } if (feat_dim != fwrite(persons[i].features[j].pData, sizeof(float), feat_dim, out)) { fclose(out); return false; } } } fclose(out); return true; } bool _write_feats_binary_compact(const std::string& file) { FILE* out = 0; if (0 != fopen_s(&out, file.c_str(), "wb")) return false; int person_num = persons.size(); int feat_dim = persons[0].features[0].length; if (1 != fwrite(&feat_dim, sizeof(int), 1, out)) { fclose(out); return false; } if (1 != fwrite(&person_num, sizeof(int), 1, out)) { fclose(out); return false; } for (int i = 0; i < person_num; i++) { int feat_num = persons[i].features.size(); if (1 != fwrite(&feat_num, sizeof(int), 1, out)) { fclose(out); return false; } } for (int i = 0; i < person_num; i++) { int feat_num = persons[i].features.size(); for (int j = 0; j < feat_num; j++) { if (feat_dim != fwrite(persons[i].features[j].pData, sizeof(float), feat_dim, out)) { fclose(out); return false; } } } fclose(out); return true; } bool _load_feats_binary(const std::string& file) { FILE* in = 0; if (0 != fopen_s(&in, file.c_str(), "rb")) return false; int person_num = 0; int feat_dim = 0; if (1 != fread(&feat_dim, sizeof(int), 1, in)) { fclose(in); return false; } if (1 != 
fread(&person_num, sizeof(int), 1, in)) { fclose(in); return false; } if (person_num <= 0 || feat_dim <= 0) { fclose(in); return false; } std::vector<char> buf; persons.resize(person_num); for (int i = 0; i < person_num; i++) { int feat_num = 0; if (1 != fread(&feat_num, sizeof(int), 1, in)) { fclose(in); return false; } if (feat_num <= 0) { fclose(in); return false; } persons[i].features.resize(feat_num); persons[i].filenames.resize(feat_num); for (int j = 0; j < feat_num; j++) { int len; if (1 != fread(&len, sizeof(int), 1, in)) { fclose(in); return false; } buf.resize(len); if (len > 0) { if (len != fread(&buf[0], 1, len, in) || buf[len-1] != '\0') { fclose(in); return false; } persons[i].filenames[j] = &buf[0]; } persons[i].features[j].ChangeSize(feat_dim); if (feat_dim != fread(persons[i].features[j].pData, sizeof(float), feat_dim, in)) { fclose(in); return false; } } } fclose(in); return true; } bool _write_names(const std::string& file) { FILE* out = 0; if (0 != fopen_s(&out, file.c_str(), "w")) return false; int person_num = names.size(); for (int i = 0; i < person_num; i++) { fprintf(out, "%s\n", names[i].c_str()); } fclose(out); return true; } bool _load_names(const std::string& file) { FILE* in = 0; if (0 != fopen_s(&in, file.c_str(), "r")) return false; char line[200] = { 0 }; while (true) { line[0] = '\0'; fgets(line, 199, in); if (line[0] == '\0') break; int len = strlen(line); if (line[len - 1] == '\n') line[--len] = '\0'; names.push_back(std::string(line)); } fclose(in); return true; } static bool _find_the_best_matches(const std::vector<ZQ_FaceFeature>& feat, const ZQ_FaceDatabase& database, std::vector<int>& out_ids, std::vector<float>& out_scores, std::vector<std::string>& out_names, std::vector<std::string>& out_filenames, int max_num, int max_thread_num) { int feat_num = feat.size(); if (feat_num == 0) return false; double t1 = omp_get_wtime(); int person_num = database.persons.size(); std::vector<int> person_j(person_num); std::vector<float> 
scores(person_num); std::vector<int> ids(person_num); int num_procs = omp_get_num_procs(); int real_threads = __max(1, __min(max_thread_num, num_procs - 1)); //printf("real_threads = %d\n", real_threads); #pragma omp parallel for schedule(dynamic) num_threads(real_threads) for (int i = 0; i < person_num; i++) { ids[i] = i; float max_score = -FLT_MAX; int max_id = -1; for (int j = 0; j < database.persons[i].features.size(); j++) { float tmp_score = -FLT_MAX; for (int k = 0; k < feat_num; k++) { if (feat[k].length == database.persons[i].features[j].length) { tmp_score = ZQ_FaceRecognizerSphereFace::CalSimilarity(feat[k].length, feat[k].pData, database.persons[i].features[j].pData); } if (max_id < 0) { max_id = 0; max_score = tmp_score; } else { if (max_score < tmp_score) { max_id = j; max_score = tmp_score; } } } } person_j[i] = max_id; scores[i] = max_score; } double t2 = omp_get_wtime(); out_ids.clear(); out_scores.clear(); out_names.clear(); out_filenames.clear(); for (int i = 0; i < __min(max_num, person_num); i++) { float max_score = scores[i]; int max_id = i; for (int j = i + 1; j < person_num; j++) { if (max_score < scores[j]) { max_id = j; max_score = scores[j]; } } int tmp_id = ids[i]; ids[i] = ids[max_id]; ids[max_id] = tmp_id; float tmp_score = scores[i]; scores[i] = scores[max_id]; scores[max_id] = tmp_score; out_ids.push_back(ids[i]); out_scores.push_back(scores[i]); out_names.push_back(database.names[ids[i]]); out_filenames.push_back(database.persons[ids[i]].filenames[person_j[ids[i]]]); } double t3 = omp_get_wtime(); //printf("part1 = %.3f, part2 = %.3f\n", 0.001*(t2 - t1), 0.001*(t3 - t2)); return true; } bool _export_similarity_for_all_pairs(const std::string& out_score_file, const std::string& out_flag_file, __int64& all_pair_num, __int64& same_pair_num, __int64& notsame_pair_num, int max_thread_num, bool quantization) const { FILE* out1 = 0; if (0 != fopen_s(&out1, out_score_file.c_str(), "wb")) { printf("failed to create file %s\n", 
out_score_file.c_str()); return false; } FILE* out2 = 0; if (0 != fopen_s(&out2, out_flag_file.c_str(), "wb")) { printf("failed to create file %s\n", out_flag_file.c_str()); fclose(out1); return false; } int dim = persons[0].features[0].length; __int64 person_num = persons.size(); __int64 total_face_num = 0; std::vector<__int64> cur_face_offset(person_num); for (int pp = 0; pp < person_num; pp++) { cur_face_offset[pp] = total_face_num; __int64 cur_face_num = persons[pp].features.size(); total_face_num += cur_face_num; } all_pair_num = total_face_num *(total_face_num - 1) / 2; int real_thread_num = __max(1, __min(max_thread_num, omp_get_num_procs() - 1)); if (real_thread_num == 1) { for (int pp = 0; pp < person_num; pp++) { __int64 cur_face_num = persons[pp].features.size(); __int64 max_pair_num = (total_face_num - cur_face_offset[pp] - 1); std::vector<float> scores(max_pair_num); std::vector<char> flags(max_pair_num); for (__int64 i = 0; i < cur_face_num; i++) { float* cur_i_feat = persons[pp].features[i].pData; float* cur_j_feat; int idx = 0; for (__int64 j = i + 1; j < cur_face_num; j++) { cur_j_feat = persons[pp].features[j].pData; scores[idx] = ZQ_MathBase::DotProduct(dim, cur_i_feat, cur_j_feat); flags[idx] = 1; same_pair_num++; idx++; } for (__int64 qq = pp + 1; qq < person_num; qq++) { for (__int64 j = 0; j < persons[qq].features.size(); j++) { cur_j_feat = persons[qq].features[j].pData; scores[idx] = ZQ_MathBase::DotProduct(dim, cur_i_feat, cur_j_feat); flags[idx] = 0; notsame_pair_num++; idx++; } } if (idx > 0) { if (quantization) { std::vector<short> short_scores(idx); for (int j = 0; j < idx; j++) short_scores[j] = __min(SHRT_MAX, __max(-SHRT_MAX, scores[j] * SHRT_MAX)); fwrite(&short_scores[0], sizeof(short), idx, out1); } else { fwrite(&scores[0], sizeof(float), idx, out1); } fwrite(&flags[0], 1, idx, out2); } } printf("%d/%d handled\n", pp + 1, person_num); } } else { int chunk_size = 100; int handled[1] = { 0 }; __int64 tmp_same_pair_num[1] = { 0 }; 
printf("real_thread_num = %d\n", real_thread_num); #pragma omp parallel for schedule(dynamic,chunk_size) num_threads(real_thread_num) shared(handled) for (int pp = 0; pp < person_num; pp++) { __int64 cur_face_num = persons[pp].features.size(); __int64 max_pair_num = (total_face_num - cur_face_offset[pp] - 1); std::vector<float> scores(max_pair_num); std::vector<char> flags(max_pair_num); for (__int64 i = 0; i < cur_face_num; i++) { float* cur_i_feat = persons[pp].features[i].pData; float* cur_j_feat; int idx = 0; for (__int64 j = i + 1; j < cur_face_num; j++) { cur_j_feat = persons[pp].features[j].pData; scores[idx] = ZQ::ZQ_MathBase::DotProduct(dim, cur_i_feat, cur_j_feat); flags[idx] = 1; same_pair_num++; idx++; } for (__int64 qq = pp + 1; qq < person_num; qq++) { for (__int64 j = 0; j < persons[qq].features.size(); j++) { cur_j_feat = persons[qq].features[j].pData; scores[idx] = ZQ::ZQ_MathBase::DotProduct(dim, cur_i_feat, cur_j_feat); flags[idx] = 0; notsame_pair_num++; idx++; } } #pragma omp critical { if (idx > 0) { for (int kk = 0; kk < idx; kk++) { (*tmp_same_pair_num) += flags[kk]; } if (quantization) { std::vector<short> short_scores(idx); for (int j = 0; j < idx; j++) short_scores[j] = __min(SHRT_MAX, __max(-SHRT_MAX, scores[j] * SHRT_MAX)); fwrite(&short_scores[0], sizeof(short), idx, out1); } else { fwrite(&scores[0], sizeof(float), idx, out1); } fwrite(&flags[0], 1, idx, out2); } } } #pragma omp critical { (*handled)++; printf("%d/%d\n", *handled, person_num); } } same_pair_num = tmp_same_pair_num[0]; notsame_pair_num = all_pair_num - same_pair_num; } fclose(out1); fclose(out2); return true; } bool _select_subset_desired_num(const std::string& out_file, int desired_person_num, int min_image_num_per_person, int max_image_num_per_person, int max_thread_num, float similarity_thresh) const { std::vector<int> person_ids, pivot_ids; std::vector<std::vector<int>> other_good_ids; if (!_select_subset(person_ids, pivot_ids, other_good_ids, max_thread_num, 
similarity_thresh, min_image_num_per_person)) { return false; } FILE* out = 0; if (0 != fopen_s(&out, out_file.c_str(), "w")) { return false; } std::vector<int> select_ids; int person_num = person_ids.size(); for (int i = 0; i < person_num; i++) select_ids.push_back(i); if (person_num > desired_person_num) { for (int i = 0; i < desired_person_num; i++) { int rand_id = rand() % (person_num - i) + i; if (rand_id != i) { int tmp_id = select_ids[i]; select_ids[i] = select_ids[rand_id]; select_ids[rand_id] = tmp_id; } } } else { desired_person_num = person_num; } for (int i = 0; i < desired_person_num; i++) { int select_id = select_ids[i]; int p_id = person_ids[select_id]; fprintf(out, "%s\n", persons[p_id].filenames[pivot_ids[select_id]].c_str()); if (min_image_num_per_person > 1) { int good_num = other_good_ids[select_id].size(); std::vector<int> select_good_id(good_num); for (int j = 0; j < good_num; j++) select_good_id[j] = j; int desired_image_num_per_person = __min(max_image_num_per_person, good_num + 1); for (int j = 0; j < desired_image_num_per_person - 1; j++) { int rand_id = rand() % (desired_image_num_per_person - 1 - j) + j; if (rand_id != j) { int tmp_id = select_good_id[j]; select_good_id[j] = select_good_id[rand_id]; select_good_id[rand_id] = tmp_id; } } for (int j = 0; j < desired_image_num_per_person - 1; j++) { fprintf(out, "%s\n", persons[p_id].filenames[other_good_ids[select_id][select_good_id[j]]].c_str()); } } } fclose(out); return true; } bool _select_subset(const std::string& out_file, int max_thread_num, float similarity_thresh, int num_image_thresh) const { std::vector<int> person_ids, pivot_ids; std::vector<std::vector<int>> other_good_ids; if (!_select_subset(person_ids, pivot_ids, other_good_ids, max_thread_num, similarity_thresh, num_image_thresh)) { return false; } FILE* out = 0; if (0 != fopen_s(&out, out_file.c_str(), "w")) { return false; } for (int i = 0; i < person_ids.size(); i++) { int p_id = person_ids[i]; fprintf(out, "%s\n", 
persons[p_id].filenames[pivot_ids[i]].c_str()); for (int j = 0; j < other_good_ids[i].size(); j++) { fprintf(out, "%s\n", persons[p_id].filenames[other_good_ids[i][j]].c_str()); } } fclose(out); return true; } bool _select_subset(std::vector<int>& person_ids, std::vector<int>& pivot_ids, std::vector<std::vector<int>>& other_good_ids, int max_thread_num, float similarity_thresh, int num_image_thresh) const { int person_num = persons.size(); if (person_num == 0 || persons[0].features.size() == 0) return false; int dim = persons[0].features[0].length; if (dim == 0) return false; person_ids.clear(); pivot_ids.clear(); other_good_ids.clear(); if (max_thread_num <= 1) { for (int p = 0; p < person_num; p++) { int cur_num = persons[p].features.size(); std::vector<float> scores(cur_num*cur_num); for (int i = 0; i < cur_num; i++) { scores[i*cur_num + i] = 1; const float* cur_i_feat = persons[p].features[i].pData; const float* cur_j_feat; for (int j = i + 1; j < cur_num; j++) { cur_j_feat = persons[p].features[j].pData; float tmp_score = ZQ_MathBase::DotProduct(dim, cur_i_feat, cur_j_feat); scores[i*cur_num + j] = tmp_score; scores[j*cur_num + i] = tmp_score; } } int pivot_id = -1; float sum_score = -FLT_MAX; for (int i = 0; i < cur_num; i++) { float tmp_sum = 0; for (int j = 0; j < cur_num; j++) tmp_sum += scores[i*cur_num + j]; if (sum_score < tmp_sum) { pivot_id = i; sum_score = tmp_sum; } } std::vector<int> ids; for (int i = 0; i < cur_num; i++) { if (scores[pivot_id*cur_num + i] >= similarity_thresh && i != pivot_id) { ids.push_back(i); } } int id_num = ids.size(); if (id_num + 1 >= num_image_thresh) { person_ids.push_back(p); pivot_ids.push_back(pivot_id); other_good_ids.push_back(ids); } } } else { int chunk_size = (person_num + max_thread_num - 1) / max_thread_num; #pragma omp parallel for schedule(static,chunk_size) num_threads(max_thread_num) for (int p = 0; p < person_num; p++) { int cur_num = persons[p].features.size(); std::vector<float> scores(cur_num*cur_num); 
for (int i = 0; i < cur_num; i++) { scores[i*cur_num + i] = 1; const float* cur_i_feat = persons[p].features[i].pData; const float* cur_j_feat; for (int j = i + 1; j < cur_num; j++) { cur_j_feat = persons[p].features[j].pData; float tmp_score = ZQ_MathBase::DotProduct(dim, cur_i_feat, cur_j_feat); scores[i*cur_num + j] = tmp_score; scores[j*cur_num + i] = tmp_score; } } int pivot_id = -1; float sum_score = -FLT_MAX; for (int i = 0; i < cur_num; i++) { float tmp_sum = 0; for (int j = 0; j < cur_num; j++) tmp_sum += scores[i*cur_num + j]; if (sum_score < tmp_sum) { pivot_id = i; sum_score = tmp_sum; } } std::vector<int> ids; for (int i = 0; i < cur_num; i++) { if (scores[pivot_id*cur_num + i] >= similarity_thresh && i != pivot_id) { ids.push_back(i); } } int id_num = ids.size(); if (id_num + 1 >= num_image_thresh) { #pragma omp critical { person_ids.push_back(p); pivot_ids.push_back(pivot_id); other_good_ids.push_back(ids); } } } } return true; } bool _detect_repeat_person(const std::string& out_file, int max_thread_num, float similarity_thresh) const { std::vector<std::pair<int, int>> repeat_pairs; std::vector<float> scores; if (!_detect_repeat_person(repeat_pairs, scores, max_thread_num, similarity_thresh)) { return false; } int num = scores.size(); if (num > 0) { ZQ_MergeSort::MergeSortWithData(&scores[0], &repeat_pairs[0], sizeof(std::pair<int, int>), num, false); } FILE* out = 0; if (0 != fopen_s(&out, out_file.c_str(), "w")) { return false; } for (int i = 0; i < num; i++) { fprintf(out, "%.3f %s %s\n", scores[i], names[repeat_pairs[i].first].c_str(), names[repeat_pairs[i].second].c_str()); } fclose(out); return true; } bool _detect_repeat_person(std::vector<std::pair<int,int>>& repeat_pairs, std::vector<float>& repeat_scores, int max_thread_num, float similarity_thresh) const { int person_num = persons.size(); if (person_num == 0 || persons[0].features.size() == 0) return false; int dim = persons[0].features[0].length; if (dim == 0) return false; 
repeat_pairs.clear(); repeat_scores.clear(); std::vector<int> pivot_ids(person_num); if (max_thread_num <= 1) { for (int p = 0; p < person_num; p++) { int cur_num = persons[p].features.size(); std::vector<float> scores(cur_num*cur_num); for (int i = 0; i < cur_num; i++) { scores[i*cur_num + i] = 1; const float* cur_i_feat = persons[p].features[i].pData; const float* cur_j_feat; for (int j = i + 1; j < cur_num; j++) { cur_j_feat = persons[p].features[j].pData; float tmp_score = ZQ_MathBase::DotProduct(dim, cur_i_feat, cur_j_feat); scores[i*cur_num + j] = tmp_score; scores[j*cur_num + i] = tmp_score; } } int pivot_id = -1; float sum_score = -FLT_MAX; for (int i = 0; i < cur_num; i++) { float tmp_sum = 0; for (int j = 0; j < cur_num; j++) tmp_sum += scores[i*cur_num + j]; if (sum_score < tmp_sum) { pivot_id = i; sum_score = tmp_sum; } } pivot_ids[p] = pivot_id; } // for (int i = 0; i < person_num; i++) { for (int j = i + 1; j < person_num; j++) { const float* cur_i_feat = persons[i].features[pivot_ids[i]].pData; const float* cur_j_feat = persons[j].features[pivot_ids[j]].pData; float tmp_score = ZQ_MathBase::DotProduct(dim, cur_i_feat, cur_j_feat); if (tmp_score >= similarity_thresh) { repeat_pairs.push_back(std::make_pair(i, j)); repeat_scores.push_back(tmp_score); } } } } else { int chunk_size = (person_num + max_thread_num - 1) / max_thread_num; #pragma omp parallel for schedule(static,chunk_size) num_threads(max_thread_num) for (int p = 0; p < person_num; p++) { int cur_num = persons[p].features.size(); std::vector<float> scores(cur_num*cur_num); for (int i = 0; i < cur_num; i++) { scores[i*cur_num + i] = 1; const float* cur_i_feat = persons[p].features[i].pData; const float* cur_j_feat; for (int j = i + 1; j < cur_num; j++) { cur_j_feat = persons[p].features[j].pData; float tmp_score = ZQ_MathBase::DotProduct(dim, cur_i_feat, cur_j_feat); scores[i*cur_num + j] = tmp_score; scores[j*cur_num + i] = tmp_score; } } int pivot_id = -1; float sum_score = -FLT_MAX; for 
(int i = 0; i < cur_num; i++) { float tmp_sum = 0; for (int j = 0; j < cur_num; j++) tmp_sum += scores[i*cur_num + j]; if (sum_score < tmp_sum) { pivot_id = i; sum_score = tmp_sum; } } pivot_ids[p] = pivot_id; } #pragma omp parallel for schedule(static,chunk_size) num_threads(max_thread_num) for (int i = 0; i < person_num; i++) { for (int j = i + 1; j < person_num; j++) { const float* cur_i_feat = persons[i].features[pivot_ids[i]].pData; const float* cur_j_feat = persons[j].features[pivot_ids[j]].pData; float tmp_score = ZQ_MathBase::DotProduct(dim, cur_i_feat, cur_j_feat); if (tmp_score >= similarity_thresh) { #pragma omp critical { repeat_pairs.push_back(std::make_pair(i, j)); repeat_scores.push_back(tmp_score); } } } } } return true; } bool _detect_lowest_pair(const std::string& out_file, int max_thread_num, float similarity_thresh) const { std::vector<float> scores; std::vector<std::pair<std::string, std::string>> pairs; std::vector<std::pair<std::string, std::string>* > pair_ptr; if (!_detect_lowest_pair(scores, pairs, max_thread_num, similarity_thresh)) { return false; } __int64 num = scores.size(); printf("num = %lld\n", num); if (num > 0) { for (__int64 i = 0; i < num; i++) pair_ptr.push_back(&pairs[i]); ZQ_MergeSort::MergeSortWithData(&scores[0], &pair_ptr[0], sizeof(std::pair<std::string, std::string>*), num, true); } FILE* out = 0; if (0 != fopen_s(&out, out_file.c_str(), "w")) { return false; } for (__int64 i = 0; i < num; i++) { fprintf(out, "%8.3f %s %s\n", scores[i], pair_ptr[i]->first.c_str(), pair_ptr[i]->second.c_str()); } fclose(out); return true; } bool _detect_lowest_pair(std::vector<float>& scores, std::vector<std::pair<std::string, std::string>>& pairs, int max_thread_num, float similarity_thresh) const { scores.clear(); pairs.clear(); int person_num = persons.size(); if (person_num == 0 || persons[0].features.size() == 0) return false; int dim = persons[0].features[0].length; if (max_thread_num <= 1) { for (int p = 0; p < person_num; p++) { 
int num = persons[p].features.size(); float out_min_score = FLT_MAX; int out_i, out_j; for (int i = 0; i < num; i++) { for (int j = i + 1; j < num; j++) { float tmp_score = ZQ_MathBase::DotProduct(dim, persons[p].features[i].pData, persons[p].features[j].pData); if (tmp_score <= out_min_score) { out_min_score = tmp_score; out_i = i; out_j = j; } } } if (out_min_score <= similarity_thresh) { scores.push_back(out_min_score); pairs.push_back(std::make_pair(persons[p].filenames[out_i], persons[p].filenames[out_j])); } } } else { int chunk_size = 100; #pragma omp parallel for schedule(dynamic, chunk_size) num_threads(max_thread_num) for (int p = 0; p < person_num; p++) { int num = persons[p].features.size(); float out_min_score = FLT_MAX; int out_i, out_j; for (int i = 0; i < num; i++) { for (int j = i + 1; j < num; j++) { float tmp_score = ZQ_MathBase::DotProduct(dim, persons[p].features[i].pData, persons[p].features[j].pData); if (tmp_score <= out_min_score) { out_min_score = tmp_score; out_i = i; out_j = j; } } } if (out_min_score <= similarity_thresh) { #pragma omp critical { scores.push_back(out_min_score); pairs.push_back(std::make_pair(persons[p].filenames[out_i], persons[p].filenames[out_j])); } } } } return true; } }; } #endif
structure_factors_direct.h
#ifndef CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H
#define CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H

#include <cctbx/xray/scattering_type_registry.h>
#include <cctbx/xray/hr_ht_cache.h>
#include <cctbx/math/cos_sin_table.h>
#include <omptbx/omp_or_stubs.h>

// When defined, the "#pragma omp parallel for" in direct::compute below is
// compiled out and the summation runs serially.
#define CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_NO_PRAGMA_OMP

namespace cctbx { namespace xray { namespace structure_factors {

  // Accumulates one reflection's structure-factor sum over the symmetry
  // copies of reflection h (cached in hr_ht) as scatterers are fed in via
  // add_contribution_of().
  template <typename CosSinType, typename ScattererType>
  struct direct_sum_over_equivalent_h
  {
    typedef typename ScattererType::float_type float_type;
    typedef std::complex<float_type> complex_type;

    direct_sum_over_equivalent_h(
      CosSinType const& cos_sin_,
      sgtbx::space_group const& space_group_,
      miller::index<> h,
      float_type d_star_sq_)
    :
      cos_sin(cos_sin_),
      hr_ht(cos_sin_, space_group_, h),
      d_star_sq(d_star_sq_),
      sum_f_calc(0,0)
    {}

    // Adds one scatterer's contribution (f0: its form factor at this
    // resolution) to sum_f_calc.
    void add_contribution_of(ScattererType const& scatterer, float_type f0)
    {
      typedef float_type f_t;
      typedef complex_type c_t;
      c_t f_calc(0,0);
      // Sum exp-terms over the cached symmetry-equivalent (hr, ht) pairs.
      for(std::size_t i=0;i<hr_ht.groups.size();i++) {
        hr_ht_group<f_t> const& g = hr_ht.groups[i];
        f_t hrx = g.hr * scatterer.site;
        c_t term = cos_sin.get(hrx + g.ht);
        if (scatterer.flags.use_u_aniso()) {
          // Anisotropic displacement attenuation, per symmetry copy.
          f_t dw = adptbx::debye_waller_factor_u_star(g.hr, scatterer.u_star);
          term *= dw;
          if (scatterer.anharmonic_adp) {
            term *= scatterer.anharmonic_adp->calculate(g.hr);
          }
        }
        f_calc += term;
      }
      if (hr_ht.is_origin_centric) {
        // Origin-centric: imaginary parts cancel; keep twice the real part.
        f_calc = c_t(2*f_calc.real(),0);
      }
      else if (hr_ht.is_centric) {
        f_calc += std::conj(f_calc) * hr_ht.f_h_inv_t;
      }
      if (scatterer.flags.use_u_iso() && scatterer.u_iso != 0) {
        // Isotropic displacement attenuation, applied once per scatterer.
        f_t dw=adptbx::debye_waller_factor_u_iso(d_star_sq/4, scatterer.u_iso);
        f_calc *= dw;
      }
      f_t w = scatterer.weight();
      // (f0 + f') and f'' scaled by occupancy/multiplicity weight; the
      // complex multiply is skipped when there is no anomalous term.
      f_t f0p_w = (f0 + scatterer.fp) * w;
      f_t fdp_w = scatterer.fdp;
      if (fdp_w != 0) {
        fdp_w *= w;
        f_calc *= c_t(f0p_w, fdp_w);
      }
      else {
        f_calc *= f0p_w;
      }
      sum_f_calc += f_calc;
    }

    // Final structure factor, scaled by the lattice-translation factor.
    complex_type f_calc() { return sum_f_calc * hr_ht.ltr_factor; }

    CosSinType const &cos_sin;
    hr_ht_cache<float_type> hr_ht;
    float_type d_star_sq;
    complex_type sum_f_calc;
  };
template <class ScattererType=scatterer<> > class direct { public: typedef ScattererType scatterer_type; typedef typename ScattererType::float_type float_type; direct() {} direct( uctbx::unit_cell const& unit_cell, sgtbx::space_group const& space_group, af::const_ref<miller::index<> > const& miller_indices, af::const_ref<ScattererType> const& scatterers, xray::scattering_type_registry const& scattering_type_registry) { math::cos_sin_exact<float_type> cos_sin; compute(cos_sin, unit_cell, space_group, miller_indices, scatterers, scattering_type_registry); } template<class CosSinType> direct( CosSinType const& cos_sin, uctbx::unit_cell const& unit_cell, sgtbx::space_group const& space_group, af::const_ref<miller::index<> > const& miller_indices, af::const_ref<ScattererType> const& scatterers, xray::scattering_type_registry const& scattering_type_registry) { compute(cos_sin, unit_cell, space_group, miller_indices, scatterers, scattering_type_registry); } af::shared<std::complex<float_type> > const& f_calc() const { return f_calc_; } private: af::shared<std::complex<float_type> > f_calc_; template <typename CosSinType> void compute( CosSinType const& cos_sin, uctbx::unit_cell const& unit_cell, sgtbx::space_group const& space_group, af::const_ref<miller::index<> > const& miller_indices, af::const_ref<ScattererType> const& scatterers, xray::scattering_type_registry const& scattering_type_registry) { typedef float_type f_t; typedef std::complex<float_type> c_t; int n = static_cast<int>(miller_indices.size()); f_calc_ = af::shared<c_t>(n, af::init_functor_null<c_t>()); c_t *f_calc_beg = f_calc_.begin(); af::shared<std::size_t> scattering_type_indices = scattering_type_registry.unique_indices(scatterers); /* The OpenMP standard specifies that A throw executed inside a parallel region must cause execution to resume within the same parallel region, and it must be caught by the same thread that threw the exception. 
Since a std::runtime_error may be thrown during Debye-Waller computations (c.f. adptbx.h, function debye_waller_factor_exp) one must make sure it cannot escape the body of the parallelised loop. So we catch it inside the loop and then re-throw it immediately after the loop finished. */ boost::optional<std::runtime_error> error; #if !defined(CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_NO_PRAGMA_OMP) #if !defined(__DECCXX_VER) || (defined(_OPENMP) && _OPENMP > 199819) #pragma omp parallel for schedule(static) #endif #endif for(int i=0;i<n;i++) { try { miller::index<> h = miller_indices[i]; f_t d_star_sq = unit_cell.d_star_sq(h); af::shared<double> form_factors = scattering_type_registry.unique_form_factors_at_d_star_sq( d_star_sq); direct_sum_over_equivalent_h<CosSinType, ScattererType> sum(cos_sin, space_group, h, d_star_sq); for(std::size_t j=0; j<scatterers.size(); ++j) { sum.add_contribution_of(scatterers[j], form_factors[scattering_type_indices[j]]); } f_calc_beg[i] = sum.f_calc(); } catch (std::runtime_error e) { #pragma omp critical { // The first error will be recorded only. if (!error) error = e; } } } if (error) throw *error; } }; }}} // namespace cctbx::xray::structure_factors #endif // CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H
target_data_array_extension.c
// -------------------------------------------------- // Check extends before // -------------------------------------------------- // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // -------------------------------------------------- // Check extends after // -------------------------------------------------- // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // END. 
#include <stdio.h> #define BEFORE 0 #define AFTER 1 #define SIZE 100 #if EXTENDS == BEFORE # define SMALL_BEG (SIZE-2) # define SMALL_END SIZE # define LARGE_BEG 0 # define LARGE_END SIZE #elif EXTENDS == AFTER # define SMALL_BEG 0 # define SMALL_END 2 # define LARGE_BEG 0 # define LARGE_END SIZE #else # error EXTENDS undefined #endif #define SMALL_SIZE (SMALL_END-SMALL_BEG) #define LARGE_SIZE (LARGE_END-LARGE_BEG) #define SMALL SMALL_BEG:SMALL_SIZE #define LARGE LARGE_BEG:LARGE_SIZE int main() { int arr[SIZE]; // CHECK: addr=0x[[#%x,SMALL_ADDR:]], size=[[#%u,SMALL_BYTES:]] fprintf(stderr, "addr=%p, size=%ld\n", &arr[SMALL_BEG], SMALL_SIZE * sizeof arr[0]); // CHECK: addr=0x[[#%x,LARGE_ADDR:]], size=[[#%u,LARGE_BYTES:]] fprintf(stderr, "addr=%p, size=%ld\n", &arr[LARGE_BEG], LARGE_SIZE * sizeof arr[0]); // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: arr[LARGE]) { #pragma omp target data map(present, tofrom: arr[SMALL]) ; } // CHECK: arr is present fprintf(stderr, "arr is present\n"); // CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes), but device allocation maps to host at 0x{{0*}}[[#SMALL_ADDR]] ([[#SMALL_BYTES]] bytes) // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes) // CHECK: Libomptarget error: Call to getOrAllocTgtPtr returned null pointer ('present' map type modifier). // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target data map(alloc: arr[SMALL]) { #pragma omp target data map(present, tofrom: arr[LARGE]) ; } // CHECK-NOT: arr is present fprintf(stderr, "arr is present\n"); return 0; }
parallel_for.h
/*!
 * Copyright (c) 2021 by Contributors
 * \file runtime/parallel_for.h
 * \brief OpenMP-based parallel-for helpers with exception propagation and a
 *        configurable per-thread grain size.
 */
#ifndef DGL_RUNTIME_PARALLEL_FOR_H_
#define DGL_RUNTIME_PARALLEL_FOR_H_

#include <dmlc/omp.h>

#include <algorithm>
#include <string>
#include <cstdlib>
#include <exception>
#include <atomic>

namespace {
// Integer ceiling division: divup(7, 2) == 4.
int64_t divup(int64_t x, int64_t y) {
  return (x + y - 1) / y;
}
}

namespace dgl {
namespace runtime {
namespace {
// Decide how many OpenMP threads to use for the range [begin, end).
// Falls back to a single thread when already inside a parallel region
// (no nested parallelism), when the range fits in one grain, or when the
// range has exactly one element.
size_t compute_num_threads(size_t begin, size_t end, size_t grain_size) {
  if (omp_in_parallel() || end - begin <= grain_size || end - begin == 1)
    return 1;

  // Cap the thread count so each thread gets at least grain_size elements.
  return std::min(
      static_cast<int64_t>(omp_get_max_threads()),
      divup(end - begin, grain_size));
}

// Lazily reads the default grain size from the environment variable
// DGL_PARALLEL_FOR_GRAIN_SIZE (once, at construction); defaults to 1.
struct DefaultGrainSizeT {
  size_t grain_size;

  DefaultGrainSizeT() {
    auto var = std::getenv("DGL_PARALLEL_FOR_GRAIN_SIZE");

    if (!var) {
      grain_size = 1;
    } else {
      grain_size = std::stoul(var);
    }
  }

  size_t operator()() {
    return grain_size;
  }
};
}  // namespace

// NOTE(review): being `static` in a header, every translation unit gets its
// own copy (and re-reads the environment variable once per TU) — presumably
// acceptable since the value is identical everywhere.
static DefaultGrainSizeT default_grain_size;

/*!
 * \brief OpenMP-based parallel for loop.
 *
 * It requires each thread's workload to have at least \a grain_size elements.
 * The loop body will be a function that takes in a single argument \a i, which
 * stands for the index of the workload.
 *
 * \note \a f is invoked with a half-open sub-range [begin_tid, end_tid), not a
 *       single index. Exceptions thrown inside \a f are captured (only the
 *       first one, guarded by an atomic flag) and rethrown on the calling
 *       thread after the parallel region ends, since exceptions must not
 *       escape an OpenMP structured block.
 */
template <typename F>
void parallel_for(
    const size_t begin,
    const size_t end,
    const size_t grain_size,
    F&& f) {
  if (begin >= end) {
    return;  // empty range: nothing to do
  }

#ifdef _OPENMP
  auto num_threads = compute_num_threads(begin, end, grain_size);

  // (BarclayII) the exception code is borrowed from PyTorch.
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;
#pragma omp parallel num_threads(num_threads)
  {
    auto tid = omp_get_thread_num();
    // Static partition: thread `tid` handles [begin + tid*chunk, ...).
    auto chunk_size = divup((end - begin), num_threads);
    auto begin_tid = begin + tid * chunk_size;
    if (begin_tid < end) {
      auto end_tid = std::min(end, chunk_size + begin_tid);
      try {
        f(begin_tid, end_tid);
      } catch (...) {
        // Record only the first exception; later ones are dropped.
        if (!err_flag.test_and_set())
          eptr = std::current_exception();
      }
    }
  }
  if (eptr)
    std::rethrow_exception(eptr);
#else
  // No OpenMP: run the whole range serially on the calling thread.
  f(begin, end);
#endif
}

/*!
 * \brief OpenMP-based parallel for loop with default grain size.
 *
 * parallel_for with grain size to default value, either 1 or controlled through
 * environment variable DGL_PARALLEL_FOR_GRAIN_SIZE.
 * If grain size is set to 1, the function behaves the same way as OpenMP
 * parallel for pragma with static scheduling.
 */
template <typename F>
void parallel_for(
    const size_t begin,
    const size_t end,
    F&& f) {
  parallel_for(begin, end, default_grain_size(), std::forward<F>(f));
}
}  // namespace runtime
}  // namespace dgl

#endif  // DGL_RUNTIME_PARALLEL_FOR_H_
GB_unop__identity_int16_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_int16_int64
// op(A') function:  GB_unop_tran__identity_int16_int64

// C type:   int16_t
// A type:   int64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int64_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ;     \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (false here: int64_t is narrowed to int16_t, so a plain memcpy cannot be
// used and the cast loop below must run)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator (with int64->int16 typecast) to all anz
// entries of Ax, writing the result into Cx, using nthreads OpenMP threads.
// Ab, when non-NULL, is A's bitmap: entries with Ab [p] == 0 are skipped.
GrB_Info GB_unop_apply__identity_int16_int64
(
    int16_t *Cx,            // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                int64_t aij = Ax [p] ;
                int16_t z = (int16_t) aij ;
                Cx [p] = z ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all generated operators and is
// textually included from GB_unop_transpose.c, driven by the macros above.
GrB_Info GB_unop_tran__identity_int16_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
create_dummy_ciphertext.c
#include <ristretto_elgamal.h>
#include <omp.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>   /* malloc, free, exit — previously used without declaration */
#include <string.h>   /* memset — previously used without declaration */

/*
 * This executable program creates dummy ciphertexts under a specific pair of public keys
 * In order to create the dummy ciphertext for 574-block case,
 * the stack size limit may need to be elevated.
 */
int main() {
    /* Load the 59 public keys and their precomputed fast-exponentiation
     * tables (one per key, plus one for the group base point). */
    printf("\033[0;32m[INFO]\033[0m Loading public keys from the current directory...\n");
    ristretto255_point_t pk[59];
    LoadPubKey(pk, "./pub.key");

    fastecexp_state st_pk[60];
    char filename[59][150];
    /* Table loading is independent per key, so it parallelizes trivially. */
    #pragma omp parallel for
    for (int i = 0; i < 59; i++) {
        sprintf(filename[i], "/table/pub_%d.tab", i);
        TableLoad(&st_pk[i], filename[i]);
    }
    TableLoad(&st_pk[59], "/table/pub_base.tab");
    printf("\033[0;32m[INFO]\033[0m Public keys loaded.\n");

    /* Block counts corresponding to the supported payload sizes. */
    int BLOCK_array[4];
    BLOCK_array[0] = 3;
    BLOCK_array[1] = 9;
    BLOCK_array[2] = 36;
    BLOCK_array[3] = 574;
    /* 1MB => 574, 64KB => 36, 16KB => 9, 4KB => 3 */

    for (int BLOCK_array_index = 0; BLOCK_array_index < 4; BLOCK_array_index++) {
        int BLOCK = BLOCK_array[BLOCK_array_index];
        printf("\033[0;32m[INFO]\033[0m Preparing a dummy ciphertext of %d blocks.\n", BLOCK);

        /* All-zero plaintext: 1827 bytes of payload per block. */
        uint8_t input[1827 * BLOCK];
        memset(input, 0, sizeof(input));

        FILE *rand_src = fopen("/dev/urandom", "rb");
        if (rand_src == NULL) {
            /* Without a randomness source the encryption below is undefined. */
            printf("\033[0;31m[ERROR]\033[0m Failed to open /dev/urandom.\n");
            exit(1);
        }

        /* encode: 59 group elements per block */
        ristretto255_point_t output[59 * BLOCK];
        ristretto_elgamal_encode(output, input, 0, 1827 * BLOCK);

        /* encrypt: 60 group elements per block (59 payload + 1 ephemeral) */
        ristretto255_point_t ct[60 * BLOCK];
        for (int i = 0; i < BLOCK; i++) {
            Encrypt(&ct[i * 60], &output[i * 59], st_pk, rand_src);
        }
        fclose(rand_src);

        /* encode the ciphertext */
        size_t serialized_ct_size = Serialize_Honest_Size(60 * BLOCK);
        unsigned char *str = malloc(sizeof(char) * serialized_ct_size);
        Serialize_Honest(str, ct, 60 * BLOCK);

        /* encode the plaintext (round-trip through deserialization as a sanity check) */
        size_t serialized_pt_size = Serialize_Honest_Size(59 * BLOCK);
        unsigned char *str_pt = malloc(sizeof(char) * serialized_pt_size);
        Serialize_Honest(str_pt, output, 59 * BLOCK);
        Deserialize_Honest(output, str_pt, 59 * BLOCK);

        printf("\033[0;32m[INFO]\033[0m Writing the dummy ciphertext of %d blocks to ./data/dummy_ciphertext_%d.\n", BLOCK, BLOCK);
        char filename_dummy_ciphertext[150];
        sprintf(filename_dummy_ciphertext, "./data/dummy_ciphertext_%d", BLOCK);
        FILE *fp_dummy_ciphertext = fopen(filename_dummy_ciphertext, "wb");
        if (fp_dummy_ciphertext == NULL) {
            printf("\033[0;31m[ERROR]\033[0m Failed to write the dummy ciphertext.\n");
            exit(1);
        }
        fwrite(str, serialized_ct_size, 1, fp_dummy_ciphertext);
        fclose(fp_dummy_ciphertext);
        printf("\033[0;32m[INFO]\033[0m Dummy ciphertext of %d blocks written.\n", BLOCK);

        printf("\033[0;32m[INFO]\033[0m Writing the dummy plaintext of %d blocks to ./data/dummy_plaintext_%d.\n", BLOCK, BLOCK);
        char filename_dummy_plaintext[150];
        sprintf(filename_dummy_plaintext, "./data/dummy_plaintext_%d", BLOCK);
        FILE *fp_dummy_plaintext = fopen(filename_dummy_plaintext, "wb");
        if (fp_dummy_plaintext == NULL) {
            printf("\033[0;31m[ERROR]\033[0m Failed to write the dummy plaintext.\n");
            exit(1);
        }
        fwrite(str_pt, serialized_pt_size, 1, fp_dummy_plaintext);
        fclose(fp_dummy_plaintext);
        printf("\033[0;32m[INFO]\033[0m Dummy plaintext of %d blocks written.\n", BLOCK);

        printf("\033[0;32m[INFO]\033\033\033[0m Checking if the dummy ciphertext can be decrypted correctly...\n");
        uint8_t recovered[1827 * BLOCK];
        size_t actual_size;
        memset(input, 0, sizeof(input));
        ristretto_elgamal_decode(recovered, output, 59 * BLOCK, &actual_size, 1827 * BLOCK);
        printf("\033[0;32m[INFO]\033\033[0m Decrypted plaintext has a size of %ld bytes (expected: 0 bytes).\n", actual_size);

        free(str);
        free(str_pt);  /* was leaked on every loop iteration */
    }

    for (int i = 0; i < 59; i++) {
        TableRelease(&st_pk[i]);
    }
    TableRelease(&st_pk[59]);  /* base-point table was loaded but never released */

    return 0;
}
libimagequant.c
/* ** © 2009-2018 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <stdint.h> #include <limits.h> #if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800) #error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher." #error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version." #endif float liqpowf(float x, float y); #ifdef _OPENMP #include <omp.h> #define LIQ_TEMP_ROW_WIDTH(img_width) (((img_width) | 15) + 1) /* keep alignment & leave space between rows to avoid cache line contention */ #else #define LIQ_TEMP_ROW_WIDTH(img_width) (img_width) #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "libimagequant.h" #include "pam.h" #include "mediancut.h" #include "nearest.h" #include "blur.h" #include "kmeans.h" #define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */ // each structure has a pointer as a unique identifier that allows type checking at run time static const char liq_attr_magic[] = "liq_attr"; static const char liq_image_magic[] = "liq_image"; static const char liq_result_magic[] = "liq_result"; static const char liq_histogram_magic[] = "liq_histogram"; static const char liq_remapping_result_magic[] = "liq_remapping_result"; static const char liq_freed_magic[] = "free"; #define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic) #define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr) struct liq_attr { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); double target_mse, max_mse, kmeans_iteration_limit; float min_opaque_val; unsigned int 
max_colors, max_histogram_entries; unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */; unsigned int kmeans_iterations, feedback_loop_trials; bool last_index_transparent, use_contrast_maps; unsigned char use_dither_map; unsigned char speed; unsigned char progress_stage1, progress_stage2, progress_stage3; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_log_callback_function *log_callback; void *log_callback_user_info; liq_log_flush_callback_function *log_flush_callback; void *log_flush_callback_user_info; }; struct liq_image { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); f_pixel *f_pixels; rgba_pixel **rows; double gamma; unsigned int width, height; unsigned char *importance_map, *edges, *dither_map; rgba_pixel *pixels, *temp_row; f_pixel *temp_f_row; liq_image_get_rgba_row_callback *row_callback; void *row_callback_user_info; liq_image *background; float min_opaque_val; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; bool free_pixels, free_rows, free_rows_internal; }; typedef struct liq_remapping_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); unsigned char *pixels; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; double gamma, palette_error; float dither_level; unsigned char use_dither_map; unsigned char progress_stage1; } liq_remapping_result; struct liq_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); liq_remapping_result *remapping; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; float dither_level; double gamma, palette_error; int min_posterization_output; unsigned char use_dither_map; }; struct liq_histogram { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); struct acolorhash_table 
*acht; double gamma; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; unsigned short ignorebits; bool had_image_added; }; static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) LIQ_NONNULL; static void contrast_maps(liq_image *image) LIQ_NONNULL; static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) LIQ_NONNULL; static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row) LIQ_NONNULL; static bool liq_image_get_row_f_init(liq_image *img) LIQ_NONNULL; static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row) LIQ_NONNULL; static void liq_remapping_result_destroy(liq_remapping_result *result) LIQ_NONNULL; static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **) LIQ_NONNULL; static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) LIQ_NONNULL; LIQ_NONNULL static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); LIQ_ARRAY(char, buf, required_space); va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(context, buf, context->log_callback_user_info); } } LIQ_NONNULL inline static void verbose_print(const liq_attr *attr, const char *msg) { if (attr->log_callback) { attr->log_callback(attr, msg, attr->log_callback_user_info); } } LIQ_NONNULL static void liq_verbose_printf_flush(liq_attr *attr) { if (attr->log_flush_callback) { attr->log_flush_callback(attr, attr->log_flush_callback_user_info); } } LIQ_NONNULL static bool liq_progress(const liq_attr *attr, const float percent) { return attr->progress_callback && !attr->progress_callback(percent, attr->progress_callback_user_info); } LIQ_NONNULL static bool liq_remap_progress(const liq_remapping_result *quant, const float percent) { return quant->progress_callback && !quant->progress_callback(percent, quant->progress_callback_user_info); } #if USE_SSE inline static bool is_sse_available() { #if (defined(__x86_64__) || defined(__amd64) || defined(_WIN64)) return true; #elif _MSC_VER int info[4]; __cpuid(info, 1); /* bool is implemented as a built-in type of size 1 in MSVC */ return info[3] & (1<<26) ? 
true : false; #else int a,b,c,d; cpuid(1, a, b, c, d); return d & (1<<25); // edx bit 25 is set when SSE is present #endif } #endif /* make it clear in backtrace when user-supplied handle points to invalid memory */ NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header); LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header) { if (!user_supplied_pointer) { return false; } if (user_supplied_pointer->magic_header == liq_freed_magic) { fprintf(stderr, "%s used after being freed", expected_magic_header); // this is not normal error handling, this is programmer error that should crash the program. // program cannot safely continue if memory has been used after it's been freed. // abort() is nasty, but security vulnerability may be worse. abort(); } return user_supplied_pointer->magic_header == expected_magic_header; } NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer); LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer) { if (!pointer) { return false; } // Force a read from the given (potentially invalid) memory location in order to check early whether this crashes the program or not. // It doesn't matter what value is read, the code here is just to shut the compiler up about unused read. 
char test_access = *((volatile char *)pointer); return test_access || true; } LIQ_NONNULL static void liq_log_error(const liq_attr *attr, const char *msg) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; liq_verbose_printf(attr, " error: %s", msg); } static double quality_to_mse(long quality) { if (quality == 0) { return MAX_DIFF; } if (quality == 100) { return 0; } // curve fudged to be roughly similar to quality of libjpeg // except lowest 10 for really low number of colors const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001); return extra_low_quality_fudge + 2.5/(double)liqpowf(210.0 + quality, 1.2) * (100.1-quality)/100.0; } static unsigned int mse_to_quality(double mse) { for(int i=100; i > 0; i--) { if (mse <= quality_to_mse(i) + 0.000001) { // + epsilon for floating point errors return i; } } return 0; } /** internally MSE is a sum of all channels with pixels 0..1 range, but other software gives per-RGB-channel MSE for 0..255 range */ static double mse_to_standard_mse(double mse) { return mse * 65536.0/6.0; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_quality(liq_attr* attr, int minimum, int target) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (target < 0 || target > 100 || target < minimum || minimum < 0) return LIQ_VALUE_OUT_OF_RANGE; attr->target_mse = quality_to_mse(target); attr->max_mse = quality_to_mse(minimum); return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_quality(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return mse_to_quality(attr->max_mse); } LIQ_EXPORT LIQ_NONNULL int liq_get_max_quality(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return mse_to_quality(attr->target_mse); } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_max_colors(liq_attr* attr, int colors) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (colors < 2 || colors > 256) return LIQ_VALUE_OUT_OF_RANGE; attr->max_colors = colors; return LIQ_OK; } 
LIQ_EXPORT LIQ_NONNULL int liq_get_max_colors(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->max_colors; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_posterization(liq_attr *attr, int bits) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (bits < 0 || bits > 4) return LIQ_VALUE_OUT_OF_RANGE; attr->min_posterization_output = bits; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_posterization(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->min_posterization_output; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_speed(liq_attr* attr, int speed) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE; unsigned int iterations = MAX(8-speed, 0); iterations += iterations * iterations/2; attr->kmeans_iterations = iterations; attr->kmeans_iteration_limit = 1.0/(double)(1<<(23-speed)); attr->feedback_loop_trials = MAX(56-9*speed, 0); attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed); attr->min_posterization_input = (speed >= 8) ? 1 : 0; attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // parallelized dither map might speed up floyd remapping if (attr->use_dither_map && speed < 3) { attr->use_dither_map = 2; // always } attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map; attr->speed = speed; attr->progress_stage1 = attr->use_contrast_maps ? 
20 : 8; if (attr->feedback_loop_trials < 2) { attr->progress_stage1 += 30; } attr->progress_stage3 = 50 / (1+speed); attr->progress_stage2 = 100 - attr->progress_stage1 - attr->progress_stage3; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_speed(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->speed; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_output_gamma(liq_result* res, double gamma) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER; if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE; if (res->remapping) { liq_remapping_result_destroy(res->remapping); res->remapping = NULL; } res->gamma = gamma; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_opacity(liq_attr* attr, int min) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (min < 0 || min > 255) return LIQ_VALUE_OUT_OF_RANGE; attr->min_opaque_val = (double)min/255.0; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_opacity(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return MIN(255.f, 256.f * attr->min_opaque_val); } LIQ_EXPORT LIQ_NONNULL void liq_set_last_index_transparent(liq_attr* attr, int is_last) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->last_index_transparent = !!is_last; } LIQ_EXPORT void liq_attr_set_progress_callback(liq_attr *attr, liq_progress_callback_function *callback, void *user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->progress_callback = callback; attr->progress_callback_user_info = user_info; } LIQ_EXPORT void liq_result_set_progress_callback(liq_result *result, liq_progress_callback_function *callback, void *user_info) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return; result->progress_callback = callback; result->progress_callback_user_info = user_info; } LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) 
return; liq_verbose_printf_flush(attr); attr->log_callback = callback; attr->log_callback_user_info = user_info; } LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->log_flush_callback = callback; attr->log_flush_callback_user_info = user_info; } LIQ_EXPORT liq_attr* liq_attr_create() { return liq_attr_create_with_allocator(NULL, NULL); } LIQ_EXPORT LIQ_NONNULL void liq_attr_destroy(liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return; } liq_verbose_printf_flush(attr); attr->magic_header = liq_freed_magic; attr->free(attr); } LIQ_EXPORT LIQ_NONNULL liq_attr* liq_attr_copy(const liq_attr *orig) { if (!CHECK_STRUCT_TYPE(orig, liq_attr)) { return NULL; } liq_attr *attr = orig->malloc(sizeof(liq_attr)); if (!attr) return NULL; *attr = *orig; return attr; } static void *liq_aligned_malloc(size_t size) { unsigned char *ptr = malloc(size + 16); if (!ptr) { return NULL; } uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1] ptr += offset; assert(0 == (((uintptr_t)ptr) & 15)); ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free() return ptr; } LIQ_NONNULL static void liq_aligned_free(void *inptr) { unsigned char *ptr = inptr; size_t offset = ptr[-1] ^ 0x59; assert(offset > 0 && offset <= 16); free(ptr - offset); } LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*)) { #if USE_SSE if (!is_sse_available()) { return NULL; } #endif if (!custom_malloc && !custom_free) { custom_malloc = liq_aligned_malloc; custom_free = liq_aligned_free; } else if (!custom_malloc != !custom_free) { return NULL; // either specify both or none } liq_attr *attr = custom_malloc(sizeof(liq_attr)); if (!attr) return NULL; *attr = (liq_attr) { .magic_header = liq_attr_magic, .malloc = custom_malloc, .free = custom_free, .max_colors = 256, 
.min_opaque_val = 1, // whether preserve opaque colors for IE (1.0=no, does not affect alpha) .last_index_transparent = false, // puts transparent color at last index. This is workaround for blu-ray subtitles. .target_mse = 0, .max_mse = MAX_DIFF, }; liq_set_speed(attr, 4); return attr; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_add_fixed_color(liq_image *img, liq_color color) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (img->fixed_colors_count > 255) return LIQ_UNSUPPORTED; float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); img->fixed_colors[img->fixed_colors_count++] = rgba_to_f(gamma_lut, (rgba_pixel){ .r = color.r, .g = color.g, .b = color.b, .a = color.a, }); return LIQ_OK; } LIQ_NONNULL static liq_error liq_histogram_add_fixed_color_f(liq_histogram *hist, f_pixel color) { if (hist->fixed_colors_count > 255) return LIQ_UNSUPPORTED; hist->fixed_colors[hist->fixed_colors_count++] = color; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_fixed_color(liq_histogram *hist, liq_color color, double gamma) { if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return LIQ_INVALID_POINTER; float gamma_lut[256]; to_f_set_gamma(gamma_lut, gamma ? gamma : 0.45455); const f_pixel px = rgba_to_f(gamma_lut, (rgba_pixel){ .r = color.r, .g = color.g, .b = color.b, .a = color.a, }); return liq_histogram_add_fixed_color_f(hist, px); } LIQ_NONNULL static bool liq_image_use_low_memory(liq_image *img) { img->temp_f_row = img->malloc(sizeof(img->f_pixels[0]) * LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_max_threads()); return img->temp_f_row != NULL; } LIQ_NONNULL static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint) { return (size_t)img->width * (size_t)img->height > (low_memory_hint ? 
LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow } static liq_image *liq_image_create_internal(const liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma) { if (gamma < 0 || gamma > 1.0) { liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)"); return NULL; } if (!rows && !row_callback) { liq_log_error(attr, "missing row data"); return NULL; } liq_image *img = attr->malloc(sizeof(liq_image)); if (!img) return NULL; *img = (liq_image){ .magic_header = liq_image_magic, .malloc = attr->malloc, .free = attr->free, .width = width, .height = height, .gamma = gamma ? gamma : 0.45455, .rows = rows, .row_callback = row_callback, .row_callback_user_info = row_callback_user_info, .min_opaque_val = attr->min_opaque_val, }; if (!rows || attr->min_opaque_val < 1.f) { img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * LIQ_TEMP_ROW_WIDTH(width) * omp_get_max_threads()); if (!img->temp_row) return NULL; } // if image is huge or converted pixels are not likely to be reused then don't cache converted pixels if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) { verbose_print(attr, " conserving memory"); if (!liq_image_use_low_memory(img)) return NULL; } if (img->min_opaque_val < 1.f) { verbose_print(attr, " Working around IE6 bug by making image less transparent..."); } return img; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) { return LIQ_VALUE_OUT_OF_RANGE; } if (ownership_flags & LIQ_OWN_ROWS) { if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE; img->free_rows = true; } if (ownership_flags & LIQ_OWN_PIXELS) { img->free_pixels = true; 
        if (!img->pixels) {
            // for simplicity of this API there's no explicit bitmap argument,
            // so the row with the lowest address is assumed to be at the start of the bitmap
            img->pixels = img->rows[0];
            for(unsigned int i=1; i < img->height; i++) {
                img->pixels = MIN(img->pixels, img->rows[i]);
            }
        }
    }

    return LIQ_OK;
}

LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image);
LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image);

/* Attaches a per-pixel importance (weight) map, one byte per pixel.
 * LIQ_COPY_PIXELS duplicates the buffer; LIQ_OWN_PIXELS takes ownership of
 * the caller's buffer. Any previously-set map is freed first. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_importance_map(liq_image *img, unsigned char importance_map[], size_t buffer_size, enum liq_ownership ownership)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!CHECK_USER_POINTER(importance_map)) return LIQ_INVALID_POINTER;
    const size_t required_size = (size_t)img->width * (size_t)img->height; // one byte per pixel
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    if (ownership == LIQ_COPY_PIXELS) {
        unsigned char *tmp = img->malloc(required_size);
        if (!tmp) {
            return LIQ_OUT_OF_MEMORY;
        }
        memcpy(tmp, importance_map, required_size);
        importance_map = tmp;
    } else if (ownership != LIQ_OWN_PIXELS) {
        return LIQ_UNSUPPORTED;
    }

    liq_image_free_importance_map(img); // drop any previously-set map
    img->importance_map = importance_map;
    return LIQ_OK;
}

/* Sets a background image the foreground will be remapped against; takes
 * ownership of `background` (destroyed with `img`). Dimensions must match
 * exactly and backgrounds cannot be nested. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_background(liq_image *img, liq_image *background)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(background, liq_image)) return LIQ_INVALID_POINTER;
    if (background->background) {
        return LIQ_UNSUPPORTED; // backgrounds cannot themselves have backgrounds
    }
    if (img->width != background->width || img->height != background->height) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    if (img->background) {
        liq_image_destroy(img->background);
    }

    img->background = background;
    liq_image_free_maps(img); // Force them to be re-analyzed with the background
    return LIQ_OK;
}

/* Validates image dimensions: positive, and small enough that later
 * size arithmetic (rows * width * pixel size) cannot overflow. */
LIQ_NONNULL static bool check_image_size(const liq_attr *attr, const int width, const int height)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return false;
    }
    if (width <= 0 || height <= 0) {
        liq_log_error(attr, "width and height must be > 0");
        return false;
    }
    if (width > INT_MAX/sizeof(rgba_pixel)/height || width > INT_MAX/16/sizeof(f_pixel) || height > INT_MAX/sizeof(size_t)) {
        liq_log_error(attr, "image too large");
        return false;
    }
    return true;
}

/* Creates an image whose rows are produced lazily by a user callback. */
LIQ_EXPORT liq_image *liq_image_create_custom(const liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma);
}

/* Creates an image from an array of caller-owned row pointers. */
LIQ_EXPORT liq_image *liq_image_create_rgba_rows(const liq_attr *attr, void *const rows[], int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    for(int i=0; i < height; i++) {
        if (!CHECK_USER_POINTER(rows+i) || !CHECK_USER_POINTER(rows[i])) {
            liq_log_error(attr, "invalid row pointers");
            return NULL;
        }
    }
    return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma);
}

/* Creates an image from one contiguous RGBA bitmap by building an internal
 * row-pointer array (owned by the image and freed on destroy). */
LIQ_EXPORT LIQ_NONNULL liq_image *liq_image_create_rgba(const liq_attr *attr, const void* bitmap, int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    if (!CHECK_USER_POINTER(bitmap)) {
        liq_log_error(attr, "invalid bitmap pointer");
        return NULL;
    }

    rgba_pixel *const pixels = (rgba_pixel *const)bitmap;
    rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height);
    if (!rows) return NULL;

    for(int i=0; i < height; i++) {
        rows[i] = pixels + width * i;
    }

    liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma);
    if (!image) {
        attr->free(rows);
        return NULL;
    }
    image->free_rows = true;          // the rows array belongs to the image...
    image->free_rows_internal = true; // ...and was allocated internally here
    return image;
}

// Kept out-of-line and exported so that user callbacks show up clearly in stack traces.
NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info);
LIQ_EXPORT void
liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info)
{
    assert(callback);
    assert(temp_row);
    callback(temp_row, row, width, user_info);
}

/* True if the image can produce RGBA rows at all: either direct row
 * pointers, or a callback plus a scratch row to generate into. */
LIQ_NONNULL inline static bool liq_image_has_rgba_pixels(const liq_image *img)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) {
        return false;
    }
    return img->rows || (img->temp_row && img->row_callback);
}

/* True if rows can be read in place — i.e. direct rows exist and no per-row
 * alpha modification (IE transparency workaround) is needed. */
LIQ_NONNULL inline static bool liq_image_can_use_rgba_rows(const liq_image *img)
{
    assert(liq_image_has_rgba_pixels(img));
    const bool iebug = img->min_opaque_val < 1.f;
    return (img->rows && !iebug);
}

/* Returns row `row` as RGBA pixels. Either the caller's own row, or this
 * thread's temp buffer filled via memcpy or the user callback and then
 * optionally alpha-adjusted. */
LIQ_NONNULL static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row)
{
    if (liq_image_can_use_rgba_rows(img)) {
        return img->rows[row];
    }

    assert(img->temp_row);
    // each OpenMP thread gets its own slice of the temp buffer
    rgba_pixel *temp_row = img->temp_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num();
    if (img->rows) {
        memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0]));
    } else {
        liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info);
    }

    if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row); // IE transparency workaround
    return temp_row;
}

/* Converts one RGBA row to linear-light float pixels via the gamma LUT. */
LIQ_NONNULL static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[])
{
    assert(row_f_pixels);
    assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15)); // SSE path needs 16-byte alignment

    const rgba_pixel *const row_pixels = liq_image_get_row_rgba(img, row);

    for(unsigned int col=0; col < img->width; col++) {
        row_f_pixels[col] = rgba_to_f(gamma_lut, row_pixels[col]);
    }
}

/* One-time init of the float-pixel representation (must run on thread 0):
 * caches the whole image as f_pixels when memory allows, otherwise falls
 * back to per-row on-demand conversion. */
LIQ_NONNULL static bool liq_image_get_row_f_init(liq_image *img)
{
    assert(omp_get_thread_num() == 0);
    if (img->f_pixels) {
        return true;
    }
    if (!liq_image_should_use_low_memory(img, false)) {
        img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height);
    }
    if (!img->f_pixels) {
        return liq_image_use_low_memory(img); // fall back to per-row temp buffers
    }

    if (!liq_image_has_rgba_pixels(img)) {
        return false;
    }

    float gamma_lut[256];
        = NULL;
    }
    if (input_image->dither_map) {
        input_image->free(input_image->dither_map);
        input_image->dither_map = NULL;
    }
}

/* Frees the image and everything it owns: pixels/rows (if owned), analysis
 * maps, float caches, temp rows, and the background image recursively. */
LIQ_EXPORT LIQ_NONNULL void liq_image_destroy(liq_image *input_image)
{
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return;
    liq_image_free_rgba_source(input_image);
    liq_image_free_maps(input_image);
    if (input_image->f_pixels) {
        input_image->free(input_image->f_pixels);
    }
    if (input_image->temp_row) {
        input_image->free(input_image->temp_row);
    }
    if (input_image->temp_f_row) {
        input_image->free(input_image->temp_f_row);
    }
    if (input_image->background) {
        liq_image_destroy(input_image->background);
    }
    input_image->magic_header = liq_freed_magic; // poison against use-after-free
    input_image->free(input_image);
}

LIQ_EXPORT liq_histogram* liq_histogram_create(const liq_attr* attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return NULL;
    }

    liq_histogram *hist = attr->malloc(sizeof(liq_histogram));
    if (!hist) return NULL;
    *hist = (liq_histogram) {
        .magic_header = liq_histogram_magic,
        .malloc = attr->malloc,
        .free = attr->free,
        // the larger of the two posterization settings applies to the whole histogram
        .ignorebits = MAX(attr->min_posterization_output, attr->min_posterization_input),
    };
    return hist;
}

LIQ_EXPORT LIQ_NONNULL void liq_histogram_destroy(liq_histogram *hist)
{
    if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return;
    hist->magic_header = liq_freed_magic;
    pam_freeacolorhash(hist->acht);
    hist->free(hist);
}

/* Legacy convenience wrapper: quantize one image, NULL on any error. */
LIQ_EXPORT LIQ_NONNULL liq_result *liq_quantize_image(liq_attr *attr, liq_image *img)
{
    liq_result *res;
    if (LIQ_OK != liq_image_quantize(img, attr, &res)) {
        return NULL;
    }
    return res;
}

/* Quantizes a single image by building a temporary histogram from it. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result_output)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (!liq_image_has_rgba_pixels(img)) {
        return LIQ_UNSUPPORTED;
    }

    liq_histogram *hist = liq_histogram_create(attr);
    if (!hist) {
        return LIQ_OUT_OF_MEMORY;
    }
    liq_error err = liq_histogram_add_image(hist, attr, img);
    if (LIQ_OK != err) {
        return err; // NOTE(review): `hist` appears to leak on this path — confirm and destroy before returning
    }

    err =
        liq_histogram_quantize_internal(hist, attr, false, result_output);
    liq_histogram_destroy(hist);

    return err;
}

LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_quantize(liq_histogram *input_hist, liq_attr *attr, liq_result **result_output)
{
    return liq_histogram_quantize_internal(input_hist, attr, true, result_output);
}

/* Shared quantization driver: validates arguments, finalizes the histogram,
 * and runs the median-cut quantizer. */
LIQ_NONNULL static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output)
{
    if (!CHECK_USER_POINTER(result_output)) return LIQ_INVALID_POINTER;
    *result_output = NULL;

    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;

    if (liq_progress(attr, 0)) return LIQ_ABORTED;

    histogram *hist;
    liq_error err = finalize_histogram(input_hist, attr, &hist);
    if (err != LIQ_OK) {
        return err;
    }

    err = pngquant_quantize(hist, attr, input_hist->fixed_colors_count, input_hist->fixed_colors, input_hist->gamma, fixed_result_colors, result_output);
    pam_freeacolorhist(hist);
    return err;
}

/* Sets dithering strength (0..1) for subsequent remapping and resets any
 * cached remapping state. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_dithering_level(liq_result *res, float dither_level)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;

    if (res->remapping) {
        liq_remapping_result_destroy(res->remapping);
        res->remapping = NULL;
    }

    if (dither_level < 0 || dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE;
    res->dither_level = dither_level;
    return LIQ_OK;
}

/* Creates a per-remap working copy of the quantization result (palette is
 * duplicated so remapping can adjust it without touching the original). */
LIQ_NONNULL static liq_remapping_result *liq_remapping_result_create(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return NULL;
    }

    liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result));
    if (!res) return NULL;
    *res = (liq_remapping_result) {
        .magic_header = liq_remapping_result_magic,
        .malloc = result->malloc,
        .free = result->free,
        .dither_level = result->dither_level,
        .use_dither_map = result->use_dither_map,
        .palette_error = result->palette_error,
        .gamma = result->gamma,
        .palette =
            pam_duplicate_colormap(result->palette),
        .progress_callback = result->progress_callback,
        .progress_callback_user_info = result->progress_callback_user_info,
        .progress_stage1 = result->use_dither_map ? 20 : 0, // dither-map build consumes the first 20% of progress
    };
    return res;
}

LIQ_EXPORT LIQ_NONNULL double liq_get_output_gamma(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    return result->gamma;
}

LIQ_NONNULL static void liq_remapping_result_destroy(liq_remapping_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return;
    if (result->palette) pam_freecolormap(result->palette);
    if (result->pixels) result->free(result->pixels);
    result->magic_header = liq_freed_magic; // poison against use-after-free
    result->free(result);
}

LIQ_EXPORT LIQ_NONNULL void liq_result_destroy(liq_result *res)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return;
    memset(&res->int_palette, 0, sizeof(liq_palette));
    if (res->remapping) {
        memset(&res->remapping->int_palette, 0, sizeof(liq_palette));
        liq_remapping_result_destroy(res->remapping);
    }
    pam_freecolormap(res->palette);
    res->magic_header = liq_freed_magic;
    res->free(res);
}

/* Returns quantization MSE in standard 0..1 scale, or -1 if unknown. */
LIQ_EXPORT LIQ_NONNULL double liq_get_quantization_error(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->palette_error >= 0) {
        return mse_to_standard_mse(result->palette_error);
    }
    return -1;
}

/* Returns remapping MSE in standard 0..1 scale, or -1 if not remapped yet. */
LIQ_EXPORT LIQ_NONNULL double liq_get_remapping_error(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->remapping && result->remapping->palette_error >= 0) {
        return mse_to_standard_mse(result->remapping->palette_error);
    }
    return -1;
}

/* Quantization quality on the public 0..100 scale, or -1 if unknown. */
LIQ_EXPORT LIQ_NONNULL int liq_get_quantization_quality(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->palette_error >= 0) {
        return mse_to_quality(result->palette_error);
    }
    return -1;
}

/* Remapping quality on the public 0..100 scale, or -1 if not remapped yet. */
LIQ_EXPORT LIQ_NONNULL int liq_get_remapping_quality(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->remapping
        && result->remapping->palette_error >= 0) {
        return mse_to_quality(result->remapping->palette_error);
    }
    return -1;
}

/* qsort comparator: most popular palette entries first. */
LIQ_NONNULL static int compare_popularity(const void *ch1, const void *ch2)
{
    const float v1 = ((const colormap_item*)ch1)->popularity;
    const float v2 = ((const colormap_item*)ch2)->popularity;
    return v1 > v2 ? -1 : 1;
}

LIQ_NONNULL static void sort_palette_qsort(colormap *map, int start, int nelem)
{
    if (!nelem) return;
    qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity);
}

#define SWAP_PALETTE(map, a,b) { \
    const colormap_item tmp = (map)->palette[(a)]; \
    (map)->palette[(a)] = (map)->palette[(b)]; \
    (map)->palette[(b)] = tmp; }

/* Reorders palette entries for best PNG output: transparent entries first
 * (shrinks the tRNS chunk), each group sorted by popularity. */
LIQ_NONNULL static void sort_palette(colormap *map, const liq_attr *options)
{
    /*
    ** Step 3.5 [GRR]: remap the palette colors so that all entries with
    ** the maximal alpha value (i.e., fully opaque) are at the end and can
    ** therefore be omitted from the tRNS chunk.
    */
    if (options->last_index_transparent) {
        for(unsigned int i=0; i < map->colors; i++) {
            if (map->palette[i].acolor.a < 1.f/256.f) {
                const unsigned int old = i, transparent_dest = map->colors-1;
                SWAP_PALETTE(map, transparent_dest, old);
                /* colors sorted by popularity make pngs slightly more compressible */
                sort_palette_qsort(map, 0, map->colors-1);
                return;
            }
        }
    }

    // fixed colors always trail the palette; only sort the non-fixed prefix
    unsigned int non_fixed_colors = 0;
    for(unsigned int i = 0; i < map->colors; i++) {
        if (map->palette[i].fixed) {
            break;
        }
        non_fixed_colors++;
    }

    /* move transparent colors to the beginning to shrink trns chunk */
    unsigned int num_transparent = 0;
    for(unsigned int i = 0; i < non_fixed_colors; i++) {
        if (map->palette[i].acolor.a < 255.f/256.f) {
            // current transparent color is swapped with earlier opaque one
            if (i != num_transparent) {
                SWAP_PALETTE(map, num_transparent, i);
                i--; // re-examine the entry swapped into position i
            }
            num_transparent++;
        }
    }

    liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? "y" : "ies");

    /* colors sorted by popularity make pngs slightly more compressible
     * opaque and transparent are sorted separately */
    sort_palette_qsort(map, 0, num_transparent);
    sort_palette_qsort(map, num_transparent, non_fixed_colors - num_transparent);

    if (non_fixed_colors > 9 && map->colors > 16) {
        SWAP_PALETTE(map, 7, 1); // slightly improves compression
        SWAP_PALETTE(map, 8, 2);
        SWAP_PALETTE(map, 9, 3);
    }
}

/* Posterizes one 8-bit channel to keep only the high bits, replicating them
 * into the low bits so the full 0..255 range is still reachable. */
inline static unsigned int posterize_channel(unsigned int color, unsigned int bits)
{
    return (color & ~((1<<bits)-1)) | (color >> (8-bits));
}

/* Converts the float colormap to the public 8-bit palette (with optional
 * posterization) and writes the rounding error back into the colormap. */
LIQ_NONNULL static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize)
{
    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, gamma);

    dest->count = map->colors;
    for(unsigned int x = 0; x < map->colors; ++x) {
        rgba_pixel px = f_to_rgb(gamma, map->palette[x].acolor);

        px.r = posterize_channel(px.r, posterize);
        px.g = posterize_channel(px.g, posterize);
        px.b = posterize_channel(px.b, posterize);
        px.a = posterize_channel(px.a, posterize);

        map->palette[x].acolor = rgba_to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */

        if (!px.a && !map->palette[x].fixed) {
            // arbitrary RGB for fully-transparent, non-fixed entries
            px.r = 71; px.g = 112; px.b = 76;
        }

        dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a};
    }
}

/* Returns the 8-bit palette, building and caching it on first use. Prefers
 * the remapping's palette (which includes k-means refinements) if present. */
LIQ_EXPORT LIQ_NONNULL const liq_palette *liq_get_palette(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL;

    if (result->remapping && result->remapping->int_palette.count) {
        return &result->remapping->int_palette;
    }

    if (!result->int_palette.count) {
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output);
    }
    return &result->int_palette;
}

/* Remaps every pixel to its nearest palette entry (no dithering), feeding
 * per-thread k-means accumulators, and returns the mean remapping error. */
LIQ_NONNULL static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map)
{
    const int rows = input_image->height;
    const unsigned int cols = input_image->width;
    double
        remapping_error=0;

    if (!liq_image_get_row_f_init(input_image)) {
        return -1;
    }
    if (input_image->background && !liq_image_get_row_f_init(input_image->background)) {
        return -1;
    }

    const colormap_item *acolormap = map->palette;

    struct nearest_map *const n = nearest_init(map);
    // palette entry used when a pixel is better represented by the background
    const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0;

    const unsigned int max_threads = omp_get_max_threads();
    // per-thread k-means accumulators; KMEANS_CACHE_LINE_GAP presumably pads
    // against false sharing between threads — confirm in kmeans.h
    LIQ_ARRAY(kmeans_state, average_color, (KMEANS_CACHE_LINE_GAP+map->colors) * max_threads);
    kmeans_init(map, max_threads, average_color);

// GCC 9+ requires all used variables listed when default(none) is in effect
#if __GNUC__ >= 9
    #pragma omp parallel for if (rows*cols > 3000) \
        schedule(static) default(none) shared(acolormap,average_color,cols,input_image,map,n,output_pixels,rows,transparent_index) reduction(+:remapping_error)
#else
    #pragma omp parallel for if (rows*cols > 3000) \
        schedule(static) default(none) shared(acolormap) shared(average_color) reduction(+:remapping_error)
#endif
    for(int row = 0; row < rows; ++row) {
        const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
        const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ?
            liq_image_get_row_f(input_image->background, row) : NULL;

        unsigned int last_match=0;
        for(unsigned int col = 0; col < cols; ++col) {
            float diff;
            // seed the search with the previous match — neighbors are usually similar
            last_match = nearest_search(n, &row_pixels[col], last_match, &diff);
            // prefer transparency when the background already matches at least as well
            if (bg_pixels && colordifference(bg_pixels[col], acolormap[last_match].acolor) <= diff) {
                last_match = transparent_index;
            }
            output_pixels[row][col] = last_match;

            remapping_error += diff;
            kmeans_update_color(row_pixels[col], 1.0, map, last_match, omp_get_thread_num(), average_color);
        }
    }

    kmeans_finalize(map, max_threads, average_color);

    nearest_free(n);

    return remapping_error / (input_image->width * input_image->height);
}

/* Applies scaled Floyd-Steinberg error to a pixel. The scale `ratio` is
 * clamped so no channel over/undershoots too far, and absurdly large errors
 * are damped rather than propagated in full. */
inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px)
{
    /* Use Floyd-Steinberg errors to adjust actual color. */
    const float sr = thiserr.r * dither_level,
                sg = thiserr.g * dither_level,
                sb = thiserr.b * dither_level,
                sa = thiserr.a * dither_level;

    float ratio = 1.0;
    const float max_overflow = 1.1f;
    const float max_underflow = -0.1f;

    // allowing some overflow prevents undithered bands caused by clamping of all channels
    if (px.r + sr > max_overflow)  ratio = MIN(ratio, (max_overflow -px.r)/sr);
    else { if (px.r + sr < max_underflow)  ratio = MIN(ratio, (max_underflow-px.r)/sr); }
    if (px.g + sg > max_overflow)  ratio = MIN(ratio, (max_overflow -px.g)/sg);
    else { if (px.g + sg < max_underflow)  ratio = MIN(ratio, (max_underflow-px.g)/sg); }
    if (px.b + sb > max_overflow)  ratio = MIN(ratio, (max_overflow -px.b)/sb);
    else { if (px.b + sb < max_underflow)  ratio = MIN(ratio, (max_underflow-px.b)/sb); }

    // alpha is simply clamped to [0, 1]
    float a = px.a + sa;
    if (a > 1.f) { a = 1.f; }
    else if (a < 0) { a = 0; }

    // If dithering error is crazy high, don't propagate it that much
    // This prevents crazy green pixels popping out of the blue (or red or black!
    // ;)
    const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa;
    if (dither_error > max_dither_error) {
        ratio *= 0.8f;
    } else if (dither_error < 2.f/256.f/256.f) {
        // don't dither areas that don't have noticeable error — makes file smaller
        return px;
    }

    return (f_pixel) {
        .r=px.r + sr * ratio,
        .g=px.g + sg * ratio,
        .b=px.b + sb * ratio,
        .a=a,
    };
}

/**
  Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered.

  If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image.
 */
LIQ_NONNULL static bool remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], liq_remapping_result *quant, const float max_dither_error, const bool output_image_is_remapped)
{
    const int rows = input_image->height, cols = input_image->width;
    const unsigned char *dither_map = quant->use_dither_map ? (input_image->dither_map ? input_image->dither_map : input_image->edges) : NULL;

    const colormap *map = quant->palette;
    const colormap_item *acolormap = map->palette;

    if (!liq_image_get_row_f_init(input_image)) {
        return false;
    }
    if (input_image->background && !liq_image_get_row_f_init(input_image->background)) {
        return false;
    }

    /* Initialize Floyd-Steinberg error vectors. */
    const size_t errwidth = cols+2;
    // one allocation holds both the current and next row of error terms
    f_pixel *restrict thiserr = input_image->malloc(errwidth * sizeof(thiserr[0]) * 2); // +2 saves from checking out of bounds access
    if (!thiserr) return false;
    f_pixel *restrict nexterr = thiserr + errwidth;
    memset(thiserr, 0, errwidth * sizeof(thiserr[0]));

    bool ok = true;
    struct nearest_map *const n = nearest_init(map);
    const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0;

    // response to this value is non-linear and without it any value < 0.8 would give almost no dithering
    float base_dithering_level = quant->dither_level;
    base_dithering_level = 1.f - (1.f-base_dithering_level)*(1.f-base_dithering_level);

    if (dither_map) {
        base_dithering_level *= 1.f/255.f; // convert byte to float
    }
    base_dithering_level *= 15.f/16.f; // prevent small errors from accumulating

    int fs_direction = 1;
    unsigned int last_match=0;
    for (int row = 0; row < rows; ++row) {
        if (liq_remap_progress(quant, quant->progress_stage1 + row * (100.f - quant->progress_stage1) / rows)) {
            ok = false;
            break;
        }

        memset(nexterr, 0, errwidth * sizeof(nexterr[0]));

        int col = (fs_direction > 0) ? 0 : (cols - 1);
        const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
        const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL;

        do {
            float dither_level = base_dithering_level;
            if (dither_map) {
                dither_level *= dither_map[row*cols + col]; // per-pixel strength from the map
            }

            const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]);

            const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match;
            float diff;
            last_match = nearest_search(n, &spx, guessed_match, &diff);
            f_pixel output_px = acolormap[last_match].acolor;
            // prefer transparency when the background matches at least as well
            if (bg_pixels && colordifference(bg_pixels[col], output_px) <= diff) {
                output_px = bg_pixels[col];
                output_pixels[row][col] = transparent_index;
            } else {
                output_pixels[row][col] = last_match;
            }

            f_pixel err = {
                .r = (spx.r - output_px.r),
                .g = (spx.g - output_px.g),
                .b = (spx.b - output_px.b),
                .a = (spx.a - output_px.a),
            };

            // If dithering error is crazy high, don't propagate it that much
            // This prevents crazy green pixels popping out of the blue (or red or black! ;)
            if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) {
                err.r *= 0.75f;
                err.g *= 0.75f;
                err.b *= 0.75f;
                err.a *= 0.75f;
            }

            /* Propagate Floyd-Steinberg error terms. */
            // weights 7/16, 1/16, 5/16, 3/16 are mirrored with scan direction
            if (fs_direction > 0) {
                thiserr[col + 2].a += err.a * (7.f/16.f);
                thiserr[col + 2].r += err.r * (7.f/16.f);
                thiserr[col + 2].g += err.g * (7.f/16.f);
                thiserr[col + 2].b += err.b * (7.f/16.f);

                nexterr[col + 2].a  = err.a * (1.f/16.f);
                nexterr[col + 2].r  = err.r * (1.f/16.f);
                nexterr[col + 2].g  = err.g * (1.f/16.f);
                nexterr[col + 2].b  = err.b * (1.f/16.f);

                nexterr[col + 1].a += err.a * (5.f/16.f);
                nexterr[col + 1].r += err.r * (5.f/16.f);
                nexterr[col + 1].g += err.g * (5.f/16.f);
                nexterr[col + 1].b += err.b * (5.f/16.f);

                nexterr[col    ].a += err.a * (3.f/16.f);
                nexterr[col    ].r += err.r * (3.f/16.f);
                nexterr[col    ].g += err.g * (3.f/16.f);
                nexterr[col    ].b += err.b * (3.f/16.f);
            } else {
                thiserr[col    ].a += err.a * (7.f/16.f);
                thiserr[col    ].r += err.r * (7.f/16.f);
                thiserr[col    ].g += err.g * (7.f/16.f);
                thiserr[col    ].b += err.b * (7.f/16.f);

                nexterr[col    ].a  = err.a * (1.f/16.f);
                nexterr[col    ].r  = err.r * (1.f/16.f);
                nexterr[col    ].g  = err.g * (1.f/16.f);
                nexterr[col    ].b  = err.b * (1.f/16.f);

                nexterr[col + 1].a += err.a * (5.f/16.f);
                nexterr[col + 1].r += err.r * (5.f/16.f);
                nexterr[col + 1].g += err.g * (5.f/16.f);
                nexterr[col + 1].b += err.b * (5.f/16.f);

                nexterr[col + 2].a += err.a * (3.f/16.f);
                nexterr[col + 2].r += err.r * (3.f/16.f);
                nexterr[col + 2].g += err.g * (3.f/16.f);
                nexterr[col + 2].b += err.b * (3.f/16.f);
            }

            // remapping is done in zig-zag
            col += fs_direction;
            if (fs_direction > 0) {
                if (col >= cols) break;
            } else {
                if (col < 0) break;
            }
        } while(1);

        f_pixel *const temperr = thiserr;
        thiserr = nexterr;
        nexterr = temperr;
        fs_direction = -fs_direction;
    }
    input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped

    nearest_free(n);
    return ok;
}

/* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from
   histogram */
LIQ_NONNULL static void remove_fixed_colors_from_histogram(histogram *hist, const int fixed_colors_count, const f_pixel fixed_colors[], const float target_mse)
{
    // any histogram color this close to a fixed color is considered a duplicate
    const float max_difference = MAX(target_mse/2.f, 2.f/256.f/256.f);
    if (fixed_colors_count) {
        for(int j=0; j < hist->size; j++) {
            for(unsigned int i=0; i < fixed_colors_count; i++) {
                if (colordifference(hist->achv[j].acolor, fixed_colors[i]) < max_difference) {
                    hist->achv[j] = hist->achv[--hist->size]; // remove color from histogram by overwriting with the last entry
                    j--; break; // continue searching histogram
                }
            }
        }
    }
}

/* Adds explicit (color, count) entries to a histogram without going through
 * an image. Incompatible with posterized image-based histograms. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_colors(liq_histogram *input_hist, const liq_attr *options, const liq_histogram_entry entries[], int num_entries, double gamma)
{
    if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
    if (!CHECK_USER_POINTER(entries)) return LIQ_INVALID_POINTER;
    if (gamma < 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
    if (num_entries <= 0 || num_entries > 1<<30) return LIQ_VALUE_OUT_OF_RANGE;

    if (input_hist->ignorebits > 0 && input_hist->had_image_added) {
        return LIQ_UNSUPPORTED; // posterized image colors can't be mixed with exact entries
    }
    input_hist->ignorebits = 0;

    input_hist->had_image_added = true;
    input_hist->gamma = gamma ? gamma : 0.45455; // default gamma when caller passes 0

    if (!input_hist->acht) {
        input_hist->acht = pam_allocacolorhash(~0, num_entries*num_entries, 0, options->malloc, options->free);
        if (!input_hist->acht) {
            return LIQ_OUT_OF_MEMORY;
        }
    }
    // Fake image size. It's only for hash size estimates.
    if (!input_hist->acht->cols) {
        input_hist->acht->cols = num_entries;
    }
    input_hist->acht->rows += num_entries;

    const unsigned int hash_size = input_hist->acht->hash_size;
    for(int i=0; i < num_entries; i++) {
        const rgba_pixel rgba = {
            .r = entries[i].color.r,
            .g = entries[i].color.g,
            .b = entries[i].color.b,
            .a = entries[i].color.a,
        };
        union rgba_as_int px = {rgba};
        unsigned int hash;
        if (px.rgba.a) {
            hash = px.l % hash_size;
        } else {
            hash=0; px.l=0; // all fully-transparent colors collapse into one entry
        }
        if (!pam_add_to_hash(input_hist->acht, hash, entries[i].count, px, i, num_entries)) {
            return LIQ_OUT_OF_MEMORY;
        }
    }

    return LIQ_OK;
}

/* Accumulates an image's colors into the histogram. If there are too many
 * distinct colors, `ignorebits` is raised (posterizing the input) and the
 * whole pass is retried until the hash fits. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_image(liq_histogram *input_hist, const liq_attr *options, liq_image *input_image)
{
    if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;

    const unsigned int cols = input_image->width, rows = input_image->height;

    if (!input_image->importance_map && options->use_contrast_maps) {
        contrast_maps(input_image);
    }

    input_hist->gamma = input_image->gamma;

    for(int i = 0; i < input_image->fixed_colors_count; i++) {
        liq_error res = liq_histogram_add_fixed_color_f(input_hist, input_image->fixed_colors[i]);
        if (res != LIQ_OK) {
            return res;
        }
    }

    /*
     ** Step 2: attempt to make a histogram of the colors, unclustered.
     ** If at first we don't succeed, increase ignorebits to increase color
     ** coherence and try again.
     */

    if (liq_progress(options, options->progress_stage1 * 0.4f)) {
        return LIQ_ABORTED;
    }

    const bool all_rows_at_once = liq_image_can_use_rgba_rows(input_image);

    // Usual solution is to start from scratch when limit is exceeded, but that's not possible if it's not
    // the first image added
    const unsigned int max_histogram_entries = input_hist->had_image_added ? ~0 : options->max_histogram_entries;

    do {
        if (!input_hist->acht) {
            input_hist->acht = pam_allocacolorhash(max_histogram_entries, rows*cols, input_hist->ignorebits, options->malloc, options->free);
        }
        if (!input_hist->acht) return LIQ_OUT_OF_MEMORY;

        // histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important.
        // noise map does not include edges to avoid ruining anti-aliasing
        for(unsigned int row=0; row < rows; row++) {
            bool added_ok;
            if (all_rows_at_once) {
                // fast path: hash the whole image in a single call
                added_ok = pam_computeacolorhash(input_hist->acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->importance_map);
                if (added_ok) break;
            } else {
                const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) };
                added_ok = pam_computeacolorhash(input_hist->acht, rows_p, cols, 1, input_image->importance_map ? &input_image->importance_map[row * cols] : NULL);
            }
            if (!added_ok) {
                // hash overflowed: posterize more aggressively and restart
                input_hist->ignorebits++;
                liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... %d", input_hist->ignorebits);
                pam_freeacolorhash(input_hist->acht);
                input_hist->acht = NULL;
                if (liq_progress(options, options->progress_stage1 * 0.6f)) return LIQ_ABORTED;
                break;
            }
        }
    } while(!input_hist->acht);

    input_hist->had_image_added = true;

    liq_image_free_importance_map(input_image);

    if (input_image->free_pixels && input_image->f_pixels) {
        liq_image_free_rgba_source(input_image); // now can free the RGBA source if copy has been made in f_pixels
    }

    return LIQ_OK;
}

/* Converts the intermediate color hash into the final histogram used by the
 * median-cut quantizer, removing colors duplicated by fixed entries. */
LIQ_NONNULL static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output)
{
    if (liq_progress(options, options->progress_stage1 * 0.9f)) {
        return LIQ_ABORTED;
    }

    if (!input_hist->acht) {
        return LIQ_BITMAP_NOT_AVAILABLE;
    }

    histogram *hist = pam_acolorhashtoacolorhist(input_hist->acht, input_hist->gamma, options->malloc, options->free);
    pam_freeacolorhash(input_hist->acht);
    input_hist->acht = NULL;

    if (!hist) {
        return LIQ_OUT_OF_MEMORY;
    }
    liq_verbose_printf(options, " made histogram...%d colors found", hist->size);
    remove_fixed_colors_from_histogram(hist, input_hist->fixed_colors_count, input_hist->fixed_colors, options->target_mse);

    *hist_output = hist;
    return LIQ_OK;
}

/* Raises the alpha of nearly-opaque pixels in one row (in place). */
LIQ_NONNULL static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
    /* IE6 makes colors with even slightest transparency completely transparent,
       thus to improve situation in IE, make colors that are less than ~10% transparent
       completely opaque */

    const float min_opaque_val = input_image->min_opaque_val;
    const float almost_opaque_val = min_opaque_val * 169.f/256.f;
    const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f;

    for(unsigned int col = 0; col < input_image->width; col++) {
        const rgba_pixel px = row_pixels[col];

        /* ie bug: to avoid visible step caused by forced opaqueness, linearly raise opaqueness of almost-opaque colors */
        if (px.a >= almost_opaque_val_int) {
            float al = px.a / 255.f;
            al = almost_opaque_val + (al-almost_opaque_val) *
(1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val);
            al *= 256.f;
            // clamp the ramped value back into the 0..255 byte range
            row_pixels[col].a = al >= 255.f ? 255 : al;
        }
    }
}

/**
 Builds two maps:
    importance_map - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy.
    edges - noise map including all edges
 */
LIQ_NONNULL static void contrast_maps(liq_image *image)
{
    const unsigned int cols = image->width, rows = image->height;
    // too small to analyze, or the three cols*rows scratch maps would exceed the memory cap
    if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) {
        return;
    }

    // reuse previously-allocated maps when present; ownership is taken from the image
    unsigned char *restrict noise = image->importance_map ? image->importance_map : image->malloc(cols*rows);
    image->importance_map = NULL;
    unsigned char *restrict edges = image->edges ? image->edges : image->malloc(cols*rows);
    image->edges = NULL;

    unsigned char *restrict tmp = image->malloc(cols*rows);

    if (!noise || !edges || !tmp || !liq_image_get_row_f_init(image)) {
        // free(NULL) is expected to be safe for the user-supplied allocator
        image->free(noise);
        image->free(edges);
        image->free(tmp);
        return;
    }

    // sliding 3-row window; prev/next are clamped at the image borders below
    const f_pixel *curr_row, *prev_row, *next_row;
    curr_row = prev_row = next_row = liq_image_get_row_f(image, 0);

    for (unsigned int j=0; j < rows; j++) {
        prev_row = curr_row;
        curr_row = next_row;
        next_row = liq_image_get_row_f(image, MIN(rows-1,j+1));

        f_pixel prev, curr = curr_row[0], next=curr;
        for (unsigned int i=0; i < cols; i++) {
            prev=curr;
            curr=next;
            next = curr_row[MIN(cols-1,i+1)];

            // contrast is difference between pixels neighbouring horizontally and vertically
            const float a = fabsf(prev.a+next.a - curr.a*2.f),
                        r = fabsf(prev.r+next.r - curr.r*2.f),
                        g = fabsf(prev.g+next.g - curr.g*2.f),
                        b = fabsf(prev.b+next.b - curr.b*2.f);

            const f_pixel prevl = prev_row[i];
            const f_pixel nextl = next_row[i];

            const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f),
                        r1 = fabsf(prevl.r+nextl.r - curr.r*2.f),
                        g1 = fabsf(prevl.g+nextl.g - curr.g*2.f),
                        b1 = fabsf(prevl.b+nextl.b - curr.b*2.f);

            const float horiz = MAX(MAX(a,r),MAX(g,b));
            const float vert = MAX(MAX(a1,r1),MAX(g1,b1));
            const float edge = MAX(horiz,vert);
            // straight edges have strong contrast in only one direction; noise in both
            float z = edge - fabsf(horiz-vert)*.5f;
            z = 1.f -
MAX(z,MIN(horiz,vert));
            z *= z; // noise is amplified
            z *= z;

            // 85 is about 1/3rd of weight (not 0, because noisy pixels still need to be included, just not as precisely).
            const unsigned int z_int = 85 + (unsigned int)(z * 171.f);
            noise[j*cols+i] = MIN(z_int, 255);
            const int e_int = 255 - (int)(edge * 256.f);
            edges[j*cols+i] = e_int > 0 ? MIN(e_int, 255) : 0;
        }
    }

    // noise areas are shrunk and then expanded to remove thin edges from the map
    liq_max3(noise, tmp, cols, rows);
    liq_max3(tmp, noise, cols, rows);

    liq_blur(noise, tmp, noise, cols, rows, 3);

    liq_max3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);
    liq_min3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);

    liq_min3(edges, tmp, cols, rows);
    liq_max3(tmp, edges, cols, rows);
    // edges is capped by noise so flat areas don't register as edges
    for(unsigned int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]);

    image->free(tmp);

    // hand ownership of the two maps back to the image
    image->importance_map = noise;
    image->edges = edges;
}

/**
 * Builds map of neighbor pixels mapped to the same palette entry
 *
 * For efficiency/simplicity it mainly looks for same consecutive pixels horizontally
 * and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly.
 * Correct flood fill doesn't have visually good properties.
 */
LIQ_NONNULL static void update_dither_map(liq_image *input_image, unsigned char *const *const row_pointers, colormap *map)
{
    const unsigned int width = input_image->width;
    const unsigned int height = input_image->height;
    unsigned char *const edges = input_image->edges;

    for(unsigned int row=0; row < height; row++) {
        // track runs of identical palette indices along the row
        unsigned char lastpixel = row_pointers[row][0];
        unsigned int lastcol=0;

        for(unsigned int col=1; col < width; col++) {
            const unsigned char px = row_pointers[row][col];

            if (input_image->background && map->palette[px].acolor.a < 1.f/256.f) {
                // Transparency may or may not create an edge. When there's an explicit background set, assume no edge.
continue;
            }

            if (px != lastpixel || col == width-1) {
                // base score grows with the run length; vertical matches add to it below
                int neighbor_count = 10 * (col-lastcol);

                unsigned int i=lastcol;
                while(i < col) {
                    if (row > 0) {
                        unsigned char pixelabove = row_pointers[row-1][i];
                        if (pixelabove == lastpixel) neighbor_count += 15;
                    }
                    if (row < height-1) {
                        unsigned char pixelbelow = row_pointers[row+1][i];
                        if (pixelbelow == lastpixel) neighbor_count += 15;
                    }
                    i++;
                }

                // rescale edge strength for the whole run: bigger flat neighborhoods get less dithering
                while(lastcol <= col) {
                    int e = edges[row*width + lastcol];
                    edges[row*width + lastcol++] = (e+128) * (255.f/(255+128)) * (1.f - 20.f / (20 + neighbor_count));
                }
                lastpixel = px;
            }
        }
    }
    // the edges buffer is repurposed as the dither map; ownership moves over
    input_image->dither_map = input_image->edges;
    input_image->edges = NULL;
}

/**
 * Palette can be NULL, in which case it creates a new palette from scratch.
 *
 * Appends the user-pinned `fixed_colors` to (a copy of) `palette`, marking them
 * `fixed`, and caps the result at `max_colors`. The input palette, if given,
 * is freed; the returned colormap is newly allocated with the given allocator.
 */
static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*))
{
    if (!fixed_colors_count) return palette;

    colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free);

    unsigned int i=0;
    if (palette && fixed_colors_count < max_colors) {
        // copy as many generated colors as fit after reserving room for the fixed ones
        unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count);
        for(; i < palette_max; i++) {
            newpal->palette[i] = palette->palette[i];
        }
    }
    for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) {
        newpal->palette[i++] = (colormap_item){
            .acolor = fixed_colors[j],
            .fixed = true,
        };
    }
    if (palette) pam_freecolormap(palette);
    return newpal;
}

/**
 * Histogram-weight feedback used by find_best_palette: colors remapped with a
 * larger error (diff) get proportionally heavier weight in the next mediancut.
 * NOTE(review): `__builtin_sqrtf` is a GCC/Clang extension — upstream uses
 * `sqrtf` from <math.h>; this will not compile on MSVC. Verify intent.
 */
LIQ_NONNULL static void adjust_histogram_callback(hist_item *item, float diff)
{
    item->adjusted_weight = (item->perceptual_weight+item->adjusted_weight) * (__builtin_sqrtf(1.f+diff));
}

/**
 Repeats mediancut with different histogram weights to find palette with minimum error.

 feedback_loop_trials controls how long the search will take. < 0 skips the iteration.
 */
static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p)
{
    unsigned int max_colors = options->max_colors;

    // if output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse
    // at this point actual gamma is not set, so very conservative posterization estimate is used
    const double target_mse = MIN(max_mse, MAX(options->target_mse, (double)liqpowf((1<<options->min_posterization_output)/1024.0, 2)));
    int feedback_loop_trials = options->feedback_loop_trials;
    // large histograms make each trial expensive, so scale the trial count down ~25% per size tier
    if (hist->size > 5000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 25000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 50000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 100000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    colormap *acolormap = NULL;       // best palette found so far (owned here until returned)
    double least_error = MAX_DIFF;
    double target_mse_overshoot = feedback_loop_trials>0 ? 1.05 : 1.0;
    const float total_trials = (float)(feedback_loop_trials>0?feedback_loop_trials:1);

    do {
        colormap *newmap;
        if (hist->size && fixed_colors_count < max_colors) {
            newmap = mediancut(hist, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(45.0/65536.0, target_mse), least_error)*1.2, options->malloc, options->free);
        } else {
            // nothing left to generate: all slots are fixed colors (or histogram is empty)
            feedback_loop_trials = 0;
            newmap = NULL;
        }
        // consumes newmap; returns NULL only on allocation failure
        newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        if (!newmap) {
            return NULL;
        }

        if (feedback_loop_trials <= 0) {
            return newmap;
        }

        // after palette has been created, total error (MSE) is calculated to keep the best palette
        // at the same time K-Means iteration is done to improve the palette
        // and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors

        const bool first_run_of_target_mse = !acolormap && target_mse > 0;
        double total_error = kmeans_do_iteration(hist, newmap, first_run_of_target_mse ? NULL : adjust_histogram_callback);

        // goal is to increase quality or to reduce number of colors used if quality is good enough
        if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) {
            if (acolormap) pam_freecolormap(acolormap);
            acolormap = newmap;

            if (total_error < target_mse && total_error > 0) {
                // K-Means iteration improves quality above what mediancut aims for
                // this compensates for it, making mediancut aim for worse
                target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error);
            }

            least_error = total_error;

            // if number of colors could be reduced, try to keep it that way
            // but allow extra color as a bit of wiggle room in case quality can be improved too
            max_colors = MIN(newmap->colors+1, max_colors);

            feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever
        } else {
            // trial was worse: soften the adjusted weights back toward the perceptual baseline
            for(unsigned int j=0; j < hist->size; j++) {
                hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0;
            }

            target_mse_overshoot = 1.0;
            feedback_loop_trials -= 6;
            // if error is really bad, it's unlikely to improve, so end sooner
            if (total_error > least_error*4) feedback_loop_trials -= 3;
            pam_freecolormap(newmap);
        }

        float fraction_done = 1.f-MAX(0.f, feedback_loop_trials/total_trials);
        if (liq_progress(options, options->progress_stage1 + fraction_done * options->progress_stage2)) break;
        liq_verbose_printf(options, " selecting colors...%d%%", (int)(100.f * fraction_done));
    } while(feedback_loop_trials > 0);

    *palette_error_p = least_error;
    return acolormap;
}

/**
 * Degenerate case: the histogram already has no more colors than the palette
 * allows, so every histogram entry simply becomes a palette entry verbatim.
 * Returns NULL for an empty histogram.
 */
static colormap *histogram_to_palette(const histogram *hist, const liq_attr *options) {
    if (!hist->size) {
        return NULL;
    }
    colormap *acolormap = pam_colormap(hist->size, options->malloc, options->free);
    for(unsigned int i=0; i < hist->size; i++) {
        acolormap->palette[i].acolor = hist->achv[i].acolor;
        acolormap->palette[i].popularity = hist->achv[i].perceptual_weight;
    }
    return acolormap;
}

LIQ_NONNULL static
liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **result_output)
{
    colormap *acolormap;
    double palette_error = -1; // negative means "not computed"

    // placed inside assert() so the warning only prints in debug (NDEBUG-less) builds
    assert((verbose_print(options, "SLOW debug checks enabled. Recompile with NDEBUG for normal operation."),1));

    const bool few_input_colors = hist->size+fixed_colors_count <= options->max_colors;

    if (liq_progress(options, options->progress_stage1)) return LIQ_ABORTED;

    // If image has few colors to begin with (and no quality degradation is required)
    // then it's possible to skip quantization entirely
    if (few_input_colors && options->target_mse == 0) {
        acolormap = add_fixed_colors_to_palette(histogram_to_palette(hist, options), options->max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        palette_error = 0;
    } else {
        const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain
        acolormap = find_best_palette(hist, options, max_mse, fixed_colors, fixed_colors_count, &palette_error);
        if (!acolormap) {
            return LIQ_VALUE_OUT_OF_RANGE;
        }

        // K-Means iteration approaches local minimum for the palette
        double iteration_limit = options->kmeans_iteration_limit;
        unsigned int iterations = options->kmeans_iterations;

        if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work

        if (iterations) {
            // likely_colormap_index (used and set in kmeans_do_iteration) can't point to index outside colormap
            if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) {
                if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) {
                    hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway
                }
            }

            // same ~25%-per-tier scaling as in find_best_palette: big histograms get fewer iterations
            if (hist->size > 5000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 25000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 50000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 100000) {iterations = (iterations*3 + 3)/4; iteration_limit *= 2;}

            verbose_print(options, " moving colormap towards local minimum");

            double previous_palette_error = MAX_DIFF;

            for(unsigned int i=0; i < iterations; i++) {
                palette_error = kmeans_do_iteration(hist, acolormap, NULL);

                if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + (i * options->progress_stage3 * 0.9f) / iterations)) {
                    break;
                }

                // converged: change between iterations is below the configured limit
                if (fabs(previous_palette_error-palette_error) < iteration_limit) {
                    break;
                }

                if (palette_error > max_mse*1.5) { // probably hopeless
                    if (palette_error > max_mse*3.0) break; // definitely hopeless
                    i++; // burn an extra iteration to bail out sooner
                }

                previous_palette_error = palette_error;
            }
        }

        if (palette_error > max_mse) {
            liq_verbose_printf(options, " image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)", mse_to_standard_mse(palette_error), mse_to_quality(palette_error), mse_to_standard_mse(max_mse), mse_to_quality(max_mse));
            pam_freecolormap(acolormap);
            return LIQ_QUALITY_TOO_LOW;
        }
    }

    if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + options->progress_stage3 * 0.95f)) {
        pam_freecolormap(acolormap);
        return LIQ_ABORTED;
    }

    sort_palette(acolormap, options);

    // If palette was created from a multi-image histogram,
    // then it shouldn't be optimized for one image during remapping
    if (fixed_result_colors) {
        for(unsigned int i=0; i < acolormap->colors; i++) {
            acolormap->palette[i].fixed = true;
        }
    }

    liq_result *result = options->malloc(sizeof(liq_result));
    if (!result) return LIQ_OUT_OF_MEMORY;
    *result = (liq_result){
        .magic_header = liq_result_magic,
        .malloc = options->malloc,
        .free = options->free,
        .palette = acolormap,
        .palette_error = palette_error,
        .use_dither_map = options->use_dither_map,
        .gamma = gamma,
        .min_posterization_output = options->min_posterization_output,
    };
    *result_output = result;
    return LIQ_OK;
}

/**
 * Public API: remaps input_image into a caller-supplied contiguous buffer of
 * palette indices (one byte per pixel). Builds a temporary row-pointer table
 * and delegates to liq_write_remapped_image_rows().
 */
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_USER_POINTER(buffer)) {
        return LIQ_INVALID_POINTER;
    }

    // size_t casts prevent 32-bit overflow of width*height
    const size_t required_size = (size_t)input_image->width * (size_t)input_image->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    LIQ_ARRAY(unsigned char *, rows, input_image->height);
    unsigned char *buffer_bytes = buffer;
    for(unsigned int i=0; i < input_image->height; i++) {
        rows[i] = &buffer_bytes[input_image->width * i];
    }
    return liq_write_remapped_image_rows(result, input_image, rows);
}

/**
 * Public API: remaps input_image into caller-supplied per-row buffers,
 * optionally with Floyd-Steinberg dithering guided by a dither map.
 * Replaces any previous remapping result stored on `quant`.
 */
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
    if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
    for(unsigned int i=0; i < input_image->height; i++) {
        if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
    }

    // only one remapping result is kept per liq_result
    if (quant->remapping) {
        liq_remapping_result_destroy(quant->remapping);
    }
    liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
    if (!result) return LIQ_OUT_OF_MEMORY;

    // edge/noise analysis is needed only when dither-map-driven dithering is on
    if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
        contrast_maps(input_image);
    }

    if (liq_remap_progress(result, result->progress_stage1 * 0.25f)) {
        return LIQ_ABORTED;
    }

    /*
     ** Step 4: map the colors in the image to their closest match in the
     ** new colormap, and write 'em out.
     */

    float remapping_error = result->palette_error;
    if (result->dither_level == 0) {
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
        remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
    } else {
        const bool is_image_huge = (input_image->width * input_image->height) > 2000 * 2000;
        const bool allow_dither_map = result->use_dither_map == 2 || (!is_image_huge && result->use_dither_map);
        const bool generate_dither_map = allow_dither_map && (input_image->edges && !input_image->dither_map);
        if (generate_dither_map) {
            // If dithering (with dither map) is required, this image is used to find areas that require dithering
            remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
            update_dither_map(input_image, row_pointers, result->palette);
        }

        if (liq_remap_progress(result, result->progress_stage1 * 0.5f)) {
            return LIQ_ABORTED;
        }

        // remapping above was the last chance to do K-Means iteration, hence the final palette is set after remapping
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);

        if (!remap_to_palette_floyd(input_image, row_pointers, result, MAX(remapping_error*2.4, 16.f/256.f), generate_dither_map)) {
            return LIQ_ABORTED;
        }
    }

    // remapping error from dithered image is absurd, so always non-dithered value is used
    // palette_error includes some perceptual weighting from histogram which is closer correlated with dssim
    // so that should be used when possible.
    if (result->palette_error < 0) {
        result->palette_error = remapping_error;
    }

    return LIQ_OK;
}

/** Returns the compile-time library version number. */
LIQ_EXPORT int liq_version() {
    return LIQ_VERSION;
}
gi_ondemand_accurate_grad_builder.h
/*
*
* Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu>
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license.  See the LICENSE file for details.
*/

#ifndef ONDEMAND_ACCURATE_GRAD_BUILDER_H
#define ONDEMAND_ACCURATE_GRAD_BUILDER_H

#include <vector>
#include <set>
#include <queue>
#include <time.h>
#include "base/gi_timing.h"
#include "base/gi_topological_regular_grid_3d.h"
#include "base/gi_isolated_region_remover.h"
#include "base/gi_isolated_region_remover_masked.h"
#include "base/gi_numeric_integrator_path_compressing.h"
#include "base/gi_numeric_streamline_integrator_digitizing.h"
// NOTE(review): gi_timing.h, gi_topological_gradient_using_algorithms.h,
// gi_isolated_region_remover.h and gi_numeric_integrator_path_compressing.h
// are each included twice in this list; harmless with include guards, but
// the duplicates could be removed.
#include "base/gi_timing.h"
#include "base/gi_adaptive_euler_advector_2d.h"
#include "base/gi_adaptive_euler_advector_3d.h"
#include "base/gi_advection_checkers.h"
#include "base/gi_advection_events.h"
#include "base/gi_index_comparer.h"
#include "base/gi_maxmin_vertex_labeling.h"
#include "base/gi_conforming_discrete_gradient.h"
#include "base/gi_robins_sliding_regular_grid.h"
#include "base/gi_labeling_to_bounary_labeling.h"
#include "base/gi_topological_gradient_using_algorithms.h"
#include "base/gi_topological_gradient_using_algorithms.h"
#include "base/gi_isolated_region_remover.h"
#include "base/gi_bifiltration_pairing.h"
#include "base/gi_topological_max_vertex_mesh_function.h"
#include "base/gi_extrema_region_builder.h"
#include "base/gi_numeric_integrator_path_compressing.h"

namespace GInt {

	//template <class GridType, class FuncType, class MeshType>
	//class DiscreteGradientContext {
	//protected:
	//	int m_native_dimension;
	//	int m_data_x;
	//	int m_data_y;
	//	int m_data_z;
	//	GCGridType* mGrid;
	//	GCMeshType* mMesh;
	//	GCFuncType* mFunc;
	//public:
	//	typedef GridType GCGridType;
	//	typedef FuncType GCFuncType;
	//	typedef MeshType GCMeshType;
	//};

	// Drives on-demand, numerically-accurate discrete gradient construction on a
	// 3D regular grid: a base Robins gradient, optionally constrained by
	// numerically integrated ascending/descending manifold boundaries.
	class OndemandDiscreteGradientBuilder {
	public:
		// grid dimensions, parsed from the command line in GetOptions()
		int X, Y, Z;
		int iteration_limit;             // max steps per numeric integration
		int per_x, per_y, per_z;         // NOTE(review): never read in visible code — confirm use
		float error_threshold, gradient_threshold;
		std::string
filename;
		int parallelism = -1;            // -1 = use OpenMP default thread count
		int outputdebug = 0;
		float g_pre_simp_threshold = 0.0f; // persistence threshold for pre-simplification
		//// we need asc man 3 dsc man 3?
		// which accuracy products are requested (ints used as booleans, set via sscanf %d)
		int need_ASC_3 = false;
		int need_DSC_3 = false;
		int need_ASC_1 = false;
		int need_DSC_1 = false;
		int needsad = false;             // NOTE(review): never set by GetOptions — confirm intent
		SimpleTimer* mTimer;

		typedef RegularGrid3D GridType;
		typedef RegularGridTrilinearFunction GridFuncType;
		//typedef UncachedRegularGridTrilinearFunction GridFuncType;
		typedef TopologicalRegularGrid3D MeshType;
		typedef IndexCompareLessThan<GridFuncType> ComparerASC;
		typedef IndexCompareGreaterThan<GridFuncType> ComparerDSC;
		//typedef MorseSmaleComplexBasic<FLOATTYPE, MeshType, MeshFuncType, GradType> MSCType;
		//typedef NumericIntegratorExpandingRegionStopWithCutoff<AdaptiveEulerAdvector3D<-1>, ComparerASC> IntegratorTypeWC;
		//typedef NumericIntegratorExpandingRegionStop<AdaptiveEulerAdvector3D<-1>, ComparerASC> IntegratorTypeASC;
		typedef NumericIntegratorPathCompressingToTerminal<AdaptiveEulerAdvector3D<GridFuncType, -1>, GridFuncType > IntegratorTypeASC;
		typedef NumericIntegratorPathCompressingToTerminal<AdaptiveEulerAdvector3D<GridFuncType, 1>, GridFuncType > IntegratorTypeDSC;
		typedef IsolatedRegionRemoverMasked<ComparerASC> RegionRemoverTypeASC;
		typedef IsolatedRegionRemoverMasked<ComparerDSC> RegionRemoverTypeDSC;
		// NOTE(review): the streamline integrators use advector direction +1 for ASC
		// and -1 for DSC, the opposite of IntegratorTypeASC/DSC above — confirm sign convention.
		typedef DigitizingNumericStreamlineIntegrator3dASC<MeshType, GridFuncType, AdaptiveEulerAdvector3D<GridFuncType, 1> > StreamlineIntegratorTypeASC;
		typedef DigitizingNumericStreamlineIntegrator3dDSC<MeshType, GridFuncType, AdaptiveEulerAdvector3D<GridFuncType, -1> > StreamlineIntegratorTypeDSC;
		typedef DiscreteGradientLabeling<MeshType> GradType;
		//typedef UncachedMaximumVertexLabeling<MeshType, GridFuncType> MaxVLType;
		//typedef MaximumVertexLabeling<MeshType, GridFuncType> MaxVLType;
		typedef RegularGridMaxMinVertexLabeling3D<MeshType, GridFuncType> MaxVLType;
		typedef MyRobinsNoalloc<MeshType, MaxVLType, GradType, 4, 6> RobinsType;
		typedef TopologicalMaxVertexMeshFunction<MeshType, MaxVLType, GridFuncType, float>
MeshFuncType; typedef SlidingWindowRobinsNoalloc < RegularGrid3D, RegularGridTrilinearFunction, MeshType, MaxVLType, GradType> NewRobinsType; RegularGrid3D* g_grid; GridFuncType* g_rgt_func; MeshType *g_topo_grid; IntegratorTypeASC* g_num_integrator; //IntegratorTypeWC* g_num_integrator_with_cutoff; RegionRemoverTypeASC* g_region_remover; #ifdef USE_REGION_CLEANER VertexLabelingToBoundaryLabeling<INDEX_TYPE>* g_edge_map; #else VertexLabelingToBoundaryLabeling<int, MaxVLType>* g_edge_map; #endif MeshFuncType* g_topo_func; GradType *base_grad; //RobinsLabelingAlgorithm<MeshType, MeshFuncType> *g_robin_alg; TopologicalGradientUsingAlgorithms<MeshType, MeshFuncType, GradType>* g_topo_alg; StreamlineIntegratorTypeASC* g_digitizing_streamline_integrator_asc; StreamlineIntegratorTypeDSC* g_digitizing_streamline_integrator_dsc; MaxVLType* g_maxv_labeling; //void CombineLabels() { // INDEX_TYPE num = g_topo_grid->numCells(); // //#pragma omp parallel for // for (INDEX_TYPE i = 0; i < num; i++) { // char v1 = g_digitizing_streamline_integrator_asc->get_output()->GetLabel(i); // char v2 = g_edge_map->GetOutputLabels()->GetLabel(i); // // g_edge_map->GetOutputLabels()->SetLabel(i, max(v1, v2)); // } bool GetOptions(int argc, char** argv) { if (argc < 11) { printf("Usage: X Y Z filename error_threshold grad_threshold maxnumiter needASC1 needDSC1 needASC2 needDSC2 PresimpThesh [parallelism=ompmaxnumthreads] [outputdebug=0] [integrationinteraltimer=0]\n"); return 0; } sscanf(argv[1], "%d", &X); sscanf(argv[2], "%d", &Y); sscanf(argv[3], "%d", &Z); filename = std::string(argv[4]); sscanf(argv[5], "%f", &error_threshold); sscanf(argv[6], "%f", &gradient_threshold); sscanf(argv[7], "%d", &iteration_limit); sscanf(argv[8], "%d", &need_ASC_1); sscanf(argv[9], "%d", &need_DSC_1); sscanf(argv[10], "%d", &need_ASC_3); sscanf(argv[11], "%d", &need_DSC_3); sscanf(argv[12], "%f", &g_pre_simp_threshold); if (argc >= 14) sscanf(argv[13], "%d", &parallelism); // set remaining values if 
(parallelism != -1) { omp_set_num_threads(parallelism); } printf("dims=(%d,%d,%d)\nfile=%s\nintegration parameters: e=%f, gt=%f, il=%d\nondemandacc: a1=%d, d1=%d, a2=%d, d2=%d, ps=%f\npar=%d\n", X, Y, Z, argv[4], error_threshold, gradient_threshold, iteration_limit, need_ASC_1, need_DSC_1, need_ASC_3, need_DSC_3, g_pre_simp_threshold, parallelism); } void ReIntegrateUpFrom2Saddles(GradType* base_grad) { // FIRST gather all the critical 2-saddles from the discrete gradient int taskid = mTimer->StartTask("Topological GatherCritical2Saddles"); std::vector<INDEX_TYPE> topo_index_partition; int num_threads; std::vector<std::pair<float, INDEX_TYPE> > criticals; #pragma omp parallel { #pragma omp single { num_threads = omp_get_num_threads(); ArrayIndexPartitioner::EvenChunkSplit(g_topo_grid->numCells(), num_threads, topo_index_partition); } int thread_num = omp_get_thread_num(); // in parallel go through and find all 2-saddles std::vector<std::pair<float, INDEX_TYPE> > lcriticals; MeshType::DCellsIterator face_iterator(g_topo_grid, 2, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]); for (face_iterator.begin(); face_iterator.valid(); face_iterator.advance()) { INDEX_TYPE cell_id = face_iterator.value(); if (base_grad->getCritical(cell_id)) { std::pair<float, INDEX_TYPE> p(g_topo_func->cellValue(cell_id), cell_id); lcriticals.push_back(p); } } #pragma omp critical { criticals.insert(criticals.end(), lcriticals.begin(), lcriticals.end()); } } mTimer->EndTask(taskid); taskid = mTimer->StartTask("Topological Sort2Saddles"); std::sort(criticals.begin(), criticals.end()); mTimer->EndTask(taskid); taskid = mTimer->StartTask("Integrate Digitize2Saddles"); INDEX_TYPE total_left = criticals.size() - 1; #pragma omp parallel { while (true) { INDEX_TYPE local_id; //int tots; #pragma omp critical { local_id = total_left; total_left -= 1; } if (local_id < 0) break; INDEX_TYPE sad_id = criticals[local_id].second; // //// now find each arc std::vector<INDEX_TYPE> 
result; std::queue<INDEX_TYPE> cell_queue; cell_queue.push(sad_id); // THIS IS A SUPER FAST WAY OF filling in gometry of arc... only 2 in each direction // gather the 4 hexes along paths on either side of the critical saddle std::set<INDEX_TYPE> cell_visited; int counter = 4; while (!cell_queue.empty() && counter >= 0) { INDEX_TYPE current = cell_queue.front(); cell_queue.pop(); cell_visited.insert(current); //result.push_back(current); MeshType::CofacetsIterator cofacets(g_topo_grid); for (cofacets.begin(current); cofacets.valid(); cofacets.advance()) { INDEX_TYPE temp_id = cofacets.value(); if (base_grad->getCritical(temp_id) && cell_visited.count(temp_id) == 0) { result.push_back(temp_id); cell_visited.insert(temp_id); } else if (cell_visited.count(temp_id) == 0) { INDEX_TYPE pair = base_grad->getPair(temp_id); result.push_back(temp_id); //result.push_back(pair); cell_visited.insert(temp_id); cell_visited.insert(pair); cell_queue.push(pair); } } counter--; } for (auto arc_hex_id : result) { if (g_topo_grid->dimension(arc_hex_id) != 3) continue; std::vector<Vec3d> points; std::vector<INDEX_TYPE> dline; Vec3d seed; g_topo_grid->centroid(arc_hex_id, seed); seed = seed * 0.5; // back to grid coordinates g_digitizing_streamline_integrator_asc->IntegrateStreamline(seed, points, dline); } //printf("\n"); g_digitizing_streamline_integrator_asc->set_label(sad_id); } } mTimer->EndTask(taskid); } void ReIntegrateDownFrom1Saddles(GradType* base_grad) { // FIRST gather all the critical 2-saddles from the discrete gradient int taskid = mTimer->StartTask("Topological GatherCritical1Saddles"); std::vector<INDEX_TYPE> topo_index_partition; int num_threads; std::vector<std::pair<float, INDEX_TYPE> > criticals; #pragma omp parallel { #pragma omp single { num_threads = omp_get_num_threads(); ArrayIndexPartitioner::EvenChunkSplit(g_topo_grid->numCells(), num_threads, topo_index_partition); } int thread_num = omp_get_thread_num(); // in parallel go through and find all 2-saddles 
std::vector<std::pair<float, INDEX_TYPE> > lcriticals; MeshType::DCellsIterator face_iterator(g_topo_grid, 1, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]); for (face_iterator.begin(); face_iterator.valid(); face_iterator.advance()) { INDEX_TYPE cell_id = face_iterator.value(); if (base_grad->getCritical(cell_id)) { std::pair<float, INDEX_TYPE> p(-1 * g_topo_func->cellValue(cell_id), cell_id); lcriticals.push_back(p); } } #pragma omp critical { criticals.insert(criticals.end(), lcriticals.begin(), lcriticals.end()); } } mTimer->EndTask(taskid); taskid = mTimer->StartTask("Topological Sort1Saddles"); std::sort(criticals.begin(), criticals.end()); mTimer->EndTask(taskid); taskid = mTimer->StartTask("Integrate Digitize1Saddles"); INDEX_TYPE total_left = criticals.size() - 1; #pragma omp parallel { while (true) { INDEX_TYPE local_id; //int tots; #pragma omp critical { local_id = total_left; total_left -= 1; //tots = rand() % 10000; //printf("%d doing %llu %llu\n", omp_get_thread_num(), local_id, total_left); } if (local_id < 0) break; //std::vector<int> fff; //fff.clear(); //for (int i = 0; i < tots; i++) { // fff.push_back(i * i); //} //printf("fff size %d\n", fff.size()); INDEX_TYPE sad_id = criticals[local_id].second; // //// now find each arc std::vector<INDEX_TYPE> result; std::queue<INDEX_TYPE> cell_queue; cell_queue.push(sad_id); // THIS IS A SUPER FAST WAY OF filling in gometry of arc... 
only 2 in each direction // gather the 4 vertices along paths on either side of the critical saddle std::set<INDEX_TYPE> cell_visited; int counter = 4; while (!cell_queue.empty() && counter >= 0) { INDEX_TYPE current = cell_queue.front(); cell_queue.pop(); cell_visited.insert(current); //result.push_back(current); MeshType::FacetsIterator facets(g_topo_grid); for (facets.begin(current); facets.valid(); facets.advance()) { INDEX_TYPE temp_id = facets.value(); if (base_grad->getCritical(temp_id) && cell_visited.count(temp_id) == 0) { result.push_back(temp_id); cell_visited.insert(temp_id); } else if (cell_visited.count(temp_id) == 0) { INDEX_TYPE pair = base_grad->getPair(temp_id); result.push_back(temp_id); //result.push_back(pair); cell_visited.insert(temp_id); cell_visited.insert(pair); cell_queue.push(pair); } } counter--; } //printf("result size %d\n", result.size()); for (auto arc_vert_id : result) { //printf("%llu ", arc_hex_id); if (g_topo_grid->dimension(arc_vert_id) != 0) continue; std::vector<Vec3d> points; std::vector<INDEX_TYPE> dline; Vec3d seed; g_topo_grid->centroid(arc_vert_id, seed); seed = seed * 0.5; // back to grid coordinates g_digitizing_streamline_integrator_dsc->IntegrateStreamline(seed, points, dline); } //printf("\n"); g_digitizing_streamline_integrator_dsc->set_label(sad_id); } } mTimer->EndTask(taskid); } bool mGridInitialized; void InitializeGrid() { if (!mGridInitialized) { g_grid = new RegularGrid3D(Vec3i(X, Y, Z), Vec3b(0, 0, 0)); } } void do_work(){ printf("ondemand accuracy: a1=%d d1=%d a2=%d d2=%d\n", need_ASC_1, need_DSC_1, need_ASC_3, need_DSC_3); printf("SANITY\n"); // start timing overall algorithm mTimer = new SimpleTimer(SimpleTimer::TIMER_PRINT_START_END); char gradname[1024]; sprintf(gradname, "%s.grad", filename.c_str()); // will write timing to this file char timingname[2048]; sprintf(timingname, "%s.%03d.gtime.txt", filename.c_str(), parallelism); // START IO --------------------------- g_grid = new 
RegularGrid3D(Vec3i(X, Y, Z), Vec3b(0, 0, 0)); g_rgt_func = new GridFuncType(g_grid); g_rgt_func->LoadImageFromFile(filename.c_str()); mTimer->StartGlobal(); int taskid = mTimer->StartTask("Topological MaxMinVLabel"); g_topo_grid = new MeshType(g_grid); g_maxv_labeling = new MaxVLType(g_topo_grid, g_rgt_func); g_maxv_labeling->ComputeOutput(); g_topo_func = new MeshFuncType(); g_topo_func->setMeshAndFuncAndMaxVLabeling(g_topo_grid, g_rgt_func, g_maxv_labeling); mTimer->EndTask(taskid); // create a topology function //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- // DO FIRST DISCRETE GRADIENT COMPUTATION WITH NO RESTRICTION //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- taskid = mTimer->StartTask("Topological BaseRobins"); base_grad = new GradType(g_topo_grid); base_grad->ClearAllGradient(); RobinsType* first_robins = new RobinsType(g_topo_grid, g_maxv_labeling, base_grad); first_robins->ComputePairing(); mTimer->EndTask(taskid); //g_topo_alg = new TopologicalGradientUsingAlgorithms<MeshType, MeshFuncType, GradType>(g_topo_func, g_topo_grid, base_grad); //printf("after base first robins:\n"); //g_topo_alg->count_critical_points(4); if (!(need_ASC_3 || need_DSC_3 || need_ASC_1 || need_DSC_1 || needsad)) { g_topo_alg = new TopologicalGradientUsingAlgorithms<MeshType, MeshFuncType, GradType>(g_topo_func, g_topo_grid, base_grad); printf("no accuraccy needed, outputting\n"); RecordGrad(base_grad, gradname); return; } //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- // DO VOLUME 
ACCURATE GRADIENT COMPUTATION WITH NO RESTRICTION //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- // IF WE WANT NUMERIC ACCURACY, WE WILL NEED NUMEIC GRADIENT //------------------------------------------------------------- // Do gradient vectors computation from raw image data printf("computing gradient\n"); taskid = mTimer->StartTask("NumericalTracing GradCompute"); g_rgt_func->ComputeGradFromImage(1); //g_rgt_func->Negate(); mTimer->EndTask(taskid); // CREATE RESTRICTION MAP - WILL NEED DenseLabeling<char>* restriction_labels = new DenseLabeling<char>(g_topo_grid->numCells()); restriction_labels->SetAll(0); // we will always need a constrained robins alg RobinsType* constrained_robins = new RobinsType(g_topo_grid, g_maxv_labeling, restriction_labels, base_grad); //------------------------------------------------------------- // IF WE WANT ACCURATE 3-MANIFOLDS, COMPUTE SIMPLIFIED MAPS //------------------------------------------------------------- if (need_ASC_3 || need_DSC_3 || needsad) { taskid = mTimer->StartTask("Topological SimplifiedExtremumGraph"); SimplifiedExtremumGraph<MeshType, MeshFuncType, GradType>* simplified_ext_graph = new SimplifiedExtremumGraph<MeshType, MeshFuncType, GradType>(g_topo_grid, g_topo_func, base_grad); if ((need_ASC_3 && need_DSC_3) || needsad) { simplified_ext_graph->SetMode(SimplifiedExtremumGraph<MeshType, MeshFuncType, GradType>::EXTGRAPHMODE::BOTH); } else if (need_ASC_3) { simplified_ext_graph->SetMode(SimplifiedExtremumGraph<MeshType, MeshFuncType, GradType>::EXTGRAPHMODE::MINS); } else { simplified_ext_graph->SetMode(SimplifiedExtremumGraph<MeshType, MeshFuncType, GradType>::EXTGRAPHMODE::MAXS); } 
simplified_ext_graph->ComputeMinMapFromGradient(g_pre_simp_threshold); mTimer->EndTask(taskid); printf("done creating simplified extremum graph\n"); g_edge_map = new VertexLabelingToBoundaryLabeling<int, MaxVLType>(g_topo_grid, restriction_labels); g_edge_map->InitializeFirst(); //UFMergeGraph<MeshFuncType>* asdf = new UFMergeGraph<MeshFuncType>(); // NOW BUILD A TERMINAL MAP FROM THE SIMPLIFIED MIN/MAX HIERARCHIES // DO ASCENDING MANIFOLDS if (need_ASC_3 || needsad) { // a simplified map means that we only care about accurate boundaries for // extrema that persist above a given threshold. // per Julien's observation, we can compute the extremal simplification graphs // without spending the cost of doing a full MS Complex (even though we have the gradient) taskid = mTimer->StartTask("Topological SimExtRBASC"); std::unordered_map<INDEX_TYPE, INT_TYPE> extmapASC; for (auto p : simplified_ext_graph->mMinGraph->mCellIndexToListIdMap) { extmapASC[p.first] = simplified_ext_graph->mMinGraph->Representative(p.second); } GridSimplifiedExtremalRegionBuilder<ComparerASC, GridFuncType, MeshType>* test_simp_reg_builder_asc = new GridSimplifiedExtremalRegionBuilder<ComparerASC, GridFuncType, MeshType>(g_rgt_func, g_grid, g_topo_grid); test_simp_reg_builder_asc->BeginIntegration(extmapASC); mTimer->EndTask(taskid); //test_simp_reg_builder_asc->GetOutputLabels()->OutputToIntFile("extremal_asc.raw"); // now do actual numeric integration using simplified extremum regions map as target taskid = mTimer->StartTask("NumericalTracing NewIntegrationASC"); IntegratorTypeASC* newintegrator_asc = new IntegratorTypeASC(g_rgt_func, g_grid, error_threshold, gradient_threshold, iteration_limit); newintegrator_asc->BeginIntegration(test_simp_reg_builder_asc->GetIdMap(), test_simp_reg_builder_asc->GetOutputLabels(), true); //newintegrator_asc->GetOutputLabels()->OutputToIntFile("newintegrator_asc.raw"); mTimer->EndTask(taskid); 
//test_simp_reg_builder_asc->GetOutputLabels()->OutputToIntFile("integrated_asc.raw"); // REMOVE DISCONNECTED COMPONENTS taskid = mTimer->StartTask("Removed CleanerASC"); IsolatedCCRegionRemoverNEW<ComparerASC, GridFuncType>* cleaner1_asc = new IsolatedCCRegionRemoverNEW<ComparerASC, GridFuncType>(g_rgt_func, newintegrator_asc->GetOutputLabels()); printf("Removing Disconnected Regions\n"); cleaner1_asc->ComputeOutput(); printf("here2\n"); mTimer->EndTask(taskid); //newintegrator_asc->GetOutputLabels()->OutputToIntFile("newintegrator_cleaned_asc.raw"); //// TEST FIX //auto lab = test_simp_reg_builder_asc->GetOutputLabels(); //map<pair<int, int>, int> counter; //for (INDEX_TYPE id = 0; id < g_grid->NumElements(); id++) { // Vec3l t_neighbors[6]; // Vec3l t_coords = g_grid->XYZ3d(id); // int t_num_neighbors = g_grid->GatherExistingNeighborsSameBdry6(t_coords, t_neighbors); // int lab1 = lab->GetLabel(id); // for (int i = 0; i < t_num_neighbors; i++) { // INDEX_TYPE t_neighbor_vertex = g_grid->Index3d(t_neighbors[i]); // int lab2 = lab->GetLabel(t_neighbor_vertex); // if (lab1 != lab2) { // if (lab1 < lab2) { // pair<int, int> p(lab1, lab2); // if (counter.count(p) == 0) counter[p] = 1; // else (counter[p]++); // } // else { // pair<int, int> p(lab2, lab1); // if (counter.count(p) == 0) counter[p] = 1; // else (counter[p]++); // } // } // } //} //for (auto p : counter) { // if (p.first.first != -1 && p.second > 2000) // printf("<%d, %d>=%d\n", p.first.first, p.first.second, p.second); //} // add this guy's contribution to the restriction labeling taskid = mTimer->StartTask("Topological EdgeMapASC"); g_edge_map->ComputeMINBoundary(newintegrator_asc->GetOutputLabels()); mTimer->EndTask(taskid); } if (need_DSC_3 || needsad) { // DO DESCENDING MANIFOLDS taskid = mTimer->StartTask("Topological SimExtRBDSC"); std::unordered_map<INDEX_TYPE, INT_TYPE> extmapDSC; for (auto p : simplified_ext_graph->mMaxGraph->mCellIndexToListIdMap) { 
extmapDSC[g_maxv_labeling->Cell2HighestVertex(p.first)] = simplified_ext_graph->mMaxGraph->Representative(p.second); } GridSimplifiedExtremalRegionBuilder<ComparerDSC, GridFuncType, MeshType>* test_simp_reg_builder_dsc = new GridSimplifiedExtremalRegionBuilder<ComparerDSC, GridFuncType, MeshType>(g_rgt_func, g_grid, g_topo_grid); test_simp_reg_builder_dsc->BeginIntegration(extmapDSC); mTimer->EndTask(taskid); //test_simp_reg_builder_dsc->GetOutputLabels()->OutputToIntFile("extremal_dsc.raw"); taskid = mTimer->StartTask("NumericalTracing NewIntegrationDSC"); IntegratorTypeDSC* newintegrator_dsc = new IntegratorTypeDSC(g_rgt_func, g_grid, error_threshold, gradient_threshold, iteration_limit); newintegrator_dsc->BeginIntegration(test_simp_reg_builder_dsc->GetIdMap(), test_simp_reg_builder_dsc->GetOutputLabels(), true); //newintegrator_dsc->GetOutputLabels()->OutputToIntFile("newintegrator_dsc.raw"); mTimer->EndTask(taskid); //test_simp_reg_builder_dsc->GetOutputLabels()->OutputToIntFile("integrated_dsc.raw"); // REMOVE DISCONNECTED COMPONENTS taskid = mTimer->StartTask("NumericalTracing CleaningDSC"); IsolatedCCRegionRemoverNEW<ComparerDSC, GridFuncType>* cleaner1_dsc = new IsolatedCCRegionRemoverNEW<ComparerDSC, GridFuncType>(g_rgt_func, newintegrator_dsc->GetOutputLabels()); printf("Removing Disconnected Regions\n"); cleaner1_dsc->ComputeOutput(); printf("here2\n"); mTimer->EndTask(taskid); //newintegrator_dsc->GetOutputLabels()->OutputToIntFile("newintegrator_cleaned_dsc.raw"); //// TEST FIX //auto lab = test_simp_reg_builder_dsc->GetOutputLabels(); //map<pair<int, int>, int> counter; //for (INDEX_TYPE id = 0; id < g_grid->NumElements(); id++) { // Vec3l t_neighbors[6]; // Vec3l t_coords = g_grid->XYZ3d(id); // int t_num_neighbors = g_grid->GatherExistingNeighborsSameBdry6(t_coords, t_neighbors); // int lab1 = lab->GetLabel(id); // for (int i = 0; i < t_num_neighbors; i++) { // INDEX_TYPE t_neighbor_vertex = g_grid->Index3d(t_neighbors[i]); // int lab2 = 
lab->GetLabel(t_neighbor_vertex); // if (lab1 != lab2) { // if (lab1 < lab2) { // pair<int, int> p(lab1, lab2); // if (counter.count(p) == 0) counter[p] = 1; // else (counter[p]++); // } // else { // pair<int, int> p(lab2, lab1); // if (counter.count(p) == 0) counter[p] = 1; // else (counter[p]++); // } // } // } //} //for (auto p : counter) { // if (p.first.first != -1 && p.second > 2000) // printf("<%d, %d>=%d\n", p.first.first, p.first.second, p.second); //} // add this guy's contribution to the restriction labeling taskid = mTimer->StartTask("Topological EdgeMapDSC"); g_edge_map->ComputeMAXBoundary(newintegrator_dsc->GetOutputLabels(), g_maxv_labeling); mTimer->EndTask(taskid); } //restriction_labels->OutputToFile("boundary_labels_after_3m.raw"); if (outputdebug) { //restriction_labels->OutputToFile("boundary_labels_after_3m.raw"); } } //------------------------------------------------------------- // IF ALL WE NEED IS ACCURATE ASC/DSC 3-m then recompute grad and exit //------------------------------------------------------------- if (!(need_ASC_1 || need_DSC_1)) { taskid = mTimer->StartTask("Topological ConformingGrad"); printf("redoing discrete gradient in changed boundarids\n"); std::vector<INDEX_TYPE> topo_index_partition; int num_threads; #pragma omp parallel { #pragma omp single { num_threads = omp_get_num_threads(); ArrayIndexPartitioner::EvenChunkSplit(g_topo_grid->numCells(), num_threads, topo_index_partition); } int thread_num = omp_get_thread_num(); INDEX_TYPE threadfixcont = 0; // iterate over all vertices MeshType::DCellsIterator verts(g_topo_grid, 0, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]); for (verts.begin(); verts.valid(); verts.advance()) { INDEX_TYPE vert_GI = verts.value(); bool hasdiff = false; if (restriction_labels->GetLabel(vert_GI) > 0) hasdiff = true; if (!hasdiff) { MeshType::AdjacentCellsIterator edgeit(g_topo_grid); //bool hasdiff = false; for (edgeit.begin(vert_GI); edgeit.valid(); edgeit.advance()) { 
INDEX_TYPE edge_GI = edgeit.value(); if (restriction_labels->GetLabel(edge_GI) > 0 && g_maxv_labeling->Cell2HighestVertex(edge_GI) == vert_GI) { hasdiff = true; break; } } } if (hasdiff) { constrained_robins->ComputeLowerStar(vert_GI); threadfixcont++; } } #pragma omp critical { printf("thread %d did %llu fixed vertices\n", thread_num, threadfixcont); } } mTimer->EndTask(taskid); //printf("asdf\n"); //BasicHardSimplifyGradient(base_grad, restriction_labels); g_topo_alg = new TopologicalGradientUsingAlgorithms<MeshType, MeshFuncType, GradType>(g_topo_func, g_topo_grid, base_grad); //printf("after Second robins:\n"); //g_topo_alg->count_critical_points(4); RecordGrad(base_grad, gradname); return; } //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- // DO LINES ACCURATE GRADIENT COMPUTATION //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- if (need_ASC_1) { taskid = mTimer->StartTask("Numerical SaddleIntASC"); int labeltarget = 4; g_digitizing_streamline_integrator_asc = new StreamlineIntegratorTypeASC(g_grid, g_rgt_func, g_topo_grid, error_threshold, gradient_threshold, iteration_limit); g_digitizing_streamline_integrator_asc->SetDigitizingTarget(restriction_labels, labeltarget); ReIntegrateUpFrom2Saddles(base_grad); mTimer->EndTask(taskid); } if (need_DSC_1) { taskid = mTimer->StartTask("Numerical SaddleIntDSC"); int labeltarget = 5; g_digitizing_streamline_integrator_dsc = new StreamlineIntegratorTypeDSC(g_grid, g_rgt_func, g_topo_grid, error_threshold, gradient_threshold, iteration_limit); g_digitizing_streamline_integrator_dsc->SetDigitizingTarget(restriction_labels, labeltarget); ReIntegrateDownFrom1Saddles(base_grad); 
mTimer->EndTask(taskid); } //restriction_labels->OutputToFile("boundary_labels_final.raw"); //------------------------------------------------------------- // NOW REDO DISCRETE GRADIENT IN NEIGHBORHOOD //------------------------------------------------------------- taskid = mTimer->StartTask("Topological ConformingGrad"); std::vector<INDEX_TYPE> topo_index_partition; int num_threads; #pragma omp parallel { #pragma omp single { num_threads = omp_get_num_threads(); ArrayIndexPartitioner::EvenChunkSplit(g_topo_grid->numCells(), num_threads, topo_index_partition); } int thread_num = omp_get_thread_num(); INDEX_TYPE threadfixcont = 0; // iterate over all vertices MeshType::DCellsIterator verts(g_topo_grid, 0, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]); for (verts.begin(); verts.valid(); verts.advance()) { INDEX_TYPE vert_GI = verts.value(); bool hasdiff = false; if (restriction_labels->GetLabel(vert_GI) > 0) hasdiff = true; if (!hasdiff) { MeshType::AdjacentCellsIterator cocells(g_topo_grid); //bool hasdiff = false; for (cocells.begin(vert_GI); cocells.valid(); cocells.advance()) { INDEX_TYPE cocell_GI = cocells.value(); if (restriction_labels->GetLabel(cocell_GI) > 0 && g_maxv_labeling->Cell2HighestVertex(cocell_GI) == vert_GI) { hasdiff = true; break; } } } if (hasdiff) { constrained_robins->ComputeLowerStar(vert_GI); threadfixcont++; } } #pragma omp critical { printf("thread %d did %llu fixed vertices\n", thread_num, threadfixcont); } } mTimer->EndTask(taskid); printf("asdf\n"); //BasicHardSimplifyGradient(base_grad, restriction_labels); //bigEnd(parallelism, ); //return 1; mTimer->EndGlobal(); mTimer->WriteTimingsToFile(timingname); g_topo_alg = new TopologicalGradientUsingAlgorithms<MeshType, MeshFuncType, GradType>(g_topo_func, g_topo_grid, base_grad); printf("after Second robins:\n"); g_topo_alg->count_critical_points(4); RecordGrad(base_grad, gradname); return; }; public: 
//------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- // DO LOCAL SIMPLIFICATION //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- //------------------------------------------------------------- void BasicHardSimplifyGradient(GradType* grad, DenseLabeling<char>* restriction_labels) { g_topo_alg = new TopologicalGradientUsingAlgorithms<MeshType, MeshFuncType, GradType>(g_topo_func, g_topo_grid, grad); printf("before local cancellations:\n"); g_topo_alg->count_critical_points(4); printf("checking for loops:\n"); g_topo_alg->CheckGradientForLoops(); printf("done\n"); int counts[3]; for (int i = 0; i < 3; i++) counts[i] = 0; //for (int k = 0; k < 2; k++) { MeshType::DCellsIterator eit(g_topo_grid, 0); for (eit.begin(); eit.valid(); eit.advance()) { INDEX_TYPE eid = eit.value(); //INDEX_TYPE v1 = g_maxv_labeling->Cell2HighestVertex(eid); if (grad->getCritical(eid)) { char lab1 = restriction_labels->GetLabel(eid); std::vector<INDEX_TYPE> candidates; MeshType::CofacetsIterator cfit(g_topo_grid); for (cfit.begin(eid); cfit.valid(); cfit.advance()) { INDEX_TYPE fid = cfit.value(); if (grad->getCritical(fid)) { char lab2 = restriction_labels->GetLabel(fid); if (lab1 == lab2) candidates.push_back(fid); } } //if (candidates.size() == 1) { for (auto fid : candidates) { if (g_topo_grid->boundaryValue(eid) == g_topo_grid->boundaryValue(fid) ) { counts[0]++; grad->setPair(eid, fid); grad->setPair(fid, eid); break; //printf("%d: %llu -> %llu : %d - %d\n",k, eid, fid, lab1, lab2); } } //else if (lab1 != lab2) { // counts[g_topo_grid->dimension(eid)]++; // continue; //} //else { // if (g_topo_grid->boundaryValue(eid) == g_topo_grid->boundaryValue(fid) // //lab1 != lab2 && // 
/*g_maxv_labeling->Before(g_topo_grid->VertexNumberFjkromCellID(g_robin_alg->lowest_vertex(fid)), // g_topo_grid->VertexNumberFromCellID(g_robin_alg->lowest_vertex(eid)))*/) { // base_grad->setPair(eid, fid); // base_grad->setPair(fid, eid); // break; // //printf("%d: %llu -> %llu : %d - %d\n",k, eid, fid, lab1, lab2); // } //} } //} } MeshType::DCellsIterator eit2(g_topo_grid, 3); for (eit2.begin(); eit2.valid(); eit2.advance()) { INDEX_TYPE eid = eit2.value(); //INDEX_TYPE v1 = g_maxv_labeling->Cell2HighestVertex(eid); if (grad->getCritical(eid)) { char lab1 = restriction_labels->GetLabel(eid); std::vector<INDEX_TYPE> candidates; MeshType::FacetsIterator cfit(g_topo_grid); for (cfit.begin(eid); cfit.valid(); cfit.advance()) { INDEX_TYPE fid = cfit.value(); if (grad->getCritical(fid)) { char lab2 = restriction_labels->GetLabel(fid); if (lab1 == lab2) candidates.push_back(fid); } } //if (candidates.size() == 1) { for (auto fid : candidates) { if (g_topo_grid->boundaryValue(eid) == g_topo_grid->boundaryValue(fid) ) { counts[2]++; grad->setPair(eid, fid); grad->setPair(fid, eid); break; //printf("%d: %llu -> %llu : %d - %d\n",k, eid, fid, lab1, lab2); } } //else if (lab1 != lab2) { // counts[g_topo_grid->dimension(eid)]++; // continue; //} //else { // if (g_topo_grid->boundaryValue(eid) == g_topo_grid->boundaryValue(fid) // //lab1 != lab2 && // /*g_maxv_labeling->Before(g_topo_grid->VertexNumberFjkromCellID(g_robin_alg->lowest_vertex(fid)), // g_topo_grid->VertexNumberFromCellID(g_robin_alg->lowest_vertex(eid)))*/) { // base_grad->setPair(eid, fid); // base_grad->setPair(fid, eid); // break; // //printf("%d: %llu -> %llu : %d - %d\n",k, eid, fid, lab1, lab2); // } //} } //} } MeshType::DCellsIterator eit3(g_topo_grid, 2); for (eit3.begin(); eit3.valid(); eit3.advance()) { INDEX_TYPE eid = eit3.value(); //INDEX_TYPE v1 = g_maxv_labeling->Cell2HighestVertex(eid); if (grad->getCritical(eid)) { char lab1 = restriction_labels->GetLabel(eid); std::vector<INDEX_TYPE> 
candidates; MeshType::FacetsIterator cfit(g_topo_grid); for (cfit.begin(eid); cfit.valid(); cfit.advance()) { INDEX_TYPE fid = cfit.value(); if (grad->getCritical(fid)) { char lab2 = restriction_labels->GetLabel(fid); if (lab1 == lab2) candidates.push_back(fid); } } //if (candidates.size() == 1) { for (auto fid : candidates) { if (g_topo_grid->boundaryValue(eid) == g_topo_grid->boundaryValue(fid) ) { counts[1]++; grad->setPair(eid, fid); grad->setPair(fid, eid); break; //printf("%d: %llu -> %llu : %d - %d\n",k, eid, fid, lab1, lab2); } } //else if (lab1 != lab2) { // counts[g_topo_grid->dimension(eid)]++; // continue; //} //else { // if (g_topo_grid->boundaryValue(eid) == g_topo_grid->boundaryValue(fid) // //lab1 != lab2 && // /*g_maxv_labeling->Before(g_topo_grid->VertexNumberFjkromCellID(g_robin_alg->lowest_vertex(fid)), // g_topo_grid->VertexNumberFromCellID(g_robin_alg->lowest_vertex(eid)))*/) { // base_grad->setPair(eid, fid); // base_grad->setPair(fid, eid); // break; // //printf("%d: %llu -> %llu : %d - %d\n",k, eid, fid, lab1, lab2); // } //} } //} } //} //} for (int i = 0; i < 3; i++) printf("dim-%d crits from mismatch\n", counts[i]); printf("after local cancellations:\n"); g_topo_alg->count_critical_points(4); printf("chekcing for loops:\n"); g_topo_alg->CheckGradientForLoops(); } void RecordGrad(GradType* grad, const char* gradname) { printf("setting dim asc man\n"); g_topo_alg->setAscendingManifoldDimensions(); printf("outputting to file %s\n", gradname); grad->outputToFile(gradname); //return 1; } void BuildGradient(int x, int y, int z, char* fname, int needa1, int needd1, int needa2, int needd2, int psimp) { X = x; Y = y; Z = z; filename = std::string(fname); error_threshold = 0.0001; gradient_threshold = 0.0; iteration_limit = 500; need_ASC_1 = needa1; need_DSC_1 = needd1; need_ASC_3 = needa2; need_DSC_1 = needd2; g_pre_simp_threshold = psimp; parallelism = omp_get_num_threads(); printf("dims=(%d,%d,%d)\nfile=%s\nintegration parameters: e=%f, gt=%f, 
il=%d\nondemandacc: a1=%d, d1=%d, a2=%d, d2=%d, ps=%f\npar=%d\n", X, Y, Z, fname, error_threshold, gradient_threshold, iteration_limit, need_ASC_1, need_DSC_1, need_ASC_3, need_DSC_3, g_pre_simp_threshold, parallelism); do_work(); } void BuildGradient(int argc, char** argv) { // read command line options GetOptions(argc, argv); do_work(); } }; // class OndemandGradientBuilder } // namespace GInt #endif
lastpass_fmt_plug.c
/* LastPass offline cracker patch for JtR. Hacked together during January of 2013 by
 * Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * All the hard work was done by Milen (author of hashkill).
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted. */

/* Standard JtR plugin stanza: the same file is included three times by the
 * build — once to declare the format, once to register it, once for the body. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_lastpass;
#elif FMT_REGISTERS_H
john_register_one(&fmt_lastpass);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include <openssl/aes.h>
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"

#define FORMAT_LABEL "lp"
#define FORMAT_NAME "LastPass offline"
#ifdef MMX_COEF_SHA256
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
/* Without the SSE PBKDF2 path, keys are hashed one at a time; OpenMP scaling
 * of max_keys_per_crypt happens in init() below. */
#ifdef MMX_COEF_SHA256
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

/* Self-test vector: ciphertext is "$lp$<email>$<32-hex digest>". */
static struct fmt_tests lastpass_tests[] = {
	{"$lp$hackme@mailinator.com$6f5d8cec3615fc9ac7ba2e0569bce4f5", "strongpassword"},
	{NULL}
};

#if defined (_OPENMP)
static int omp_t = 1;
#endif
/* Per-candidate plaintexts and their 256-bit crypt results (only the first
 * BINARY_SIZE bytes are compared against the stored binary). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];

/* NOTE(review): `iterations` is never read — crypt_all() hard-codes 500
 * PBKDF2 rounds. The salt is the account e-mail (max 32 bytes). */
static struct custom_salt {
	int iterations;
	int salt_length;
	unsigned char salt[32];
} *cur_salt;

/* Scales keys-per-crypt by thread count and allocates the key/result arrays. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Returns nonzero iff every char of q is a hex digit (vacuously true for "";
 * callers check the length first). */
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

/* Validates "$lp$<email up to 32 chars>$<exactly 32 hex chars>".
 * NOTE(review): strtok() is not reentrant; JtR valid() callers are
 * single-threaded, so this matches the era's plugin style. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	if (strncmp(ciphertext, "$lp$", 4))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 4;
	if ((p = strtok(ctcopy, "$")) == NULL)	/* email */
		goto err;
	if (strlen(p) > 32)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* hash */
		goto err;
	if (strlen(p) != 32)
		goto err;
	if (!ishex(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* Extracts the salt (the e-mail address) into a static custom_salt.
 * cs.salt is not NUL-terminated when the e-mail is exactly 32 chars;
 * salt_length is taken from the token, so downstream use is length-based. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += 4;	/* skip over "$lp$" */
	p = strtok(ctcopy, "$");
	strncpy((char*)cs.salt, p, 32);
	cs.salt_length = strlen((char*)p);
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Decodes the trailing 32-hex-char field into BINARY_SIZE raw bytes. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Hash-table bucket functions over the first word of the crypt result,
 * at the standard JtR mask widths. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Core: key = PBKDF2-HMAC-SHA256(password, email, 500 rounds, 32 bytes);
 * verification value = AES-256-ECB encryption of the fixed block
 * "lastpass rocks\x02\x02" under that key.
 * Note the loop itself is inside #ifdef _OPENMP: without OpenMP (and without
 * SSE) max_keys_per_crypt stays 1, so the single pass with index == 0 covers
 * every candidate — the pattern is correct by configuration. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		AES_KEY akey;
#ifdef MMX_COEF_SHA256
		/* SSE path: derive SSE_GROUP_SZ_SHA256 keys per call. */
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		ARCH_WORD_32 key[MAX_KEYS_PER_CRYPT][8];
		union {
			ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			x.pout[i] = key[i];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->salt_length, 500, &(x.poutc), 32, 0);
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			memset(&akey, 0, sizeof(AES_KEY));
			AES_set_encrypt_key((unsigned char*)key[i], 256, &akey);
			AES_ecb_encrypt((unsigned char*)"lastpass rocks\x02\x02", (unsigned char*)crypt_out[i+index], &akey, AES_ENCRYPT);
		}
#else
		/* Scalar path: one key per iteration. */
		unsigned char key[32];
		pbkdf2_sha256((unsigned char*)saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->salt_length, 500, key, 32, 0);
#if !ARCH_LITTLE_ENDIAN
		/* pbkdf2_sha256 emits host-endian words; swap to match stored binary. */
		{
			int i;
			for (i = 0; i < 8; ++i) {
				((ARCH_WORD_32*)key)[i] = JOHNSWAP(((ARCH_WORD_32*)key)[i]);
			}
		}
#endif
		memset(&akey, 0, sizeof(AES_KEY));
		AES_set_encrypt_key((unsigned char*)key, 256, &akey);
		AES_ecb_encrypt((unsigned char*)"lastpass rocks\x02\x02", (unsigned char*)crypt_out[index], &akey, AES_ENCRYPT);
#endif
	}
	return count;
}

/* Any candidate match? */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Full BINARY_SIZE bytes already compared in cmp_one, so nothing more to do. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Stores (and truncates to PLAINTEXT_LENGTH) the candidate password. */
static void lastpass_set_key(char *key, int index)
{
	int saved_key_length = strlen(key);
	if (saved_key_length > PLAINTEXT_LENGTH)
		saved_key_length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format descriptor: field order is fixed by struct fmt_main / fmt_methods. */
struct fmt_main fmt_lastpass = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		lastpass_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		lastpass_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
mc_funcs.h
#ifndef MC_FUNCS
#define MC_FUNCS

#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <string>
#include <iomanip>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <torch/extension.h>
#include <ATen/record_function.h>
#include <torch/csrc/autograd/VariableTypeUtils.h>
#include <vector>
#include <iostream>
/* Serial fallback: without OpenMP the thread-query functions collapse to a
 * single thread 0 so the code below compiles unchanged. */
#ifdef _OPENMP
#include <omp.h>
#pragma message "Using OpenMP"
#else
#define omp_get_max_threads() 1
#define omp_get_num_threads() 1
#define omp_get_thread_num() 0
#endif
#include <libxsmm.h>
//#include <libxsmm_intrinsics_x86.h>
#include <immintrin.h>

// Per-thread libxsmm RNG state (thread_local: each OpenMP thread owns its
// own pointer, so the assignment inside the parallel region below is safe).
static thread_local unsigned int *rnd_state = NULL;

// (Re)seeds every OpenMP thread's libxsmm RNG state with seed + thread id,
// destroying any previously created state first.
void set_rnd_seed(unsigned int seed)
{
#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    if(rnd_state) {
      libxsmm_rng_destroy_extstate(rnd_state);
      rnd_state = NULL;
    }
    rnd_state = libxsmm_rng_create_extstate(seed+tid);
  }
}

// One-time library setup: initialize libxsmm and seed the per-thread RNGs.
void init_libxsmm()
{
  libxsmm_init();
  set_rnd_seed(0);
}

// FP32 dropout kernels; definitions live in the accompanying .cpp/extension.
struct f32 {
  std::vector<at::Tensor> dropout_forward(torch::Tensor input, float p, bool train);
  at::Tensor dropout_backward(torch::Tensor input, torch::Tensor dropout_mask, float p);
};

// --------------------------------------- copy() -----------------------------------------------------------------
// JIT-dispatches and runs a libxsmm IDENTITY (copy) unary kernel over an
// N x M tile with leading dimensions LDI/LDO; aborts the process if the
// dispatch fails.
// NOTE(review): argument order passed to libxsmm_dispatch_meltw_unary here is
// (M, N, &LDI, &LDO, in_dtype, out_dtype, compute_dtype, flags, type) — this
// API has shifted across libxsmm versions; confirm against the pinned version.
inline void f32_copy(int N, int M, int LDO, int LDI, libxsmm_meltw_unary_param *params)
{
  libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
  libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY;
  libxsmm_datatype compute_dtype = LIBXSMM_DATATYPE_F32;
  libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, N, &LDI, &LDO, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, compute_dtype, unary_flags, unary_type);
  if ( kernel == NULL ) {
    fprintf( stderr, "JIT for f32 to f32 copy failed. Bailing...!\n");
    exit(-1);
  }
  kernel(params);
}

// JIT-dispatches and runs a libxsmm XOR unary kernel that zeroes a 1 x M
// FP32 row (in == out stride M); aborts the process on dispatch failure.
inline void zero(int M, libxsmm_meltw_unary_param *params)
{
  libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
  libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_XOR;
  libxsmm_datatype dtype = LIBXSMM_DATATYPE_F32;
  libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, 1, &M, &M, dtype, dtype, dtype, unary_flags, unary_type);
  if ( kernel == NULL ) {
    fprintf( stderr, "JIT for zero kernel failed. Bailing...!\n");
    exit(-1);
  }
  kernel(params);
}

#endif
for-5.c
// { dg-options "-fopenmp" } void bar (void *); __attribute__((noinline, noclone)) void foo (void *qx, void *rx, void *sx, int n) { unsigned short (*q)[n], (*r)[n], (*s)[n], (*p)[n]; q = (typeof (q)) qx; r = (typeof (r)) rx; s = (typeof (s)) sx; int t = 1; int o = -1; #pragma omp for for (p = q; p != r; p += t) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = s; p != r; p += o) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = q; p != r; p = p + t) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = s; p != r; p = p + o) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = q; p != r; p = t + p) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = s; p != r; p = o + p) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = q; p != r; p += 2) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = s; p != r; p -= 2) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = q; p != r; p = p + 3) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = s; p != r; p = p - 3) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = q; p != r; p = 4 + p) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); #pragma omp for for (p = s; p != r; p = -5 + p) /* { dg-error "increment is not constant 1 or -1" } */ bar (p); }
model_initializer.h
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & Newcastle University for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_MODEL_INITIALIZER_H_ #define CORE_MODEL_INITIALIZER_H_ #include <ctime> #include <string> #include <vector> #include "core/container/math_array.h" #include "core/diffusion/diffusion_grid.h" #include "core/resource_manager.h" #include "core/simulation.h" #include "core/util/random.h" class EulerGrid; class StencilGrid; class RungaKuttaGrid; namespace bdm { struct ModelInitializer { /// Creates a 3D cubic grid of agents and adds them to the /// ExecutionContext. Type of the agent is determined by the return /// type of parameter agent_builder. /// /// ModelInitializer::Grid3D(8, 10, [](const Double3& pos){ /// return Cell(pos); }); /// @param agents_per_dim number of agents on each axis. /// Number of generated agents = /// `agents_per_dim ^ 3` /// @param space space between the positions - e.g space = 10: /// positions = `{(0, 0, 0), (0, 0, 10), (0, 0, /// 20), ... }` /// @param agent_builder function containing the logic to instantiate a /// new agent. 
Takes `const /// Double3&` as input parameter /// template <typename Function> static void Grid3D(size_t agents_per_dim, double space, Function agent_builder) { #pragma omp parallel { auto* sim = Simulation::GetActive(); auto* ctxt = sim->GetExecutionContext(); #pragma omp for for (size_t x = 0; x < agents_per_dim; x++) { auto x_pos = x * space; for (size_t y = 0; y < agents_per_dim; y++) { auto y_pos = y * space; for (size_t z = 0; z < agents_per_dim; z++) { auto* new_agent = agent_builder({x_pos, y_pos, z * space}); ctxt->AddAgent(new_agent); } } } } } /// Creates a 3D grid of agents and adds them to the /// ExecutionContext. Type of the agent is determined by the return /// type of parameter agent_builder. /// /// ModelInitializer::Grid3D({8,6,4}, 10, [](const Double3& /// pos){ return Cell(pos); }); /// @param agents_per_dim number of agents on each axis. /// Number of generated agents = /// `agents_per_dim[0] * agents_per_dim[1] * /// agents_per_dim[2]` /// @param space space between the positions - e.g space = 10: /// positions = `{(0, 0, 0), (0, 0, 10), (0, 0, /// 20), ... }` /// @param agent_builder function containing the logic to instantiate a /// new agent. Takes `const /// Double3&` as input parameter /// template <typename Function> static void Grid3D(const std::array<size_t, 3>& agents_per_dim, double space, Function agent_builder) { #pragma omp parallel { auto* sim = Simulation::GetActive(); auto* ctxt = sim->GetExecutionContext(); #pragma omp for for (size_t x = 0; x < agents_per_dim[0]; x++) { auto x_pos = x * space; for (size_t y = 0; y < agents_per_dim[1]; y++) { auto y_pos = y * space; for (size_t z = 0; z < agents_per_dim[2]; z++) { auto* new_agent = agent_builder({x_pos, y_pos, z * space}); ctxt->AddAgent(new_agent); } } } } } /// Creates agents on the given positions and adds them to the /// ExecutionContext. 
/// /// @param positions positions of the agents to be /// @param agent_builder function containing the logic to instantiate a /// new agent. Takes `const /// Double3&` as input parameter /// template <typename Function> static void CreateAgents(const std::vector<Double3>& positions, Function agent_builder) { #pragma omp parallel { auto* sim = Simulation::GetActive(); auto* ctxt = sim->GetExecutionContext(); #pragma omp for for (size_t i = 0; i < positions.size(); i++) { auto* new_agent = agent_builder({positions[i][0], positions[i][1], positions[i][2]}); ctxt->AddAgent(new_agent); } } } /// Creates agents with random positions and adds them to the /// ExecutionContext. Agent creation is parallelized. /// /// @param[in] min The minimum position value /// @param[in] max The maximum position value /// @param[in] num_agents The number agents /// @param[in] agent_builder function containing the logic to instantiate a /// new agent. Takes `const /// Double3&` as input parameter /// \param[in] rng Uses the given DistributionRng. /// if rng is a nullptr, this function uses a /// uniform distribution between [min, max[ template <typename Function> static void CreateAgentsRandom(double min, double max, uint64_t num_agents, Function agent_builder, DistributionRng<double>* rng = nullptr) { #pragma omp parallel { auto* sim = Simulation::GetActive(); auto* ctxt = sim->GetExecutionContext(); auto* random = sim->GetRandom(); #pragma omp for for (uint64_t i = 0; i < num_agents; i++) { if (rng != nullptr) { Double3 pos; bool in_range = false; do { pos = rng->Sample3(); in_range = (pos[0] >= min) && (pos[0] <= max) && (pos[1] >= min) && (pos[1] <= max) && (pos[2] >= min) && (pos[2] <= max); } while (!in_range); auto* new_agent = agent_builder(pos); ctxt->AddAgent(new_agent); } else { auto* new_agent = agent_builder(random->UniformArray<3>(min, max)); ctxt->AddAgent(new_agent); } } } } /// Creates agents on surface and adds them to the ExecutionContext. 
/// The x and y positions are defined by xmin, xmax, deltax and ymin, ymax, /// deltay. The z position is calculated using `f`. Agent creation is /// parallelized. /// /// auto construct = [](const Double3& position) { /// Cell* cell = new Cell(position); /// cell->SetDiameter(10); /// return cell; /// }; /// auto f = [](const double* x, const double* params) { /// // 10 * sin(x/20) + 10 * sin(y/20) /// return 10 * std::sin(x[0] / 20.) + 10 * std::sin(x[1] / 20.0); /// }; /// ModelInitializer::CreateAgentsOnSurface(f, {}, -100, 100, 10, -100, /// 100, 10, construct); /// /// \param[in] f function that defines the surface /// \param[in] fn_params Parameters that will be passed to `f` as /// second argument. /// @param[in] xmin Minimum x coordinate on which a agent will be /// created. /// @param[in] xmax Maximum x coordinate on which a agent will be /// created. /// @param[in] deltax Space between two agents on the x-axis. /// @param[in] ymin Minimum y coordinate on which a agent will be /// created. /// @param[in] ymax Maximum y coordinate on which a agent will be /// created. /// @param[in] deltay Space between two agents on the y-axis. /// @param[in] agent_builder function containing the logic to instantiate a /// new agent. 
Takes `const Double3&` as input
  /// parameter
  template <typename Function>
  static void CreateAgentsOnSurface(
      double (*f)(const double*, const double*),
      const FixedSizeVector<double, 10>& fn_params, double xmin, double xmax,
      double deltax, double ymin, double ymax, double deltay,
      Function agent_builder) {
#pragma omp parallel
    {
      auto* sim = Simulation::GetActive();
      auto* ctxt = sim->GetExecutionContext();
      // Number of full grid steps per axis; positions beyond the last full
      // step (including the xmax/ymax end points) are not generated.
      auto xiterations =
          static_cast<uint64_t>(std::floor((xmax - xmin) / deltax));
      auto yiterations =
          static_cast<uint64_t>(std::floor((ymax - ymin) / deltay));
#pragma omp for
      for (uint64_t xit = 0; xit < xiterations; ++xit) {
        double x = xmin + xit * deltax;
        for (uint64_t yit = 0; yit < yiterations; ++yit) {
          double y = ymin + yit * deltay;
          // z (pos[2]) is computed from (x, y) by the surface function f.
          Double3 pos = {x, y};
          pos[2] = f(pos.data(), fn_params.data());
          ctxt->AddAgent(agent_builder(pos));
        }
      }
    }
  }

  /// Creates agents on surface and adds them to the ExecutionContext.
  /// The x and y positions are determined by a uniform distribution [xmin,
  /// xmax[ and [ymin, ymax[. The z position is calculated using `f`. Agent
  /// creation is parallelized.
  ///
  ///     auto construct = [](const Double3& position) {
  ///       Cell* cell = new Cell(position);
  ///       cell->SetDiameter(10);
  ///       return cell;
  ///     };
  ///     auto f = [](const double* x, const double* params) {
  ///       // 10 * sin(x/20) + 10 * sin(y/20)
  ///       return 10 * std::sin(x[0] / 20.) + 10 * std::sin(x[1] / 20.0);
  ///     };
  ///     ModelInitializer::CreateAgentsOnSurfaceRndm(f, {}, -100, 100, -100,
  ///                                                 100, construct);
  ///
  /// \param[in] f function that defines the surface
  /// \param[in] fn_params Parameters that will be passed to `f` as
  ///            second argument.
  /// @param[in] xmin Minimum x coordinate on which a agent will be
  ///            created.
  /// @param[in] xmax Maximum x coordinate on which a agent will be
  ///            created.
  /// @param[in] ymin Minimum y coordinate on which a agent will be
  ///            created.
  /// @param[in] ymax Maximum y coordinate on which a agent will be
  ///            created.
/// @param[in] agent_builder function containing the logic to instantiate a /// new agent. Takes `const Double3&` as input /// parameter template <typename Function> static void CreateAgentsOnSurfaceRndm( double (*f)(const double*, const double*), const FixedSizeVector<double, 10>& fn_params, double xmin, double xmax, double ymin, double ymax, uint64_t num_agents, Function agent_builder) { #pragma omp parallel { auto* sim = Simulation::GetActive(); auto* ctxt = sim->GetExecutionContext(); auto* random = sim->GetRandom(); #pragma omp for for (uint64_t i = 0; i < num_agents; ++i) { Double3 pos = {random->Uniform(xmin, xmax), random->Uniform(ymin, ymax)}; pos[2] = f(pos.data(), fn_params.data()); ctxt->AddAgent(agent_builder(pos)); } } } /// Creates agents with random positions on a sphere and adds them to the /// ExecutionContext. Agent creation is parallelized. /// /// \param[in] center Center of the sphere /// \param[in] radius Radius of the sphere /// @param[in] num_agents The number of agents /// @param[in] agent_builder function containing the logic to instantiate a /// new agent. Takes `const /// Double3&` as input parameter template <typename Function> static void CreateAgentsOnSphereRndm(const Double3& center, double radius, uint64_t num_agents, Function agent_builder) { #pragma omp parallel { auto* sim = Simulation::GetActive(); auto* ctxt = sim->GetExecutionContext(); auto* random = sim->GetRandom(); #pragma omp for for (uint64_t i = 0; i < num_agents; i++) { auto pos = random->Sphere(radius) + center; auto* new_agent = agent_builder(pos); ctxt->AddAgent(new_agent); } } } /// Allows agents to secrete the specified substance. 
Diffusion throughout the
  /// simulation space is automatically taken care of by the DiffusionGrid
  /// class
  ///
  /// @param[in] substance_id The substance identifier
  /// @param[in] substance_name The substance name
  /// @param[in] diffusion_coeff The diffusion coefficient
  /// @param[in] decay_constant The decay constant
  /// @param[in] resolution The resolution of the diffusion grid
  ///
  static void DefineSubstance(size_t substance_id, std::string substance_name,
                              double diffusion_coeff, double decay_constant,
                              int resolution = 10);

  /// Registers `function` as an initializer on the diffusion grid stored
  /// under `substance_id` in the ResourceManager.
  /// NOTE(review): assumes the grid was already defined (e.g. via
  /// DefineSubstance) — what GetDiffusionGrid returns for an unknown id is
  /// not visible here; confirm it cannot be null before the AddInitializer
  /// call.
  template <typename F>
  static void InitializeSubstance(size_t substance_id, F function) {
    auto* sim = Simulation::GetActive();
    auto* rm = sim->GetResourceManager();
    auto diffusion_grid = rm->GetDiffusionGrid(substance_id);
    diffusion_grid->AddInitializer(function);
  }
};

}  // namespace bdm

#endif  // CORE_MODEL_INITIALIZER_H_
GB_binop__eq_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_int64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__eq_int64) // A.*B function (eWiseMult): GB (_AemultB_03__eq_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int64) // A*D function (colscale): GB (_AxD__eq_int64) // D*A function (rowscale): GB (_DxB__eq_int64) // C+=B function (dense accum): GB (_Cdense_accumB__eq_int64) // C+=b function (dense accum): GB (_Cdense_accumb__eq_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int64) // C=scalar+B GB (_bind1st__eq_int64) // C=scalar+B' GB (_bind1st_tran__eq_int64) // C=A+scalar GB (_bind2nd__eq_int64) // C=A'+scalar GB (_bind2nd_tran__eq_int64) // C type: bool // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_INT64 || GxB_NO_EQ_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_int64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__eq_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const 
int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
plm_glasso.c
#include "plm_glasso.h"

#include <math.h>
#include <string.h>

#include "model.h"
#include "type.h"

/* ADMM driver for group-lasso regularized pseudo-likelihood maximization.
 * Alternates: (1) L-BFGS solve for x, (2) group soft-threshold update of z,
 * (3) dual update of u, until the relative dual residual drops below
 * m->tolerance_ret or m->glasso_iter iterations are reached.
 * Returns 0 (diagnostics are printed to stdout). */
int plm_glasso(model_t *m) {
  // Configure the L-BFGS solver.
  lbfgs_parameter_t param;
  lbfgs_parameter_init(&param);
  param.max_iterations = m->iter;
  param.epsilon = 1e-20;
  param.max_linesearch = 20;
  lbfgsfloatval_t fx;
  int nvar = m->nvar;
  int ncol = m->ncol;
  int nsingle = ncol * ALPHA;  // number of single-site parameters
  // Scaled dual variable u and auxiliary variable z, both zero-initialized.
  double *u = (double *)malloc(sizeof(double) * nvar);
  double *z = (double *)malloc(sizeof(double) * nvar);
  memset(u, 0, nvar * sizeof(double));
  memset(z, 0, nvar * sizeof(double));
  double *x = m->x;
  m->glasso_u = u;
  m->glasso_z = z;
  int t = 0;
  while (t < m->glasso_iter) {
    double lambda_rho = m->glasso_lambda / m->glasso_rho;
    // step1: update x
    // x = min_x f(x) + \rho/2 ||x - z + u ||_2^2
    int lbfgs_ret = lbfgs(nvar, m->x, &fx, evaluate_plm_glasso,
                          progress_plm_glasso, m, &param);
    // step2: update z (per-column-pair group soft-thresholding)
    // z_i = S_{\lambda/\rho} (x_i + u_i)
    for (int c1 = 0; c1 < ncol; c1++) {
      for (int c2 = c1 + 1; c2 < ncol; c2++) {
        int offset = INDEX2(c1, c2, 0, 0);
        double *xx = x + offset;
        double *uu = u + offset;
        double *zz = z + offset;
        double norm2 = 0;
        for (int i = 0; i < ALPHA2; i++) {
          norm2 += (xx[i] + uu[i]) * (xx[i] + uu[i]);
        }
        norm2 = sqrt(norm2);
        double ratio = 1.0 - lambda_rho / norm2;
        if (ratio < 0.0) {
          // inactive group: shrink the whole ALPHA2 block to zero
          memset(zz, 0, ALPHA2 * sizeof(double));
        } else {
          for (int i = 0; i < ALPHA2; i++) {
            zz[i] = ratio * (xx[i] + uu[i]);
          }
        }
      }
    }
    // step3: update the scaled dual u over the pair block only
    double *uu = u + nsingle;
    double *xx = x + nsingle;
    double *zz = z + nsingle;
    double eps = 0;
    // FIX: x_norm and u_norm were read uninitialized before being
    // accumulated below; all three norms must start at zero.
    double x_norm = 0.0, u_norm = 0.0, z_norm = 0.0;
    for (int i = 0; i < nvar - nsingle; i++) {
      uu[i] += xx[i] - zz[i];
      eps += uu[i] * uu[i];
      x_norm += xx[i] * xx[i];
      z_norm += zz[i] * zz[i];
      u_norm += uu[i] * uu[i];
    }
    eps = sqrt(eps);
    x_norm = sqrt(x_norm);
    z_norm = sqrt(z_norm);
    u_norm = sqrt(u_norm);
    double eps_ret = eps / x_norm;
    printf(
        "glasso iter = %3d rho= %.3f lambda_rho= %.3f eps= %.6f ret_eps= %.6f "
        "lbfgs_ret= %2d rho= %.4f x_norm= %.3f z_norm=%.3f u_norm= %.3f\n",
        t + 1, m->glasso_rho, lambda_rho, eps, eps_ret, lbfgs_ret,
        m->glasso_rho, x_norm, z_norm, u_norm);
    if (eps_ret < m->tolerance_ret) {
      break;
    }
    m->glasso_rho *= 1.2;  // gradually tighten the ADMM penalty
    t += 1;
  }
  free(u);
  free(z);
  // FIX: function is declared int but had no return statement; reading the
  // return value in a caller was undefined behavior.
  return 0;
}

/* L-BFGS callback: computes the pseudo-likelihood objective plus the ADMM
 * augmented term, and writes the gradient into g. Work is split over columns
 * across threads_num OpenMP threads with per-thread accumulators that are
 * reduced afterwards. Returns the objective value fx. */
lbfgsfloatval_t evaluate_plm_glasso(void *instance, const lbfgsfloatval_t *x,
                                    lbfgsfloatval_t *g, const int n,
                                    const lbfgsfloatval_t step) {
  model_t *model = (model_t *)instance;
  unsigned char *msa = model->msa;
  int nrow = model->nrow;
  int ncol = model->ncol;
  double *w = model->w;
  double neff = model->neff;
  int threads_num = model->threads_num;
  int nsingle = ncol * ALPHA;
  // initialize per-thread accumulators
  double *objective_all = (double *)malloc(threads_num * sizeof(double));
  // FIX: memset's fill value is an int; 0.0 (a double) was implicitly
  // converted — use 0 directly.
  memset(objective_all, 0, sizeof(double) * threads_num);
  double **gradient_all = model->gradient_all;
  for (int t = 0; t < threads_num; ++t) {
    memset(gradient_all[t], 0, sizeof(double) * n);
  }
  int per = (ncol) / threads_num;
#pragma omp parallel for
  for (int t = 0; t < threads_num; t++) {
    // Contiguous column range for this thread; the last thread takes the
    // remainder columns.
    int pos_begin = per * t;
    int pos_end = per * (t + 1);
    if (t == threads_num - 1) {
      pos_end = ncol;
    }
    lbfgsfloatval_t *pre_prob =
        (lbfgsfloatval_t *)malloc(sizeof(lbfgsfloatval_t) * ALPHA);
    lbfgsfloatval_t *prob =
        (lbfgsfloatval_t *)malloc(sizeof(lbfgsfloatval_t) * ALPHA);
    for (int c = pos_begin; c < pos_end; c++) {
      for (int r = 0; r < nrow; r++) {
        unsigned char *seq = msa + r * ncol;
        char aa_c = seq[c];
        for (int aa = 0; aa < ALPHA; aa++) {
          pre_prob[aa] = x[INDEX1(c, aa)];
        }
        // NOTE(review): this memset zeroes the single-site terms loaded just
        // above, so they never contribute to pre_prob; looks unintentional
        // (left as-is to preserve current numerics) — confirm intent.
        memset(pre_prob, 0, sizeof(lbfgsfloatval_t) * ALPHA);
        for (int i = 0; i < ncol; i++) {
          for (int aa = 0; aa < ALPHA; aa++) {
            if (i < c) {
              pre_prob[aa] += x[INDEX2(i, c, seq[i], aa)];
            } else if (i > c) {
              pre_prob[aa] += x[INDEX2(c, i, aa, seq[i])];
            }
          }
        }
        // Softmax normalization over the alphabet at column c.
        double sum = 0.0;
        for (int aa = 0; aa < ALPHA; aa++) {
          sum += exp(pre_prob[aa]);
        }
        double logz = log(sum);
        for (int aa = 0; aa < ALPHA; aa++) {
          prob[aa] = exp(pre_prob[aa]) / sum;
        }
        // objective function
        // NOTE(review): the gradient below is weighted by w[r] but this
        // objective term is not — confirm whether w[r]*(logz - pre_prob)
        // was intended.
        objective_all[t] += logz - pre_prob[aa_c];
        // cal gradients
        gradient_all[t][INDEX1(c, aa_c)] -= w[r];
        for (int aa = 0; aa < ALPHA; aa++) {
          gradient_all[t][INDEX1(c, aa)] += w[r] * prob[aa];
        }
        for (int i = 0; i < ncol; i++) {
          if (i < c) {
            gradient_all[t][INDEX2(i, c, seq[i], aa_c)] -= w[r];
          } else if (i > c) {
            gradient_all[t][INDEX2(c, i, aa_c, seq[i])] -= w[r];
          }
          for (int aa = 0; aa < ALPHA; aa++) {
            if (i < c) {
              gradient_all[t][INDEX2(i, c, seq[i], aa)] += w[r] * prob[aa];
            } else if (i > c) {
              gradient_all[t][INDEX2(c, i, aa, seq[i])] += w[r] * prob[aa];
            }
          }
        }
      }  // end r
    }    // end c
    free(pre_prob);
    free(prob);
  }
  // reduction data
  lbfgsfloatval_t fx = 0.0;
  memset(g, 0, sizeof(lbfgsfloatval_t) * n);
  for (int t = 0; t < threads_num; ++t) {
    fx += objective_all[t];
    for (int i = 0; i < n; ++i) {
      g[i] += gradient_all[t][i];
    }
  }
  // add regularization (L2 on the single-site block)
  lbfgsfloatval_t lambda_single = model->lambda_single * neff;
  // lambda_pair is intentionally unused here: the pair block is regularized
  // by the glasso/ADMM term below instead of an L2 penalty.
  lbfgsfloatval_t lambda_pair = model->lambda_pair * neff;
  for (int i = 0; i < nsingle; i++) {
    fx += lambda_single * x[i] * x[i];
    g[i] += 2.0 * lambda_single * x[i];
  }
  // glasso: ADMM augmented-Lagrangian term rho/2 ||x - z + u||^2 on pairs
  double *u = model->glasso_u;
  double *z = model->glasso_z;
  double rho = model->glasso_rho;
  for (int i = nsingle; i < n; i++) {
    double temp = x[i] - z[i] + u[i];
    fx += rho / 2.0 * temp * temp;
    g[i] += rho * temp;
  }
  free(objective_all);
  return fx;
}

/* L-BFGS progress callback: records the iteration number on the model,
 * logs solver state to model->flog and stdout, and prints the accuracy
 * metrics produced by evaluate_model. Always returns 0 (continue). */
int progress_plm_glasso(void *instance, const lbfgsfloatval_t *x,
                        const lbfgsfloatval_t *g, const lbfgsfloatval_t fx,
                        const lbfgsfloatval_t xnorm,
                        const lbfgsfloatval_t gnorm,
                        const lbfgsfloatval_t step, int n, int k, int ls) {
  model_t *model = (model_t *)instance;
  model->iter = k;
  fprintf(model->flog, "iter= %d fx= %f xnorm = %f gnorm = %f step= %f ", k,
          fx, xnorm, gnorm, step);
  evaluate_model(model);
  printf("iter= %d fx= %f, xnorm = %f, gnorm = %f, step= %f ", k, fx, xnorm,
         gnorm, step);
  printf("orig_acc ");
  for (int i = 0; i < 8; i++) {
    printf("%.4f ", model->mat_acc[i]);
  }
  printf("apc_acc ");
  for (int i = 0; i < 8; i++) {
    printf("%.4f ", model->apc_acc[i]);
  }
  printf("\n");
  return 0;
}
conv_dw_k5_k7_kernel_arm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: haoluo@openailab.com */ #ifndef __CONV_DW_K5_K7_KERNEL_ARM_H_ #define __CONV_DW_K5_K7_KERNEL_ARM_H_ #include <stdio.h> #include <arm_neon.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> void dw_k5s1(float*, float*, float*, float*, int, int, int); static float elem_activation(float tmp, int type) { if (type == 0) { if (tmp < 0.0f) tmp = 0; if (type > 0) tmp = tmp < type ? 
tmp : type;
    }
    return tmp;
}

/* Vector (NEON, 4 lanes) activation; same convention as elem_activation:
 * type < 0 -> identity, type == 0 -> ReLU, type > 0 -> clamp to [0, type]. */
static float32x4_t vector_activation(float32x4_t tmp, int type)
{
    /* FIX: was `if (type == 0)`, which made the inner `type > 0` clamp
     * unreachable dead code; `>= 0` restores the clamp path. */
    if (type >= 0)
    {
        float32x4_t zero = vdupq_n_f32(0.0);
        tmp = vmaxq_f32(tmp, zero);
        if (type > 0)
        {
            float32x4_t max = vdupq_n_f32((float)type);
            tmp = vminq_f32(tmp, max);
        }
    }
    return tmp;
}

/* 5x5 stride-1 depthwise convolution: per-channel dispatch to the
 * dw_k5s1 kernel (prototype above). bias may be NULL. */
void depthwise_conv_k5s1(float* input, float* weight, float* bias, float* output, int input_h, int input_w,
                         int channel, int output_h, int output_w, int activation, int num_thread)
{
    // #pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < channel; c++)
    {
        float* input_cur = (float*)input + c * input_h * input_w;
        float* weight_cur = (float*)weight + c * 25;  /* 5x5 kernel per channel */
        float* output_cur = (float*)output + c * output_h * output_w;
        float* bias_cur = NULL;
        if (bias)
            bias_cur = (float*)bias + c;
        dw_k5s1(input_cur, weight_cur, bias_cur, output_cur, output_h, output_w, activation);
    }
}

/* 5x5 stride-2 depthwise convolution (NEON), processed one channel at a
 * time; the first/last rows and columns are handled as scalar border cases,
 * the interior in 4-wide vector blocks. */
void depthwise_conv_k5s2(float* input_buf, float* weight_buf, float* bias, float* output_buf, int input_h,
                         int input_w, int channel, int output_h, int output_w, int activation, int num_thread)
{
    int input_hw = input_h * input_w;
    int output_hw = output_h * output_w;
    int h_remain = input_h & 0x1;  /* odd input height flag */
    int w_remain = input_w & 0x1;  /* odd input width flag */
    int mid_h = output_h - 2;
    int mid_w = output_w - 2;
    int mid_w_block = mid_w & -4;  /* interior width rounded down to x4 */
    // #pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < channel; c++)
    {
        int w, h;
        float* input_buf_c = input_buf + c * input_hw;
        float* output_buf_c = output_buf + c * output_hw;
        float* weight_buf_c = weight_buf + c * 25;
        float bias_c = bias ?
bias[c] : 0; float tmp = bias_c; tmp += weight_buf_c[12] * input_buf_c[0]; tmp += weight_buf_c[13] * input_buf_c[1]; tmp += weight_buf_c[14] * input_buf_c[2]; tmp += weight_buf_c[17] * input_buf_c[input_w]; tmp += weight_buf_c[18] * input_buf_c[input_w + 1]; tmp += weight_buf_c[19] * input_buf_c[input_w + 2]; tmp += weight_buf_c[22] * input_buf_c[input_w * 2]; tmp += weight_buf_c[23] * input_buf_c[input_w * 2 + 1]; tmp += weight_buf_c[24] * input_buf_c[input_w * 2 + 2]; output_buf_c[0] = elem_activation(tmp, activation); for (w = 0; w < mid_w_block; w += 4) { float32x4_t sum0 = vdupq_n_f32(bias_c); float32x4_t line2_0 = vld1q_f32(input_buf_c + 2 * w); float32x4_t line2_1 = vld1q_f32(input_buf_c + 2 * w + 4); float32x4_t line2_2 = vld1q_f32(input_buf_c + 2 * w + 8); float32x4x2_t line2_01 = vuzpq_f32(line2_0, line2_1); float32x4x2_t line2_12 = vuzpq_f32(line2_1, line2_2); float32x4_t input2_2 = vextq_f32(line2_01.val[0], line2_2, 1); float32x4_t input2_3 = vextq_f32(line2_0, line2_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[10]), line2_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[11]), line2_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[12]), input2_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[13]), input2_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[14]), line2_12.val[0]); float32x4_t line3_0 = vld1q_f32(input_buf_c + input_w + 2 * w); float32x4_t line3_1 = vld1q_f32(input_buf_c + input_w + 2 * w + 4); float32x4_t line3_2 = vld1q_f32(input_buf_c + input_w + 2 * w + 8); float32x4x2_t line3_01 = vuzpq_f32(line3_0, line3_1); float32x4x2_t line3_12 = vuzpq_f32(line3_1, line3_2); float32x4_t input3_2 = vextq_f32(line3_01.val[0], line3_2, 1); float32x4_t input3_3 = vextq_f32(line3_0, line3_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[15]), line3_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[16]), line3_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[17]), input3_2); 
sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[18]), input3_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[19]), line3_12.val[0]); float32x4_t line4_0 = vld1q_f32(input_buf_c + input_w * 2 + 2 * w); float32x4_t line4_1 = vld1q_f32(input_buf_c + input_w * 2 + 2 * w + 4); float32x4_t line4_2 = vld1q_f32(input_buf_c + input_w * 2 + 2 * w + 8); float32x4x2_t line4_01 = vuzpq_f32(line4_0, line4_1); float32x4x2_t line4_12 = vuzpq_f32(line4_1, line4_2); float32x4_t input4_2 = vextq_f32(line4_01.val[0], line4_2, 1); float32x4_t input4_3 = vextq_f32(line4_0, line4_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[20]), line4_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[21]), line4_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[22]), input4_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[23]), input4_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[24]), line4_12.val[0]); sum0 = vector_activation(sum0, activation); vst1q_f32(output_buf_c + w + 1, sum0); } for (w = mid_w_block; w < mid_w; w++) { tmp = bias_c; tmp += weight_buf_c[10] * input_buf_c[2 * w]; tmp += weight_buf_c[11] * input_buf_c[2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[2 * w + 2]; tmp += weight_buf_c[13] * input_buf_c[2 * w + 3]; tmp += weight_buf_c[14] * input_buf_c[2 * w + 4]; tmp += weight_buf_c[15] * input_buf_c[input_w + 2 * w]; tmp += weight_buf_c[16] * input_buf_c[input_w + 2 * w + 1]; tmp += weight_buf_c[17] * input_buf_c[input_w + 2 * w + 2]; tmp += weight_buf_c[18] * input_buf_c[input_w + 2 * w + 3]; tmp += weight_buf_c[19] * input_buf_c[input_w + 2 * w + 4]; tmp += weight_buf_c[20] * input_buf_c[input_w * 2 + 2 * w]; tmp += weight_buf_c[21] * input_buf_c[input_w * 2 + 2 * w + 1]; tmp += weight_buf_c[22] * input_buf_c[input_w * 2 + 2 * w + 2]; tmp += weight_buf_c[23] * input_buf_c[input_w * 2 + 2 * w + 3]; tmp += weight_buf_c[24] * input_buf_c[input_w * 2 + 2 * w + 4]; output_buf_c[w + 1] = elem_activation(tmp, activation); } if (w_remain) { 
tmp = bias_c; tmp += weight_buf_c[10] * input_buf_c[2 * w]; tmp += weight_buf_c[11] * input_buf_c[2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[2 * w + 2]; tmp += weight_buf_c[15] * input_buf_c[input_w + 2 * w]; tmp += weight_buf_c[16] * input_buf_c[input_w + 2 * w + 1]; tmp += weight_buf_c[17] * input_buf_c[input_w + 2 * w + 2]; tmp += weight_buf_c[20] * input_buf_c[input_w * 2 + 2 * w]; tmp += weight_buf_c[21] * input_buf_c[input_w * 2 + 2 * w + 1]; tmp += weight_buf_c[22] * input_buf_c[input_w * 2 + 2 * w + 2]; output_buf_c[w + 1] = elem_activation(tmp, activation); } else { tmp = bias_c; tmp += weight_buf_c[10] * input_buf_c[2 * w]; tmp += weight_buf_c[11] * input_buf_c[2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[2 * w + 2]; tmp += weight_buf_c[13] * input_buf_c[2 * w + 3]; tmp += weight_buf_c[15] * input_buf_c[input_w + 2 * w]; tmp += weight_buf_c[16] * input_buf_c[input_w + 2 * w + 1]; tmp += weight_buf_c[17] * input_buf_c[input_w + 2 * w + 2]; tmp += weight_buf_c[18] * input_buf_c[input_w + 2 * w + 3]; tmp += weight_buf_c[20] * input_buf_c[input_w * 2 + 2 * w]; tmp += weight_buf_c[21] * input_buf_c[input_w * 2 + 2 * w + 1]; tmp += weight_buf_c[22] * input_buf_c[input_w * 2 + 2 * w + 2]; tmp += weight_buf_c[23] * input_buf_c[input_w * 2 + 2 * w + 3]; output_buf_c[w + 1] = elem_activation(tmp, activation); } // mid height for (h = 0; h < mid_h; h++) { tmp = bias_c; tmp += weight_buf_c[2] * input_buf_c[input_w * 2 * h]; tmp += weight_buf_c[3] * input_buf_c[input_w * 2 * h + 1]; tmp += weight_buf_c[4] * input_buf_c[input_w * 2 * h + 2]; tmp += weight_buf_c[7] * input_buf_c[input_w * (2 * h + 1)]; tmp += weight_buf_c[8] * input_buf_c[input_w * (2 * h + 1) + 1]; tmp += weight_buf_c[9] * input_buf_c[input_w * (2 * h + 1) + 2]; tmp += weight_buf_c[12] * input_buf_c[input_w * (2 * h + 2)]; tmp += weight_buf_c[13] * input_buf_c[input_w * (2 * h + 2) + 1]; tmp += weight_buf_c[14] * input_buf_c[input_w * (2 * h + 2) + 2]; tmp += weight_buf_c[17] * 
input_buf_c[input_w * (2 * h + 3)]; tmp += weight_buf_c[18] * input_buf_c[input_w * (2 * h + 3) + 1]; tmp += weight_buf_c[19] * input_buf_c[input_w * (2 * h + 3) + 2]; tmp += weight_buf_c[22] * input_buf_c[input_w * (2 * h + 4)]; tmp += weight_buf_c[23] * input_buf_c[input_w * (2 * h + 4) + 1]; tmp += weight_buf_c[24] * input_buf_c[input_w * (2 * h + 4) + 2]; output_buf_c[output_w * (h + 1)] = elem_activation(tmp, activation); for (w = 0; w < mid_w_block; w += 4) { float32x4_t sum0 = vdupq_n_f32(bias_c); float32x4_t line0_0 = vld1q_f32(input_buf_c + input_w * 2 * h + 2 * w); float32x4_t line0_1 = vld1q_f32(input_buf_c + input_w * 2 * h + 2 * w + 4); float32x4_t line0_2 = vld1q_f32(input_buf_c + input_w * 2 * h + 2 * w + 8); float32x4x2_t line0_01 = vuzpq_f32(line0_0, line0_1); float32x4x2_t line0_12 = vuzpq_f32(line0_1, line0_2); float32x4_t input0_2 = vextq_f32(line0_01.val[0], line0_2, 1); float32x4_t input0_3 = vextq_f32(line0_0, line0_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[0]), line0_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[1]), line0_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[2]), input0_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[3]), input0_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[4]), line0_12.val[0]); float32x4_t line1_0 = vld1q_f32(input_buf_c + input_w * (2 * h + 1) + 2 * w); float32x4_t line1_1 = vld1q_f32(input_buf_c + input_w * (2 * h + 1) + 2 * w + 4); float32x4_t line1_2 = vld1q_f32(input_buf_c + input_w * (2 * h + 1) + 2 * w + 8); float32x4x2_t line1_01 = vuzpq_f32(line1_0, line1_1); float32x4x2_t line1_12 = vuzpq_f32(line1_1, line1_2); float32x4_t input1_2 = vextq_f32(line1_01.val[0], line1_2, 1); float32x4_t input1_3 = vextq_f32(line1_0, line1_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[5]), line1_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[6]), line1_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[7]), input1_2); sum0 = 
vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[8]), input1_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[9]), line1_12.val[0]); float32x4_t line2_0 = vld1q_f32(input_buf_c + input_w * (2 * h + 2) + 2 * w); float32x4_t line2_1 = vld1q_f32(input_buf_c + input_w * (2 * h + 2) + 2 * w + 4); float32x4_t line2_2 = vld1q_f32(input_buf_c + input_w * (2 * h + 2) + 2 * w + 8); float32x4x2_t line2_01 = vuzpq_f32(line2_0, line2_1); float32x4x2_t line2_12 = vuzpq_f32(line2_1, line2_2); float32x4_t input2_2 = vextq_f32(line2_01.val[0], line2_2, 1); float32x4_t input2_3 = vextq_f32(line2_0, line2_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[10]), line2_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[11]), line2_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[12]), input2_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[13]), input2_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[14]), line2_12.val[0]); float32x4_t line3_0 = vld1q_f32(input_buf_c + input_w * (2 * h + 3) + 2 * w); float32x4_t line3_1 = vld1q_f32(input_buf_c + input_w * (2 * h + 3) + 2 * w + 4); float32x4_t line3_2 = vld1q_f32(input_buf_c + input_w * (2 * h + 3) + 2 * w + 8); float32x4x2_t line3_01 = vuzpq_f32(line3_0, line3_1); float32x4x2_t line3_12 = vuzpq_f32(line3_1, line3_2); float32x4_t input3_2 = vextq_f32(line3_01.val[0], line3_2, 1); float32x4_t input3_3 = vextq_f32(line3_0, line3_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[15]), line3_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[16]), line3_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[17]), input3_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[18]), input3_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[19]), line3_12.val[0]); float32x4_t line4_0 = vld1q_f32(input_buf_c + input_w * (2 * h + 4) + 2 * w); float32x4_t line4_1 = vld1q_f32(input_buf_c + input_w * (2 * h + 4) + 2 * w + 4); float32x4_t line4_2 = vld1q_f32(input_buf_c + input_w * (2 * h + 4) 
+ 2 * w + 8); float32x4x2_t line4_01 = vuzpq_f32(line4_0, line4_1); float32x4x2_t line4_12 = vuzpq_f32(line4_1, line4_2); float32x4_t input4_2 = vextq_f32(line4_01.val[0], line4_2, 1); float32x4_t input4_3 = vextq_f32(line4_0, line4_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[20]), line4_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[21]), line4_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[22]), input4_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[23]), input4_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[24]), line4_12.val[0]); sum0 = vector_activation(sum0, activation); vst1q_f32(output_buf_c + output_w * (h + 1) + w + 1, sum0); } for (w = mid_w_block; w < mid_w; w++) { tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[input_w * 2 * h + 2 * w]; tmp += weight_buf_c[1] * input_buf_c[input_w * 2 * h + 2 * w + 1]; tmp += weight_buf_c[2] * input_buf_c[input_w * 2 * h + 2 * w + 2]; tmp += weight_buf_c[3] * input_buf_c[input_w * 2 * h + 2 * w + 3]; tmp += weight_buf_c[4] * input_buf_c[input_w * 2 * h + 2 * w + 4]; tmp += weight_buf_c[5] * input_buf_c[input_w * (2 * h + 1) + 2 * w]; tmp += weight_buf_c[6] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 1]; tmp += weight_buf_c[7] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 2]; tmp += weight_buf_c[8] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 3]; tmp += weight_buf_c[9] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 4]; tmp += weight_buf_c[10] * input_buf_c[input_w * (2 * h + 2) + 2 * w]; tmp += weight_buf_c[11] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 2]; tmp += weight_buf_c[13] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 3]; tmp += weight_buf_c[14] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 4]; tmp += weight_buf_c[15] * input_buf_c[input_w * (2 * h + 3) + 2 * w]; tmp += weight_buf_c[16] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 1]; tmp += weight_buf_c[17] * 
input_buf_c[input_w * (2 * h + 3) + 2 * w + 2]; tmp += weight_buf_c[18] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 3]; tmp += weight_buf_c[19] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 4]; tmp += weight_buf_c[20] * input_buf_c[input_w * (2 * h + 4) + 2 * w]; tmp += weight_buf_c[21] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 1]; tmp += weight_buf_c[22] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 2]; tmp += weight_buf_c[23] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 3]; tmp += weight_buf_c[24] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 4]; output_buf_c[output_w * (h + 1) + w + 1] = elem_activation(tmp, activation); } if (w_remain) { tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[input_w * 2 * h + 2 * w]; tmp += weight_buf_c[1] * input_buf_c[input_w * 2 * h + 2 * w + 1]; tmp += weight_buf_c[2] * input_buf_c[input_w * 2 * h + 2 * w + 2]; tmp += weight_buf_c[5] * input_buf_c[input_w * (2 * h + 1) + 2 * w]; tmp += weight_buf_c[6] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 1]; tmp += weight_buf_c[7] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 2]; tmp += weight_buf_c[10] * input_buf_c[input_w * (2 * h + 2) + 2 * w]; tmp += weight_buf_c[11] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 2]; tmp += weight_buf_c[15] * input_buf_c[input_w * (2 * h + 3) + 2 * w]; tmp += weight_buf_c[16] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 1]; tmp += weight_buf_c[17] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 2]; tmp += weight_buf_c[20] * input_buf_c[input_w * (2 * h + 4) + 2 * w]; tmp += weight_buf_c[21] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 1]; tmp += weight_buf_c[22] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 2]; output_buf_c[output_w * (h + 2) - 1] = elem_activation(tmp, activation); } else { tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[input_w * 2 * h + 2 * w]; tmp += weight_buf_c[1] * input_buf_c[input_w * 2 * h + 2 * w + 1]; tmp += weight_buf_c[2] * 
input_buf_c[input_w * 2 * h + 2 * w + 2]; tmp += weight_buf_c[3] * input_buf_c[input_w * 2 * h + 2 * w + 3]; tmp += weight_buf_c[5] * input_buf_c[input_w * (2 * h + 1) + 2 * w]; tmp += weight_buf_c[6] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 1]; tmp += weight_buf_c[7] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 2]; tmp += weight_buf_c[8] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 3]; tmp += weight_buf_c[10] * input_buf_c[input_w * (2 * h + 2) + 2 * w]; tmp += weight_buf_c[11] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 2]; tmp += weight_buf_c[13] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 3]; tmp += weight_buf_c[15] * input_buf_c[input_w * (2 * h + 3) + 2 * w]; tmp += weight_buf_c[16] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 1]; tmp += weight_buf_c[17] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 2]; tmp += weight_buf_c[18] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 3]; tmp += weight_buf_c[20] * input_buf_c[input_w * (2 * h + 4) + 2 * w]; tmp += weight_buf_c[21] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 1]; tmp += weight_buf_c[22] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 2]; tmp += weight_buf_c[23] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 3]; output_buf_c[output_w * (h + 2) - 1] = elem_activation(tmp, activation); } } if (h_remain) { tmp = bias_c; tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 3)]; tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 3) + 1]; tmp += weight_buf_c[4] * input_buf_c[input_w * (input_h - 3) + 2]; tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 2)]; tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 2) + 1]; tmp += weight_buf_c[9] * input_buf_c[input_w * (input_h - 2) + 2]; tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 1)]; tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 1) + 1]; tmp += weight_buf_c[14] * input_buf_c[input_w * (input_h - 1) + 2]; output_buf_c[output_w * 
(output_h - 1)] = elem_activation(tmp, activation); for (w = 0; w < mid_w_block; w += 4) { float32x4_t sum0 = vdupq_n_f32(bias_c); float32x4_t line0_0 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w); float32x4_t line0_1 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w + 4); float32x4_t line0_2 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w + 8); float32x4x2_t line0_01 = vuzpq_f32(line0_0, line0_1); float32x4x2_t line0_12 = vuzpq_f32(line0_1, line0_2); float32x4_t input0_2 = vextq_f32(line0_01.val[0], line0_2, 1); float32x4_t input0_3 = vextq_f32(line0_0, line0_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[0]), line0_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[1]), line0_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[2]), input0_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[3]), input0_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[4]), line0_12.val[0]); float32x4_t line1_0 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w); float32x4_t line1_1 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w + 4); float32x4_t line1_2 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w + 8); float32x4x2_t line1_01 = vuzpq_f32(line1_0, line1_1); float32x4x2_t line1_12 = vuzpq_f32(line1_1, line1_2); float32x4_t input1_2 = vextq_f32(line1_01.val[0], line1_2, 1); float32x4_t input1_3 = vextq_f32(line1_0, line1_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[5]), line1_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[6]), line1_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[7]), input1_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[8]), input1_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[9]), line1_12.val[0]); float32x4_t line2_0 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w); float32x4_t line2_1 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w + 4); float32x4_t line2_2 = vld1q_f32(input_buf_c + input_w * 
(input_h - 1) + 2 * w + 8); float32x4x2_t line2_01 = vuzpq_f32(line2_0, line2_1); float32x4x2_t line2_12 = vuzpq_f32(line2_1, line2_2); float32x4_t input2_2 = vextq_f32(line2_01.val[0], line2_2, 1); float32x4_t input2_3 = vextq_f32(line2_0, line2_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[10]), line2_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[11]), line2_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[12]), input2_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[13]), input2_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[14]), line2_12.val[0]); sum0 = vector_activation(sum0, activation); vst1q_f32(output_buf_c + output_w * (output_h - 1) + w + 1, sum0); } for (w = mid_w_block; w < mid_w; w++) { tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 3) + 2 * w]; tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1]; tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2]; tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 3) + 2 * w + 3]; tmp += weight_buf_c[4] * input_buf_c[input_w * (input_h - 3) + 2 * w + 4]; tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 2) + 2 * w]; tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1]; tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2]; tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 2) + 2 * w + 3]; tmp += weight_buf_c[9] * input_buf_c[input_w * (input_h - 2) + 2 * w + 4]; tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 1) + 2 * w]; tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2]; tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 1) + 2 * w + 3]; tmp += weight_buf_c[14] * input_buf_c[input_w * (input_h - 1) + 2 * w + 4]; output_buf_c[output_w * (output_h - 1) + w + 1] = elem_activation(tmp, activation); } if (w_remain) { tmp 
= bias_c; tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 3) + 2 * w]; tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1]; tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2]; tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 2) + 2 * w]; tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1]; tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2]; tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 1) + 2 * w]; tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2]; output_buf_c[output_hw - 1] = elem_activation(tmp, activation); } else { tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 3) + 2 * w]; tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1]; tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2]; tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 3) + 2 * w + 3]; tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 2) + 2 * w]; tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1]; tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2]; tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 2) + 2 * w + 3]; tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 1) + 2 * w]; tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2]; tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 1) + 2 * w + 3]; output_buf_c[output_hw - 1] = elem_activation(tmp, activation); } } else { tmp = bias_c; tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 4)]; tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 4) + 1]; tmp += weight_buf_c[4] * input_buf_c[input_w * (input_h - 4) + 2]; tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 
3)]; tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 3) + 1]; tmp += weight_buf_c[9] * input_buf_c[input_w * (input_h - 3) + 2]; tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 2)]; tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 2) + 1]; tmp += weight_buf_c[14] * input_buf_c[input_w * (input_h - 2) + 2]; tmp += weight_buf_c[17] * input_buf_c[input_w * (input_h - 1)]; tmp += weight_buf_c[18] * input_buf_c[input_w * (input_h - 1) + 1]; tmp += weight_buf_c[19] * input_buf_c[input_w * (input_h - 1) + 2]; output_buf_c[output_w * (output_h - 1)] = elem_activation(tmp, activation); for (w = 0; w < mid_w_block; w += 4) { float32x4_t sum0 = vdupq_n_f32(bias_c); float32x4_t line0_0 = vld1q_f32(input_buf_c + input_w * (input_h - 4) + 2 * w); float32x4_t line0_1 = vld1q_f32(input_buf_c + input_w * (input_h - 4) + 2 * w + 4); float32x4_t line0_2 = vld1q_f32(input_buf_c + input_w * (input_h - 4) + 2 * w + 8); float32x4x2_t line0_01 = vuzpq_f32(line0_0, line0_1); float32x4x2_t line0_12 = vuzpq_f32(line0_1, line0_2); float32x4_t input0_2 = vextq_f32(line0_01.val[0], line0_2, 1); float32x4_t input0_3 = vextq_f32(line0_0, line0_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[0]), line0_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[1]), line0_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[2]), input0_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[3]), input0_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[4]), line0_12.val[0]); float32x4_t line1_0 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w); float32x4_t line1_1 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w + 4); float32x4_t line1_2 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w + 8); float32x4x2_t line1_01 = vuzpq_f32(line1_0, line1_1); float32x4x2_t line1_12 = vuzpq_f32(line1_1, line1_2); float32x4_t input1_2 = vextq_f32(line1_01.val[0], line1_2, 1); float32x4_t input1_3 = vextq_f32(line1_0, line1_12.val[1], 3); 
sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[5]), line1_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[6]), line1_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[7]), input1_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[8]), input1_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[9]), line1_12.val[0]); float32x4_t line2_0 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w); float32x4_t line2_1 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w + 4); float32x4_t line2_2 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w + 8); float32x4x2_t line2_01 = vuzpq_f32(line2_0, line2_1); float32x4x2_t line2_12 = vuzpq_f32(line2_1, line2_2); float32x4_t input2_2 = vextq_f32(line2_01.val[0], line2_2, 1); float32x4_t input2_3 = vextq_f32(line2_0, line2_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[10]), line2_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[11]), line2_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[12]), input2_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[13]), input2_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[14]), line2_12.val[0]); float32x4_t line3_0 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w); float32x4_t line3_1 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w + 4); float32x4_t line3_2 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w + 8); float32x4x2_t line3_01 = vuzpq_f32(line3_0, line3_1); float32x4x2_t line3_12 = vuzpq_f32(line3_1, line3_2); float32x4_t input3_2 = vextq_f32(line3_01.val[0], line3_2, 1); float32x4_t input3_3 = vextq_f32(line3_0, line3_12.val[1], 3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[15]), line3_01.val[0]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[16]), line3_01.val[1]); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[17]), input3_2); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[18]), input3_3); sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[19]), line3_12.val[0]); 
sum0 = vector_activation(sum0, activation); vst1q_f32(output_buf_c + output_w * (output_h - 1) + w + 1, sum0); } for (w = mid_w_block; w < mid_w; w++) { tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 4) + 2 * w]; tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 4) + 2 * w + 1]; tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 4) + 2 * w + 2]; tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 4) + 2 * w + 3]; tmp += weight_buf_c[4] * input_buf_c[input_w * (input_h - 4) + 2 * w + 4]; tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 3) + 2 * w]; tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1]; tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2]; tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 3) + 2 * w + 3]; tmp += weight_buf_c[9] * input_buf_c[input_w * (input_h - 3) + 2 * w + 4]; tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 2) + 2 * w]; tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2]; tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 2) + 2 * w + 3]; tmp += weight_buf_c[14] * input_buf_c[input_w * (input_h - 2) + 2 * w + 4]; tmp += weight_buf_c[15] * input_buf_c[input_w * (input_h - 1) + 2 * w]; tmp += weight_buf_c[16] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1]; tmp += weight_buf_c[17] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2]; tmp += weight_buf_c[18] * input_buf_c[input_w * (input_h - 1) + 2 * w + 3]; tmp += weight_buf_c[19] * input_buf_c[input_w * (input_h - 1) + 2 * w + 4]; output_buf_c[output_w * (output_h - 1) + w + 1] = elem_activation(tmp, activation); } if (w_remain) { tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 4) + 2 * w]; tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 4) + 2 * w + 1]; tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 4) + 2 * w + 2]; 
tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 3) + 2 * w]; tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1]; tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2]; tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 2) + 2 * w]; tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2]; tmp += weight_buf_c[15] * input_buf_c[input_w * (input_h - 1) + 2 * w]; tmp += weight_buf_c[16] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1]; tmp += weight_buf_c[17] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2]; output_buf_c[output_hw - 1] = elem_activation(tmp, activation); } else { tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 4) + 2 * w]; tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 4) + 2 * w + 1]; tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 4) + 2 * w + 2]; tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 4) + 2 * w + 3]; tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 3) + 2 * w]; tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1]; tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2]; tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 3) + 2 * w + 3]; tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 2) + 2 * w]; tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1]; tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2]; tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 2) + 2 * w + 3]; tmp += weight_buf_c[15] * input_buf_c[input_w * (input_h - 1) + 2 * w]; tmp += weight_buf_c[16] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1]; tmp += weight_buf_c[17] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2]; tmp += weight_buf_c[18] * input_buf_c[input_w * (input_h - 1) + 2 * w + 3]; output_buf_c[output_hw - 1] = elem_activation(tmp, 
activation); } } } } void depthwise_conv_k7s1(float* input, float* weight, float* bias, float* output, int input_h, int input_w, int channel, int output_h, int output_w, int activation, int num_thread) { int channel_size = input_h * input_w; int mid_w = input_w - 6; int mid_block = mid_w >> 2; int mid_h = input_h - 6; int w = 0; // #pragma omp parallel for num_threads(num_thread) for (int c = 0; c < channel; c++) { float tmp0, tmp1, tmp2; float* input_1 = input + c * channel_size; float* input_2 = input_1 + input_w; float* input_3 = input_1 + input_w * 2; float* input_4 = input_1 + input_w * 3; float* input_5 = input_1 + input_w * 4; float* input_6 = input_1 + input_w * 5; float* input_7 = input_1 + input_w * 6; float* output_buf = output + c * channel_size; float* output_buf_1 = output_buf + output_w; float* output_buf_2 = output_buf_1 + output_w; float* weight_buf = weight + c * 49; float bias_c = bias ? bias[c] : 0; float32x4_t kernel_0_3 = vld1q_f32(weight_buf); float32x4_t kernel_4_7 = vld1q_f32(weight_buf + 4); float32x4_t kernel_8_11 = vld1q_f32(weight_buf + 8); float32x4_t kernel_12_15 = vld1q_f32(weight_buf + 12); float32x4_t kernel_16_19 = vld1q_f32(weight_buf + 16); float32x4_t kernel_20_23 = vld1q_f32(weight_buf + 20); float32x4_t kernel_24_27 = vld1q_f32(weight_buf + 24); float32x4_t kernel_28_31 = vld1q_f32(weight_buf + 28); float32x4_t kernel_32_35 = vld1q_f32(weight_buf + 32); float32x4_t kernel_36_39 = vld1q_f32(weight_buf + 36); float32x4_t kernel_40_43 = vld1q_f32(weight_buf + 40); float32x4_t kernel_44_47 = vld1q_f32(weight_buf + 44); float32x4_t kernel_48_51 = vld1q_f32(weight_buf + 48); float32x4_t line1 = vld1q_f32(input_1); float32x4_t line2 = vld1q_f32(input_2); float32x4_t line3 = vld1q_f32(input_3); float32x4_t line4 = vld1q_f32(input_4); float32x4_t line5 = vld1q_f32(input_5); float32x4_t line6 = vld1q_f32(input_6); float32x4_t kernel_10_13 = vextq_f32(kernel_8_11, kernel_12_15, 2); float32x4_t kernel_17_20 = vextq_f32(kernel_16_19, 
kernel_20_23, 1); float32x4_t kernel_31_34 = vextq_f32(kernel_28_31, kernel_32_35, 3); float32x4_t kernel_38_41 = vextq_f32(kernel_36_39, kernel_40_43, 2); float32x4_t kernel_45_48 = vextq_f32(kernel_44_47, kernel_48_51, 1); float32x4_t line1_1 = vld1q_f32(input_1 + 4); float32x4_t line2_1 = vld1q_f32(input_2 + 4); float32x4_t line3_1 = vld1q_f32(input_3 + 4); float32x4_t line4_1 = vld1q_f32(input_4 + 4); float32x4_t line5_1 = vld1q_f32(input_5 + 4); float32x4_t line6_1 = vld1q_f32(input_6 + 4); /* top start1 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_31_34); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_38_41); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_45_48); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_10_13); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_17_20); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_24_27); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_31_34); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_38_41); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_45_48); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } float32x4_t kernel_9_12 = vextq_f32(kernel_8_11, kernel_12_15, 1); float32x4_t kernel_23_26 = vextq_f32(kernel_20_23, kernel_24_27, 3); float32x4_t 
kernel_30_33 = vextq_f32(kernel_28_31, kernel_32_35, 2); float32x4_t kernel_37_40 = vextq_f32(kernel_36_39, kernel_40_43, 1); /* top start2 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_23_26); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_30_33); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_37_40); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_44_47); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += weight_buf[27] * input_1[4]; tmp0 += weight_buf[34] * input_2[4]; tmp0 += weight_buf[41] * input_3[4]; tmp0 += weight_buf[48] * input_4[4]; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_16_19); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_23_26); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_30_33); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_37_40); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_44_47); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; tmp1 += weight_buf[20] * input_1[4]; tmp1 += weight_buf[27] * input_2[4]; tmp1 += weight_buf[34] * input_3[4]; tmp1 += weight_buf[41] * input_4[4]; tmp1 += weight_buf[48] * input_5[4]; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_9_12); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_16_19); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_23_26); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_30_33); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_37_40); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_44_47); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; tmp2 += weight_buf[13] * input_1[4]; tmp2 += weight_buf[20] * input_2[4]; tmp2 += weight_buf[27] * input_3[4]; tmp2 += weight_buf[34] * input_4[4]; tmp2 += weight_buf[41] * input_5[4]; tmp2 += weight_buf[48] * input_6[4]; *output_buf_2++ = 
elem_activation(tmp2, activation); } float32x4_t kernel_15_18 = vextq_f32(kernel_12_15, kernel_16_19, 3); float32x4_t kernel_22_25 = vextq_f32(kernel_20_23, kernel_24_27, 2); float32x4_t kernel_29_32 = vextq_f32(kernel_28_31, kernel_32_35, 1); float32x4_t kernel_43_46 = vextq_f32(kernel_40_43, kernel_44_47, 3); /* top start3 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_36_39); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_43_46); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_15_18); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_22_25); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_29_32); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_36_39); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_43_46); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line1_1), vget_high_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_24_27)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_31_34)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_38_41)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_45_48)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_8_11); tmp_4_2 = 
vmlaq_f32(tmp_4_2, line2, kernel_15_18); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_22_25); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_29_32); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_36_39); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_43_46); float32x2_t tmp_2_2 = vadd_f32(vget_low_f32(tmp_4_2), vget_high_f32(tmp_4_2)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line1_1), vget_high_f32(kernel_10_13)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line2_1), vget_high_f32(kernel_17_20)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line3_1), vget_high_f32(kernel_24_27)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line4_1), vget_high_f32(kernel_31_34)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line5_1), vget_high_f32(kernel_38_41)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line6_1), vget_high_f32(kernel_45_48)); tmp2 = vget_lane_f32(tmp_2_2, 0) + vget_lane_f32(tmp_2_2, 1) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } float32x4_t line1_2; float32x4_t line2_2; float32x4_t line3_2; float32x4_t line4_2; float32x4_t line5_2; float32x4_t line6_2; /* top mid */ for (w = 0; w < mid_block; w++) { line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); float32x4_t tmp_4_1 = vdupq_n_f32(bias_c); float32x4_t tmp_4_2 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1, vget_low_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line1, vget_high_f32(kernel_12_15), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line1, vget_high_f32(kernel_4_7), 1); float32x4_t tmp = vextq_f32(line1, line1_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_12_15), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_8_11), 0); tmp = 
vextq_f32(line1, line1_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line1, line1_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_8_11), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1_1, vget_low_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line1_1, vget_high_f32(kernel_16_19), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line1_1, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line1_1, line1_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line1_1, line1_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_20_23), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_12_15), 1); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2, vget_low_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line2, vget_low_f32(kernel_20_23), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line2, vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line2, line2_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line2, line2_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line2, line2_1, 3); 
tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2_1, vget_low_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line2_1, vget_low_f32(kernel_24_27), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line2_1, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line2_1, line2_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line2_1, line2_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_20_23), 0); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line3, vget_high_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line3, vget_low_f32(kernel_28_31), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line3, vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line3, line3_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line3, line3_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line3, line3_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line3_1, 
vget_high_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line3_1, vget_low_f32(kernel_32_35), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line3_1, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line3_1, line3_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line3_1, line3_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_24_27), 1); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4, vget_high_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line4, vget_high_f32(kernel_32_35), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line4, vget_low_f32(kernel_28_31), 0); tmp = vextq_f32(line4, line4_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_36_39), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_28_31), 1); tmp = vextq_f32(line4, line4_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_36_39), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_28_31), 0); tmp = vextq_f32(line4, line4_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4_1, vget_high_f32(kernel_44_47), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line4_1, vget_high_f32(kernel_36_39), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line4_1, vget_low_f32(kernel_32_35), 0); tmp = vextq_f32(line4_1, line4_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, 
vget_high_f32(kernel_44_47), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_40_43), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_32_35), 1); tmp = vextq_f32(line4_1, line4_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_40_43), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_32_35), 0); /* line5 */ tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line5, vget_high_f32(kernel_40_43), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line5, vget_high_f32(kernel_32_35), 1); tmp = vextq_f32(line5, line5_1, 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_40_43), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_36_39), 0); tmp = vextq_f32(line5, line5_1, 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_44_47), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_36_39), 1); tmp = vextq_f32(line5, line5_1, 3); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_44_47), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line5_1, vget_high_f32(kernel_44_47), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line5_1, vget_high_f32(kernel_36_39), 1); tmp = vextq_f32(line5_1, line5_2, 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_44_47), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_40_43), 0); tmp = vextq_f32(line5_1, line5_2, 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_40_43), 1); /* line6 */ tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line6, vget_high_f32(kernel_40_43), 0); tmp = vextq_f32(line6, line6_1, 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_40_43), 1); tmp = vextq_f32(line6, line6_1, 2); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_44_47), 0); tmp = vextq_f32(line6, line6_1, 3); tmp_4_2 = 
vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_44_47), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line6_1, vget_high_f32(kernel_44_47), 0); tmp = vextq_f32(line6_1, line6_2, 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_44_47), 1); tmp = vextq_f32(line6_1, line6_2, 2); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_0 = vector_activation(tmp_4_0, activation); tmp_4_1 = vector_activation(tmp_4_1, activation); tmp_4_2 = vector_activation(tmp_4_2, activation); vst1q_f32(output_buf, tmp_4_0); vst1q_f32(output_buf_1, tmp_4_1); vst1q_f32(output_buf_2, tmp_4_2); output_buf += 4; output_buf_1 += 4; output_buf_2 += 4; line1 = line1_1; line2 = line2_1; line3 = line3_1; line4 = line4_1; line5 = line5_1; line6 = line6_1; line1_1 = line1_2; line2_1 = line2_2; line3_1 = line3_2; line4_1 = line4_2; line5_1 = line5_2; line6_1 = line6_2; } float32x4_t zero = vdupq_n_f32(0.0); float32x4_t kernel_7_10 = vextq_f32(kernel_4_7, kernel_8_11, 3); float32x4_t kernel_14_17 = vextq_f32(kernel_12_15, kernel_16_19, 2); float32x4_t kernel_21_24 = vextq_f32(kernel_20_23, kernel_24_27, 1); float32x4_t kernel_35_38 = vextq_f32(kernel_32_35, kernel_36_39, 3); float32x4_t kernel_42_45 = vextq_f32(kernel_40_43, kernel_44_47, 2); line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); for (w = mid_block * 4; w < mid_w; w++) { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, 
line5, kernel_42_45); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_21_24); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_28_31); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_35_38); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_42_45); float32x4_t tmp = vextq_f32(zero, line1_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_17_20); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_10_13); tmp = vextq_f32(zero, line2_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_31_34); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_24_27); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_17_20); tmp = vextq_f32(zero, line3_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_38_41); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_31_34); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_24_27); tmp = vextq_f32(zero, line4_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_45_48); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_38_41); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_31_34); tmp = vextq_f32(zero, line5_1, 3); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_45_48); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_38_41); tmp = vextq_f32(zero, line6_1, 3); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); *output_buf_1++ = elem_activation(tmp1, activation); *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = 
vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_2, 1); line2_1 = vextq_f32(line2_1, line2_2, 1); line3_1 = vextq_f32(line3_1, line3_2, 1); line4_1 = vextq_f32(line4_1, line4_2, 1); line5_1 = vextq_f32(line5_1, line5_2, 1); line6_1 = vextq_f32(line6_1, line6_2, 1); } /* top end1 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_37_40)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_44_47)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_42_45); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line1_1), vget_high_f32(kernel_16_19)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_23_26)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_30_33)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_37_40)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_44_47)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_7_10); tmp_4_2 = 
vmlaq_f32(tmp_4_2, line2, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_21_24); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_28_31); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_35_38); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_42_45); float32x2_t tmp_2_2 = vadd_f32(vget_low_f32(tmp_4_2), vget_high_f32(tmp_4_2)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line1_1), vget_high_f32(kernel_9_12)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line2_1), vget_high_f32(kernel_16_19)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line3_1), vget_high_f32(kernel_23_26)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line4_1), vget_high_f32(kernel_30_33)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line5_1), vget_high_f32(kernel_37_40)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line6_1), vget_high_f32(kernel_44_47)); tmp2 = vget_lane_f32(tmp_2_2, 0) + vget_lane_f32(tmp_2_2, 1) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); } /* top end2 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += vgetq_lane_f32(line1_1, 0) * weight_buf[25]; tmp0 += vgetq_lane_f32(line2_1, 0) * weight_buf[32]; tmp0 += vgetq_lane_f32(line3_1, 0) * weight_buf[39]; tmp0 += vgetq_lane_f32(line4_1, 0) * weight_buf[46]; *output_buf++ = 
elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_42_45); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; tmp1 += vgetq_lane_f32(line1_1, 0) * weight_buf[18]; tmp1 += vgetq_lane_f32(line2_1, 0) * weight_buf[25]; tmp1 += vgetq_lane_f32(line3_1, 0) * weight_buf[32]; tmp1 += vgetq_lane_f32(line4_1, 0) * weight_buf[39]; tmp1 += vgetq_lane_f32(line5_1, 0) * weight_buf[46]; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_21_24); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_28_31); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_35_38); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_42_45); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; tmp2 += vgetq_lane_f32(line1_1, 0) * weight_buf[11]; tmp2 += vgetq_lane_f32(line2_1, 0) * weight_buf[18]; tmp2 += vgetq_lane_f32(line3_1, 0) * weight_buf[25]; tmp2 += vgetq_lane_f32(line4_1, 0) * weight_buf[32]; tmp2 += vgetq_lane_f32(line5_1, 0) * weight_buf[39]; tmp2 += vgetq_lane_f32(line6_1, 0) * weight_buf[46]; *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); } /* top end3 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, 
kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_42_45); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_21_24); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_28_31); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_35_38); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_42_45); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } float32x4_t kernel_1_4 = vextq_f32(kernel_0_3, kernel_4_7, 1); float32x4_t kernel_2_5 = vextq_f32(kernel_0_3, kernel_4_7, 2); float32x4_t kernel_3_6 = vextq_f32(kernel_0_3, kernel_4_7, 3); output_buf += output_w * 2; float32x4_t line7; float32x4_t line7_1; float32x4_t line7_2; /* mid */ for (int h = 0; h < mid_h; h++) { input_1 = input + c * channel_size + h * input_w; input_2 = input_1 + input_w; input_3 = input_2 + input_w; input_4 = input_3 + input_w; input_5 = input_4 + input_w; input_6 = input_5 + input_w; input_7 = input_6 + input_w; line1 = vld1q_f32(input_1); line2 = vld1q_f32(input_2); line3 = vld1q_f32(input_3); line4 = vld1q_f32(input_4); line5 = vld1q_f32(input_5); line6 = vld1q_f32(input_6); line7 = vld1q_f32(input_7); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_10_13); tmp_4_0 = 
vmlaq_f32(tmp_4_0, line3, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line1_1 = vld1q_f32(input_1 + 4); line2_1 = vld1q_f32(input_2 + 4); line3_1 = vld1q_f32(input_3 + 4); line4_1 = vld1q_f32(input_4 + 4); line5_1 = vld1q_f32(input_5 + 4); line6_1 = vld1q_f32(input_6 + 4); line7_1 = vld1q_f32(input_7 + 4); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_2_5); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_9_12); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_16_19); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_23_26); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_30_33); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_37_40); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_44_47); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += vgetq_lane_f32(line1_1, 0) * weight_buf[6]; tmp0 += vgetq_lane_f32(line2_1, 0) * weight_buf[13]; tmp0 += vgetq_lane_f32(line3_1, 0) * weight_buf[20]; tmp0 += vgetq_lane_f32(line4_1, 0) * weight_buf[27]; tmp0 += vgetq_lane_f32(line5_1, 0) * weight_buf[34]; tmp0 += vgetq_lane_f32(line6_1, 0) * weight_buf[41]; tmp0 += vgetq_lane_f32(line7_1, 0) * weight_buf[48]; *output_buf++ = elem_activation(tmp0, activation); } { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_1_4); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_8_11); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_15_18); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_36_39); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_43_46); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), 
vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line7_1), vget_high_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } for (w = 0; w < mid_block; w++) { line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); line7_2 = vld1q_f32(input_7 + 8 + 4 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1, vget_low_f32(kernel_0_3), 0); float32x4_t tmp = vextq_f32(line1, line1_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line1, line1_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line1, line1_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1_1, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line1_1, line1_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line1_1, line1_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_4_7), 0); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2, vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line2, line2_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line2, 
line2_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line2, line2_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2_1, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line2_1, line2_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line2_1, line2_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 1); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line3, vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line3, line3_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line3, line3_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line3, line3_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line3_1, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line3_1, line3_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line3_1, line3_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_20_23), 0); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4, vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line4, line4_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line4, line4_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line4, line4_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4_1, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line4_1, line4_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line4_1, line4_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); /* line5 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line5, vget_low_f32(kernel_28_31), 0); tmp = vextq_f32(line5, 
line5_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp = vextq_f32(line5, line5_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp = vextq_f32(line5, line5_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line5_1, vget_low_f32(kernel_32_35), 0); tmp = vextq_f32(line5_1, line5_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp = vextq_f32(line5_1, line5_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); /* line6 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line6, vget_high_f32(kernel_32_35), 1); tmp = vextq_f32(line6, line6_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp = vextq_f32(line6, line6_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp = vextq_f32(line6, line6_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line6_1, vget_high_f32(kernel_36_39), 1); tmp = vextq_f32(line6_1, line6_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp = vextq_f32(line6_1, line6_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); /* line7 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line7, vget_high_f32(kernel_40_43), 0); tmp = vextq_f32(line7, line7_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_40_43), 1); tmp = vextq_f32(line7, line7_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 0); tmp = vextq_f32(line7, line7_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line7_1, vget_high_f32(kernel_44_47), 0); tmp = vextq_f32(line7_1, line7_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 1); tmp = vextq_f32(line7_1, line7_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_48_51), 0); 
tmp_4_0 = vector_activation(tmp_4_0, activation); vst1q_f32(output_buf, tmp_4_0); output_buf += 4; line1 = line1_1; line2 = line2_1; line3 = line3_1; line4 = line4_1; line5 = line5_1; line6 = line6_1; line7 = line7_1; line1_1 = line1_2; line2_1 = line2_2; line3_1 = line3_2; line4_1 = line4_2; line5_1 = line5_2; line6_1 = line6_2; line7_1 = line7_2; } line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); line7_2 = vld1q_f32(input_7 + 8 + 4 * w); for (w = mid_block * 4; w < mid_w; w++) { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); float32x4_t tmp = vextq_f32(zero, line1_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_3_6); tmp = vextq_f32(zero, line2_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_10_13); tmp = vextq_f32(zero, line3_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_17_20); tmp = vextq_f32(zero, line4_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_24_27); tmp = vextq_f32(zero, line5_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_31_34); tmp = vextq_f32(zero, line6_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_38_41); tmp = vextq_f32(zero, line7_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = 
vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line7 = vextq_f32(line7, line7_1, 1); line1_1 = vextq_f32(line1_1, line1_2, 1); line2_1 = vextq_f32(line2_1, line2_2, 1); line3_1 = vextq_f32(line3_1, line3_2, 1); line4_1 = vextq_f32(line4_1, line4_2, 1); line5_1 = vextq_f32(line5_1, line5_2, 1); line6_1 = vextq_f32(line6_1, line6_2, 1); line7_1 = vextq_f32(line7_1, line7_2, 1); } { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_2_5)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_9_12)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_16_19)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_37_40)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line7_1), vget_high_f32(kernel_44_47)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line7 = vextq_f32(line7, line7_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 
1); line6_1 = vextq_f32(line6_1, line6_1, 1); line7_1 = vextq_f32(line7_1, line7_1, 1); } { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += vgetq_lane_f32(line1_1, 0) * weight_buf[4]; tmp0 += vgetq_lane_f32(line2_1, 0) * weight_buf[11]; tmp0 += vgetq_lane_f32(line3_1, 0) * weight_buf[18]; tmp0 += vgetq_lane_f32(line4_1, 0) * weight_buf[25]; tmp0 += vgetq_lane_f32(line5_1, 0) * weight_buf[32]; tmp0 += vgetq_lane_f32(line6_1, 0) * weight_buf[39]; tmp0 += vgetq_lane_f32(line7_1, 0) * weight_buf[46]; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line7 = vextq_f32(line7, line7_1, 1); } { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } } /* bottom start1 */ input_1 = input + c * channel_size + input_w * (input_h - 6); input_2 = input_1 + input_w; input_3 = input_2 + input_w; input_4 = input_3 + input_w; input_5 = 
input_4 + input_w; input_6 = input_5 + input_w; line1 = vld1q_f32(input_1); line2 = vld1q_f32(input_2); line3 = vld1q_f32(input_3); line4 = vld1q_f32(input_4); line5 = vld1q_f32(input_5); line6 = vld1q_f32(input_6); output_buf_1 = output_buf + input_w; output_buf_2 = output_buf_1 + input_w; { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_38_41); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_3_6); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_31_34); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_3_6); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_10_13); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_17_20); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_24_27); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } line1_1 = vld1q_f32(input_1 + 4); line2_1 = vld1q_f32(input_2 + 4); line3_1 = vld1q_f32(input_3 + 4); line4_1 = vld1q_f32(input_4 + 4); line5_1 = vld1q_f32(input_5 + 4); line6_1 = vld1q_f32(input_6 + 4); /* bottom start2 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_2_5); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_9_12); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_16_19); 
tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_23_26); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_30_33); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_37_40); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += vgetq_lane_f32(line1_1, 0) * weight_buf[6]; tmp0 += vgetq_lane_f32(line2_1, 0) * weight_buf[13]; tmp0 += vgetq_lane_f32(line3_1, 0) * weight_buf[20]; tmp0 += vgetq_lane_f32(line4_1, 0) * weight_buf[27]; tmp0 += vgetq_lane_f32(line5_1, 0) * weight_buf[34]; tmp0 += vgetq_lane_f32(line6_1, 0) * weight_buf[41]; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_2_5); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_9_12); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_16_19); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_23_26); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_30_33); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; tmp1 += vgetq_lane_f32(line2_1, 0) * weight_buf[6]; tmp1 += vgetq_lane_f32(line3_1, 0) * weight_buf[13]; tmp1 += vgetq_lane_f32(line4_1, 0) * weight_buf[20]; tmp1 += vgetq_lane_f32(line5_1, 0) * weight_buf[27]; tmp1 += vgetq_lane_f32(line6_1, 0) * weight_buf[34]; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_2_5); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_9_12); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_16_19); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_23_26); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; tmp2 += vgetq_lane_f32(line3_1, 0) * weight_buf[6]; tmp2 += vgetq_lane_f32(line4_1, 0) * weight_buf[13]; tmp2 += vgetq_lane_f32(line5_1, 0) * weight_buf[20]; tmp2 += vgetq_lane_f32(line6_1, 0) * weight_buf[27]; *output_buf_2++ = elem_activation(tmp2, activation); } /* bottom start3 */ { float32x4_t tmp_4_0 = 
vmulq_f32(line1, kernel_1_4); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_8_11); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_15_18); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_36_39); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_38_41)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_1_4); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_8_11); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_15_18); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_22_25); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_29_32); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_3_6)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_10_13)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_24_27)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_31_34)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_1_4); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_8_11); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_15_18); tmp_4_2 = 
vmlaq_f32(tmp_4_2, line6, kernel_22_25); float32x2_t tmp_2_2 = vadd_f32(vget_low_f32(tmp_4_2), vget_high_f32(tmp_4_2)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line3_1), vget_high_f32(kernel_3_6)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line4_1), vget_high_f32(kernel_10_13)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line5_1), vget_high_f32(kernel_17_20)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line6_1), vget_high_f32(kernel_24_27)); tmp2 = vget_lane_f32(tmp_2_2, 0) + vget_lane_f32(tmp_2_2, 1) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } /* bottom mid */ for (w = 0; w < mid_block; w++) { line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); float32x4_t tmp_4_1 = vdupq_n_f32(bias_c); float32x4_t tmp_4_2 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1, vget_low_f32(kernel_0_3), 0); float32x4_t tmp = vextq_f32(line1, line1_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line1, line1_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line1, line1_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1_1, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line1_1, line1_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line1_1, line1_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_4_7), 0); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2, vget_high_f32(kernel_4_7), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line2, vget_low_f32(kernel_0_3), 0); tmp = vextq_f32(line2, line2_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, 
vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line2, line2_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line2, line2_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_0_3), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2_1, vget_high_f32(kernel_8_11), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line2_1, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line2_1, line2_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line2_1, line2_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_4_7), 0); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line3, vget_high_f32(kernel_12_15), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line3, vget_high_f32(kernel_4_7), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line3, vget_low_f32(kernel_0_3), 0); tmp = vextq_f32(line3, line3_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_12_15), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line3, line3_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line3, line3_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_8_11), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_0_3), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line3_1, vget_high_f32(kernel_16_19), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line3_1, 
vget_high_f32(kernel_8_11), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line3_1, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line3_1, line3_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line3_1, line3_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_4_7), 0); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4, vget_low_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line4, vget_high_f32(kernel_12_15), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line4, vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line4, line4_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_12_15), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line4, line4_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line4, line4_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_8_11), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4_1, vget_low_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line4_1, vget_high_f32(kernel_16_19), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line4_1, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line4_1, line4_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 1); tmp_4_2 
= vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line4_1, line4_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_20_23), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_12_15), 1); /* line5 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line5, vget_low_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line5, vget_low_f32(kernel_20_23), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line5, vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line5, line5_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line5, line5_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line5, line5_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line5_1, vget_low_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line5_1, vget_low_f32(kernel_24_27), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line5_1, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line5_1, line5_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line5_1, line5_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, 
vget_low_f32(kernel_20_23), 0); /* line6 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line6, vget_high_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line6, vget_low_f32(kernel_28_31), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line6, vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line6, line6_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line6, line6_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line6, line6_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line6_1, vget_high_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line6_1, vget_low_f32(kernel_32_35), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line6_1, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line6_1, line6_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line6_1, line6_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_0 = vector_activation(tmp_4_0, activation); vst1q_f32(output_buf, tmp_4_0); output_buf += 4; tmp_4_1 = vector_activation(tmp_4_1, activation); vst1q_f32(output_buf_1, tmp_4_1); output_buf_1 += 4; tmp_4_2 = vector_activation(tmp_4_2, activation); 
vst1q_f32(output_buf_2, tmp_4_2); output_buf_2 += 4; line1 = line1_1; line2 = line2_1; line3 = line3_1; line4 = line4_1; line5 = line5_1; line6 = line6_1; line1_1 = line1_2; line2_1 = line2_2; line3_1 = line3_2; line4_1 = line4_2; line5_1 = line5_2; line6_1 = line6_2; } line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); for (w = mid_block * 4; w < mid_w; w++) { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_28_31); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_0_3); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_21_24); float32x4_t tmp = vextq_f32(zero, line1_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_3_6); tmp = vextq_f32(zero, line2_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_3_6); tmp = vextq_f32(zero, line3_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_10_13); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_3_6); tmp = vextq_f32(zero, line4_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_17_20); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_10_13); tmp = vextq_f32(zero, line5_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, 
kernel_31_34); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_24_27); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_17_20); tmp = vextq_f32(zero, line6_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_38_41); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_31_34); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_24_27); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_2, 1); line2_1 = vextq_f32(line2_1, line2_2, 1); line3_1 = vextq_f32(line3_1, line3_2, 1); line4_1 = vextq_f32(line4_1, line4_2, 1); line5_1 = vextq_f32(line5_1, line5_2, 1); line6_1 = vextq_f32(line6_1, line6_2, 1); } /* bottom end1 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_2_5)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_9_12)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_16_19)); tmp_2_0 = vmla_f32(tmp_2_0, 
vget_low_f32(line4_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_37_40)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_28_31); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_2_5)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_9_12)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_16_19)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_23_26)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_30_33)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_0_3); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_21_24); float32x2_t tmp_2_2 = vadd_f32(vget_low_f32(tmp_4_2), vget_high_f32(tmp_4_2)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line3_1), vget_high_f32(kernel_2_5)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line4_1), vget_high_f32(kernel_9_12)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line5_1), vget_high_f32(kernel_16_19)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line6_1), vget_high_f32(kernel_23_26)); tmp2 = vget_lane_f32(tmp_2_2, 0) + vget_lane_f32(tmp_2_2, 1) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 
1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); } /* bottom end2 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += vgetq_lane_f32(line1_1, 0) * weight_buf[4]; tmp0 += vgetq_lane_f32(line2_1, 0) * weight_buf[11]; tmp0 += vgetq_lane_f32(line3_1, 0) * weight_buf[18]; tmp0 += vgetq_lane_f32(line4_1, 0) * weight_buf[25]; tmp0 += vgetq_lane_f32(line5_1, 0) * weight_buf[32]; tmp0 += vgetq_lane_f32(line6_1, 0) * weight_buf[39]; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_28_31); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; tmp1 += vgetq_lane_f32(line2_1, 0) * weight_buf[4]; tmp1 += vgetq_lane_f32(line3_1, 0) * weight_buf[11]; tmp1 += vgetq_lane_f32(line4_1, 0) * weight_buf[18]; tmp1 += vgetq_lane_f32(line5_1, 0) * weight_buf[25]; tmp1 += vgetq_lane_f32(line6_1, 0) * weight_buf[32]; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_0_3); tmp_4_2 = 
vmlaq_f32(tmp_4_2, line4, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_21_24); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; tmp2 += vgetq_lane_f32(line3_1, 0) * weight_buf[4]; tmp2 += vgetq_lane_f32(line4_1, 0) * weight_buf[11]; tmp2 += vgetq_lane_f32(line5_1, 0) * weight_buf[18]; tmp2 += vgetq_lane_f32(line6_1, 0) * weight_buf[25]; *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); } /* bottom end3 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_28_31); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_0_3); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_21_24); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + 
vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } } } void depthwise_conv_k7s2(float* input, float* weight, float* bias, float* output, int input_h, int input_w, int channel, int output_h, int output_w, int activation, int num_thread) { int input_hw = input_h * input_w; int output_hw = output_h * output_w; int mid_w = output_w - 3; int mid_h = output_h - 3; int remain_h = input_h & 0x1; int remain_w = input_w & 0x1; if (remain_h) mid_h--; if (remain_w) mid_w--; int mid_block = mid_w >> 2; int w = 0; //#pragma omp parallel for num_threads(num_thread) for (int c = 0; c < channel; c++) { float tmp0, tmp1; float* output_buf = output + c * output_hw; float* output_buf_1 = output_buf + output_w; float* weight_buf = weight + c * 49; float bias_c = bias ? bias[c] : 0; float* input_1 = input + c * input_hw; float* input_2 = input_1 + input_w; float* input_3 = input_2 + input_w; float* input_4 = input_3 + input_w; float* input_5 = input_4 + input_w; float* input_6 = input_5 + input_w; float32x4_t kernel_0_3 = vld1q_f32(weight_buf); float32x4_t kernel_4_7 = vld1q_f32(weight_buf + 4); float32x4_t kernel_8_11 = vld1q_f32(weight_buf + 8); float32x4_t kernel_12_15 = vld1q_f32(weight_buf + 12); float32x4_t kernel_16_19 = vld1q_f32(weight_buf + 16); float32x4_t kernel_20_23 = vld1q_f32(weight_buf + 20); float32x4_t kernel_24_27 = vld1q_f32(weight_buf + 24); float32x4_t kernel_28_31 = vld1q_f32(weight_buf + 28); float32x4_t kernel_32_35 = vld1q_f32(weight_buf + 32); float32x4_t kernel_36_39 = vld1q_f32(weight_buf + 36); float32x4_t kernel_40_43 = vld1q_f32(weight_buf + 40); float32x4_t kernel_44_47 = vld1q_f32(weight_buf + 44); float32x4_t kernel_48_51 = vld1q_f32(weight_buf + 48); float32x4_t line1 = vld1q_f32(input_1); float32x4_t line2 = vld1q_f32(input_2); float32x4_t line3 = vld1q_f32(input_3); float32x4_t line4 = vld1q_f32(input_4); float32x4_t line5 = vld1q_f32(input_5); float32x4_t line6 = 
vld1q_f32(input_6); float32x4_t kernel_10_13 = vextq_f32(kernel_8_11, kernel_12_15, 2); float32x4_t kernel_17_20 = vextq_f32(kernel_16_19, kernel_20_23, 1); float32x4_t kernel_31_34 = vextq_f32(kernel_28_31, kernel_32_35, 3); float32x4_t kernel_38_41 = vextq_f32(kernel_36_39, kernel_40_43, 2); float32x4_t kernel_45_48 = vextq_f32(kernel_44_47, kernel_48_51, 1); /* top left1 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_31_34); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_38_41); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_45_48); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } float32x4_t line1_1 = vld1q_f32(input_1 + 4); float32x4_t line2_1 = vld1q_f32(input_2 + 4); float32x4_t line3_1 = vld1q_f32(input_3 + 4); float32x4_t line4_1 = vld1q_f32(input_4 + 4); float32x4_t line5_1 = vld1q_f32(input_5 + 4); float32x4_t line6_1 = vld1q_f32(input_6 + 4); float32x4_t kernel_15_18 = vextq_f32(kernel_12_15, kernel_16_19, 3); float32x4_t kernel_22_25 = vextq_f32(kernel_20_23, kernel_24_27, 2); float32x4_t kernel_29_32 = vextq_f32(kernel_28_31, kernel_32_35, 1); float32x4_t kernel_43_46 = vextq_f32(kernel_40_43, kernel_44_47, 3); /* top left2 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_36_39); tmp_4_0 
= vmlaq_f32(tmp_4_0, line4, kernel_43_46); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_8_11); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_15_18); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_22_25); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_29_32); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_36_39); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_43_46); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line1_1), vget_high_f32(kernel_10_13)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_24_27)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_31_34)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_38_41)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_45_48)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } /* top mid */ float32x4x2_t line_1_01 = vuzpq_f32(line1, line1_1); float32x4x2_t line_2_01 = vuzpq_f32(line2, line2_1); float32x4x2_t line_3_01 = vuzpq_f32(line3, line3_1); float32x4x2_t line_4_01 = vuzpq_f32(line4, line4_1); float32x4x2_t line_5_01 = vuzpq_f32(line5, line5_1); float32x4x2_t line_6_01 = vuzpq_f32(line6, line6_1); for (w = 0; w < mid_block; w++) { float32x4x2_t line_1_23 = vld2q_f32(input_1 + 8 + 8 * w); 
float32x4x2_t line_2_23 = vld2q_f32(input_2 + 8 + 8 * w); float32x4x2_t line_3_23 = vld2q_f32(input_3 + 8 + 8 * w); float32x4x2_t line_4_23 = vld2q_f32(input_4 + 8 + 8 * w); float32x4x2_t line_5_23 = vld2q_f32(input_5 + 8 + 8 * w); float32x4x2_t line_6_23 = vld2q_f32(input_6 + 8 + 8 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); float32x4_t tmp_4_1 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_1_01.val[1], vget_low_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_1_01.val[1], vget_high_f32(kernel_4_7), 1); float32x4_t tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_8_11), 0); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 1); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_2_01.val[1], vget_low_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_2_01.val[1], vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 1); 
tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 1); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_20_23), 0); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_3_01.val[1], vget_high_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_3_01.val[1], vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 0); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 2); tmp_4_0 = 
vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 1); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_4_01.val[1], vget_high_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_4_01.val[1], vget_low_f32(kernel_28_31), 0); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_28_31), 1); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 0); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 1); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_32_35), 0); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_32_35), 1); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_32_35), 0); /* line5 */ tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_5_01.val[1], vget_high_f32(kernel_32_35), 
1); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_36_39), 0); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_36_39), 1); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_36_39), 0); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_36_39), 1); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 3); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_40_43), 0); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 3); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_40_43), 1); /* line6 */ tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_6_01.val[1], vget_high_f32(kernel_40_43), 0); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_40_43), 1); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_44_47), 0); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_44_47), 1); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_44_47), 0); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 3); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_44_47), 1); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 3); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_0 = vector_activation(tmp_4_0, activation); tmp_4_1 = vector_activation(tmp_4_1, activation); vst1q_f32(output_buf, tmp_4_0); vst1q_f32(output_buf_1, tmp_4_1); output_buf += 4; output_buf_1 += 4; line_1_01 = line_1_23; line_2_01 = line_2_23; line_3_01 = line_3_23; line_4_01 = line_4_23; line_5_01 = line_5_23; line_6_01 = line_6_23; } line_1_01 = 
vzipq_f32(line_1_01.val[0], line_1_01.val[1]); line_2_01 = vzipq_f32(line_2_01.val[0], line_2_01.val[1]); line_3_01 = vzipq_f32(line_3_01.val[0], line_3_01.val[1]); line_4_01 = vzipq_f32(line_4_01.val[0], line_4_01.val[1]); line_5_01 = vzipq_f32(line_5_01.val[0], line_5_01.val[1]); line_6_01 = vzipq_f32(line_6_01.val[0], line_6_01.val[1]); line1 = line_1_01.val[0]; line1_1 = line_1_01.val[1]; line2 = line_2_01.val[0]; line2_1 = line_2_01.val[1]; line3 = line_3_01.val[0]; line3_1 = line_3_01.val[1]; line4 = line_4_01.val[0]; line4_1 = line_4_01.val[1]; line5 = line_5_01.val[0]; line5_1 = line_5_01.val[1]; line6 = line_6_01.val[0]; line6_1 = line_6_01.val[1]; float32x4_t kernel_7_10 = vextq_f32(kernel_4_7, kernel_8_11, 3); float32x4_t kernel_14_17 = vextq_f32(kernel_12_15, kernel_16_19, 2); float32x4_t kernel_21_24 = vextq_f32(kernel_20_23, kernel_24_27, 1); float32x4_t kernel_35_38 = vextq_f32(kernel_32_35, kernel_36_39, 3); float32x4_t kernel_42_45 = vextq_f32(kernel_40_43, kernel_44_47, 2); float32x4_t zero = vdupq_n_f32(0.0); float32x4_t kernel_0789 = vextq_f32(zero, kernel_7_10, 3); float32x4_t kernel_0141516 = vextq_f32(zero, kernel_14_17, 3); float32x4_t kernel_0212223 = vextq_f32(zero, kernel_21_24, 3); float32x4_t kernel_0282930 = vextq_f32(zero, kernel_28_31, 3); float32x4_t kernel_0353637 = vextq_f32(zero, kernel_35_38, 3); float32x4_t kernel_0424344 = vextq_f32(zero, kernel_42_45, 3); for (w = mid_block * 4; w < mid_w; w++) { float32x4_t line1_2 = vld1q_f32(input_1 + 8 + 2 * w); float32x4_t line2_2 = vld1q_f32(input_2 + 8 + 2 * w); float32x4_t line3_2 = vld1q_f32(input_3 + 8 + 2 * w); float32x4_t line4_2 = vld1q_f32(input_4 + 8 + 2 * w); float32x4_t line5_2 = vld1q_f32(input_5 + 8 + 2 * w); float32x4_t line6_2 = vld1q_f32(input_6 + 8 + 2 * w); float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0353637); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0424344); 
tmp_4_0 = vmlaq_f32(tmp_4_0, line1_1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line2_1, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line3_1, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line4_1, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_0789); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_0141516); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_0212223); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_0282930); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_0353637); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_0424344); tmp_4_1 = vmlaq_f32(tmp_4_1, line1_1, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, line2_1, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line3_1, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, line4_1, kernel_31_34); tmp_4_1 = vmlaq_f32(tmp_4_1, line5_1, kernel_38_41); tmp_4_1 = vmlaq_f32(tmp_4_1, line6_1, kernel_45_48); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); line1_1 = vextq_f32(line1_1, line1_2, 2); line2_1 = vextq_f32(line2_1, line2_2, 2); line3_1 = vextq_f32(line3_1, line3_2, 2); line4_1 = vextq_f32(line4_1, line4_2, 2); line5_1 = vextq_f32(line5_1, line5_2, 2); line6_1 = vextq_f32(line6_1, line6_2, 2); } /* top right */ if (remain_w) { float32x4_t kernel_9_12 = vextq_f32(kernel_8_11, kernel_12_15, 1); float32x4_t kernel_23_26 = vextq_f32(kernel_20_23, kernel_24_27, 3); float32x4_t kernel_30_33 = vextq_f32(kernel_28_31, kernel_32_35, 2); float32x4_t kernel_37_40 = vextq_f32(kernel_36_39, kernel_40_43, 
1); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_37_40)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_44_47)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_42_45); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line1_1), vget_high_f32(kernel_9_12)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_16_19)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_23_26)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_30_33)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), 
vget_high_f32(kernel_37_40)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_44_47)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_42_45); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } } else { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0353637); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0424344); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_low_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_low_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_low_f32(kernel_45_48)); tmp0 = 
vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_0789); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_0141516); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_0212223); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_0282930); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_0353637); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_0424344); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line1_1), vget_low_f32(kernel_10_13)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_low_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_low_f32(kernel_24_27)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_low_f32(kernel_31_34)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_low_f32(kernel_38_41)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_low_f32(kernel_45_48)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } float* input_7; output_buf = output_buf_1; float32x4_t kernel_3_6 = vextq_f32(kernel_0_3, kernel_4_7, 3); float32x4_t kernel_1_4 = vextq_f32(kernel_0_3, kernel_4_7, 1); float32x4_t kernel_0012 = vextq_f32(zero, kernel_0_3, 3); /* mid */ for (int h = 0; h < mid_h; h++) { input_1 = input + c * input_hw + input_w * (1 + 2 * h); input_2 = input_1 + input_w; input_3 = input_2 + input_w; input_4 = input_3 + input_w; input_5 = input_4 + input_w; input_6 = input_5 + input_w; input_7 = input_6 + input_w; line1 = vld1q_f32(input_1); line2 = vld1q_f32(input_2); line3 = vld1q_f32(input_3); line4 = vld1q_f32(input_4); line5 = vld1q_f32(input_5); line6 = vld1q_f32(input_6); float32x4_t line7 = vld1q_f32(input_7); /* mid left 1 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_17_20); 
tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line1_1 = vld1q_f32(input_1 + 4); line2_1 = vld1q_f32(input_2 + 4); line3_1 = vld1q_f32(input_3 + 4); line4_1 = vld1q_f32(input_4 + 4); line5_1 = vld1q_f32(input_5 + 4); line6_1 = vld1q_f32(input_6 + 4); /* mid left 2 */ float32x4_t line7_1 = vld1q_f32(input_7 + 4); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_1_4); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_8_11); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_15_18); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_36_39); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_43_46); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line7_1), vget_high_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line_1_01 = vuzpq_f32(line1, line1_1); line_2_01 = vuzpq_f32(line2, line2_1); line_3_01 = vuzpq_f32(line3, line3_1); line_4_01 = vuzpq_f32(line4, line4_1); line_5_01 = vuzpq_f32(line5, line5_1); line_6_01 = 
vuzpq_f32(line6, line6_1); float32x4x2_t line_7_01 = vuzpq_f32(line7, line7_1); /* mid mid */ for (w = 0; w < mid_block; w++) { float32x4x2_t line_1_23 = vld2q_f32(input_1 + 8 + 8 * w); float32x4x2_t line_2_23 = vld2q_f32(input_2 + 8 + 8 * w); float32x4x2_t line_3_23 = vld2q_f32(input_3 + 8 + 8 * w); float32x4x2_t line_4_23 = vld2q_f32(input_4 + 8 + 8 * w); float32x4x2_t line_5_23 = vld2q_f32(input_5 + 8 + 8 * w); float32x4x2_t line_6_23 = vld2q_f32(input_6 + 8 + 8 * w); float32x4x2_t line_7_23 = vld2q_f32(input_7 + 8 + 8 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_1_01.val[1], vget_low_f32(kernel_0_3), 0); float32x4_t tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_4_7), 0); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_2_01.val[1], vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, 
tmp, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 1); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_3_01.val[1], vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_20_23), 0); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_4_01.val[1], vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, 
vget_high_f32(kernel_24_27), 1); /* line5 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_5_01.val[1], vget_low_f32(kernel_28_31), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); /* line6 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_6_01.val[1], vget_high_f32(kernel_32_35), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); /* line7 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_7_01.val[1], vget_high_f32(kernel_40_43), 0); tmp = vextq_f32(line_7_01.val[0], line_7_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_40_43), 1); tmp = 
vextq_f32(line_7_01.val[1], line_7_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 0); tmp = vextq_f32(line_7_01.val[0], line_7_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 1); tmp = vextq_f32(line_7_01.val[1], line_7_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 0); tmp = vextq_f32(line_7_01.val[0], line_7_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 1); tmp = vextq_f32(line_7_01.val[1], line_7_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_0 = vector_activation(tmp_4_0, activation); vst1q_f32(output_buf, tmp_4_0); output_buf += 4; line_1_01 = line_1_23; line_2_01 = line_2_23; line_3_01 = line_3_23; line_4_01 = line_4_23; line_5_01 = line_5_23; line_6_01 = line_6_23; line_7_01 = line_7_23; } line_1_01 = vzipq_f32(line_1_01.val[0], line_1_01.val[1]); line_2_01 = vzipq_f32(line_2_01.val[0], line_2_01.val[1]); line_3_01 = vzipq_f32(line_3_01.val[0], line_3_01.val[1]); line_4_01 = vzipq_f32(line_4_01.val[0], line_4_01.val[1]); line_5_01 = vzipq_f32(line_5_01.val[0], line_5_01.val[1]); line_6_01 = vzipq_f32(line_6_01.val[0], line_6_01.val[1]); line_7_01 = vzipq_f32(line_7_01.val[0], line_7_01.val[1]); line1 = line_1_01.val[0]; line1_1 = line_1_01.val[1]; line2 = line_2_01.val[0]; line2_1 = line_2_01.val[1]; line3 = line_3_01.val[0]; line3_1 = line_3_01.val[1]; line4 = line_4_01.val[0]; line4_1 = line_4_01.val[1]; line5 = line_5_01.val[0]; line5_1 = line_5_01.val[1]; line6 = line_6_01.val[0]; line6_1 = line_6_01.val[1]; line7 = line_7_01.val[0]; line7_1 = line_7_01.val[1]; for (w = mid_block * 4; w < mid_w; w++) { float32x4_t line1_2 = vld1q_f32(input_1 + 8 + 2 * w); float32x4_t line2_2 = vld1q_f32(input_2 + 8 + 2 * w); float32x4_t line3_2 = vld1q_f32(input_3 + 8 + 2 * w); float32x4_t line4_2 = vld1q_f32(input_4 + 8 + 2 * w); float32x4_t line5_2 = vld1q_f32(input_5 + 8 + 2 * w); 
float32x4_t line6_2 = vld1q_f32(input_6 + 8 + 2 * w); float32x4_t line7_2 = vld1q_f32(input_7 + 8 + 2 * w); float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_0353637); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_0424344); tmp_4_0 = vmlaq_f32(tmp_4_0, line1_1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2_1, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3_1, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4_1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5_1, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6_1, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line7_1, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); line7 = vextq_f32(line7, line7_1, 2); line1_1 = vextq_f32(line1_1, line1_2, 2); line2_1 = vextq_f32(line2_1, line2_2, 2); line3_1 = vextq_f32(line3_1, line3_2, 2); line4_1 = vextq_f32(line4_1, line4_2, 2); line5_1 = vextq_f32(line5_1, line5_2, 2); line6_1 = vextq_f32(line6_1, line6_2, 2); line7_1 = vextq_f32(line7_1, line7_2, 2); } /* mid right */ if (remain_w) { float32x4_t kernel_9_12 = vextq_f32(kernel_8_11, kernel_12_15, 1); float32x4_t kernel_23_26 = vextq_f32(kernel_20_23, kernel_24_27, 3); float32x4_t kernel_30_33 = vextq_f32(kernel_28_31, kernel_32_35, 2); float32x4_t kernel_37_40 = vextq_f32(kernel_36_39, kernel_40_43, 1); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, 
line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line7 = vextq_f32(line7, line7_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); line7_1 = vextq_f32(line7_1, line7_1, 1); float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_4_7)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_9_12)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_16_19)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_37_40)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line7_1), vget_high_f32(kernel_44_47)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); line7 = vextq_f32(line7, line7_1, 2); tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = 
vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } else { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_0353637); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_0424344); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_low_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_low_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_low_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_low_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_low_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line7_1), vget_low_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } } /* bottom */ if (remain_h) { output_buf_1 = output_buf + output_w; input_1 = input + c * input_hw + (input_h - 6) * input_w; input_2 = input_1 + input_w; input_3 = input_2 + input_w; input_4 = input_3 + input_w; input_5 = input_4 + input_w; input_6 = input_5 + input_w; line1 = vld1q_f32(input_1); line2 = vld1q_f32(input_2); line3 = vld1q_f32(input_3); line4 = vld1q_f32(input_4); line5 = vld1q_f32(input_5); line6 = vld1q_f32(input_6); /* bottom 1 left */ { float32x4_t tmp_4_0 = 
vmulq_f32(line1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_38_41); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_3_6); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_24_27); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } line1_1 = vld1q_f32(input_1 + 4); line2_1 = vld1q_f32(input_2 + 4); line3_1 = vld1q_f32(input_3 + 4); line4_1 = vld1q_f32(input_4 + 4); line5_1 = vld1q_f32(input_5 + 4); line6_1 = vld1q_f32(input_6 + 4); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_1_4); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_8_11); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_15_18); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_36_39); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_38_41)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + 
bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_1_4); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_8_11); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_15_18); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_22_25); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_3_6)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_10_13)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_24_27)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } line_1_01 = vuzpq_f32(line1, line1_1); line_2_01 = vuzpq_f32(line2, line2_1); line_3_01 = vuzpq_f32(line3, line3_1); line_4_01 = vuzpq_f32(line4, line4_1); line_5_01 = vuzpq_f32(line5, line5_1); line_6_01 = vuzpq_f32(line6, line6_1); /* bottom 1 mid */ for (w = 0; w < mid_block; w++) { float32x4x2_t line_1_23 = vld2q_f32(input_1 + 8 + 8 * w); float32x4x2_t line_2_23 = vld2q_f32(input_2 + 8 + 8 * w); float32x4x2_t line_3_23 = vld2q_f32(input_3 + 8 + 8 * w); float32x4x2_t line_4_23 = vld2q_f32(input_4 + 8 + 8 * w); float32x4x2_t line_5_23 = vld2q_f32(input_5 + 8 + 8 * w); float32x4x2_t line_6_23 = vld2q_f32(input_6 + 8 + 8 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); float32x4_t tmp_4_1 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_1_01.val[1], vget_low_f32(kernel_0_3), 0); float32x4_t tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 1); tmp = 
vextq_f32(line_1_01.val[1], line_1_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_4_7), 0); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_2_01.val[1], vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 1); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_3_01.val[1], vget_high_f32(kernel_12_15), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_3_01.val[1], vget_low_f32(kernel_0_3), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_12_15), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_0_3), 1); tmp 
= vextq_f32(line_3_01.val[1], line_3_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_4_7), 0); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_4_01.val[1], vget_low_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_4_01.val[1], vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_8_11), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 1); /* line5 */ tmp_4_0 = 
vmlaq_lane_f32(tmp_4_0, line_5_01.val[1], vget_low_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_5_01.val[1], vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_20_23), 0); /* line6 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_6_01.val[1], vget_high_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_6_01.val[1], vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 2); tmp_4_0 = 
vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_0 = vector_activation(tmp_4_0, activation); vst1q_f32(output_buf, tmp_4_0); output_buf += 4; tmp_4_1 = vector_activation(tmp_4_1, activation); vst1q_f32(output_buf_1, tmp_4_1); output_buf_1 += 4; line_1_01 = line_1_23; line_2_01 = line_2_23; line_3_01 = line_3_23; line_4_01 = line_4_23; line_5_01 = line_5_23; line_6_01 = line_6_23; } line_1_01 = vzipq_f32(line_1_01.val[0], line_1_01.val[1]); line_2_01 = vzipq_f32(line_2_01.val[0], line_2_01.val[1]); line_3_01 = vzipq_f32(line_3_01.val[0], line_3_01.val[1]); line_4_01 = vzipq_f32(line_4_01.val[0], line_4_01.val[1]); line_5_01 = vzipq_f32(line_5_01.val[0], line_5_01.val[1]); line_6_01 = vzipq_f32(line_6_01.val[0], line_6_01.val[1]); line1 = line_1_01.val[0]; line1_1 = line_1_01.val[1]; line2 = line_2_01.val[0]; line2_1 = line_2_01.val[1]; line3 = line_3_01.val[0]; line3_1 = line_3_01.val[1]; line4 = line_4_01.val[0]; line4_1 = line_4_01.val[1]; line5 = line_5_01.val[0]; line5_1 = line_5_01.val[1]; line6 = line_6_01.val[0]; line6_1 = line_6_01.val[1]; for (w = mid_block * 4; w < mid_w; w++) { float32x4_t line1_2 = vld1q_f32(input_1 + 8 + 2 * w); float32x4_t line2_2 = vld1q_f32(input_2 + 8 + 2 * w); float32x4_t line3_2 = vld1q_f32(input_3 + 8 + 2 * w); float32x4_t line4_2 = vld1q_f32(input_4 + 8 + 
2 * w); float32x4_t line5_2 = vld1q_f32(input_5 + 8 + 2 * w); float32x4_t line6_2 = vld1q_f32(input_6 + 8 + 2 * w); float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_0353637); tmp_4_0 = vmlaq_f32(tmp_4_0, line1_1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2_1, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3_1, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4_1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5_1, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6_1, kernel_38_41); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_0012); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_0789); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_0141516); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_0212223); tmp_4_1 = vmlaq_f32(tmp_4_1, line3_1, kernel_3_6); tmp_4_1 = vmlaq_f32(tmp_4_1, line4_1, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, line5_1, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line6_1, kernel_24_27); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); line1_1 = vextq_f32(line1_1, line1_2, 2); line2_1 = vextq_f32(line2_1, line2_2, 2); line3_1 = vextq_f32(line3_1, line3_2, 2); line4_1 = vextq_f32(line4_1, line4_2, 2); line5_1 = vextq_f32(line5_1, line5_2, 2); line6_1 = vextq_f32(line6_1, 
line6_2, 2); } /* bottom 1 right */ if (remain_w) { float32x4_t kernel_9_12 = vextq_f32(kernel_8_11, kernel_12_15, 1); float32x4_t kernel_23_26 = vextq_f32(kernel_20_23, kernel_24_27, 3); float32x4_t kernel_30_33 = vextq_f32(kernel_28_31, kernel_32_35, 2); float32x4_t kernel_37_40 = vextq_f32(kernel_36_39, kernel_40_43, 1); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_4_7)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_9_12)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_16_19)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_37_40)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, 
kernel_21_24); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_low_f32(kernel_4_7)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_9_12)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_16_19)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_23_26)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_21_24); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } } else { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_0353637); float32x2_t tmp_2_0 = 
vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_low_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_low_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_low_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_low_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_low_f32(kernel_38_41)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_0012); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_0789); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_0141516); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_0212223); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_low_f32(kernel_3_6)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_low_f32(kernel_10_13)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_low_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_low_f32(kernel_24_27)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } } else { input_1 = input + c * input_hw + (input_h - 5) * input_w; input_2 = input_1 + input_w; input_3 = input_2 + input_w; input_4 = input_3 + input_w; input_5 = input_4 + input_w; line1 = vld1q_f32(input_1); line2 = vld1q_f32(input_2); line3 = vld1q_f32(input_3); line4 = vld1q_f32(input_4); line5 = vld1q_f32(input_5); /* bottom 0 left */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_31_34); tmp0 = 
vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line1_1 = vld1q_f32(input_1 + 4); line2_1 = vld1q_f32(input_2 + 4); line3_1 = vld1q_f32(input_3 + 4); line4_1 = vld1q_f32(input_4 + 4); line5_1 = vld1q_f32(input_5 + 4); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_1_4); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_8_11); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_15_18); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_29_32); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_31_34)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line_1_01 = vuzpq_f32(line1, line1_1); line_2_01 = vuzpq_f32(line2, line2_1); line_3_01 = vuzpq_f32(line3, line3_1); line_4_01 = vuzpq_f32(line4, line4_1); line_5_01 = vuzpq_f32(line5, line5_1); /* bottom 0 mid */ for (w = 0; w < mid_block; w++) { float32x4x2_t line_1_23 = vld2q_f32(input_1 + 8 + 8 * w); float32x4x2_t line_2_23 = vld2q_f32(input_2 + 8 + 8 * w); float32x4x2_t line_3_23 = vld2q_f32(input_3 + 8 + 8 * w); float32x4x2_t line_4_23 = vld2q_f32(input_4 + 8 + 8 * w); float32x4x2_t line_5_23 = vld2q_f32(input_5 + 8 + 8 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_1_01.val[1], vget_low_f32(kernel_0_3), 0); float32x4_t tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, 
vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_4_7), 0); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_2_01.val[1], vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 1); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_3_01.val[1], vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 1); tmp = 
vextq_f32(line_3_01.val[1], line_3_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_20_23), 0); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_4_01.val[1], vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); /* line5 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_5_01.val[1], vget_low_f32(kernel_28_31), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp = 
vextq_f32(line_5_01.val[1], line_5_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_0 = vector_activation(tmp_4_0, activation); vst1q_f32(output_buf, tmp_4_0); output_buf += 4; line_1_01 = line_1_23; line_2_01 = line_2_23; line_3_01 = line_3_23; line_4_01 = line_4_23; line_5_01 = line_5_23; } line_1_01 = vzipq_f32(line_1_01.val[0], line_1_01.val[1]); line_2_01 = vzipq_f32(line_2_01.val[0], line_2_01.val[1]); line_3_01 = vzipq_f32(line_3_01.val[0], line_3_01.val[1]); line_4_01 = vzipq_f32(line_4_01.val[0], line_4_01.val[1]); line_5_01 = vzipq_f32(line_5_01.val[0], line_5_01.val[1]); line1 = line_1_01.val[0]; line1_1 = line_1_01.val[1]; line2 = line_2_01.val[0]; line2_1 = line_2_01.val[1]; line3 = line_3_01.val[0]; line3_1 = line_3_01.val[1]; line4 = line_4_01.val[0]; line4_1 = line_4_01.val[1]; line5 = line_5_01.val[0]; line5_1 = line_5_01.val[1]; for (w = mid_block * 4; w < mid_w; w++) { float32x4_t line1_2 = vld1q_f32(input_1 + 8 + 2 * w); float32x4_t line2_2 = vld1q_f32(input_2 + 8 + 2 * w); float32x4_t line3_2 = vld1q_f32(input_3 + 8 + 2 * w); float32x4_t line4_2 = vld1q_f32(input_4 + 8 + 2 * w); float32x4_t line5_2 = vld1q_f32(input_5 + 8 + 2 * w); float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line1_1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2_1, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3_1, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4_1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5_1, kernel_31_34); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); 
line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line1_1 = vextq_f32(line1_1, line1_2, 2); line2_1 = vextq_f32(line2_1, line2_2, 2); line3_1 = vextq_f32(line3_1, line3_2, 2); line4_1 = vextq_f32(line4_1, line4_2, 2); line5_1 = vextq_f32(line5_1, line5_2, 2); } /* bottom 0 right */ if (remain_w) { float32x4_t kernel_9_12 = vextq_f32(kernel_8_11, kernel_12_15, 1); float32x4_t kernel_23_26 = vextq_f32(kernel_20_23, kernel_24_27, 3); float32x4_t kernel_30_33 = vextq_f32(kernel_28_31, kernel_32_35, 2); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_4_7)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_9_12)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_16_19)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_30_33)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = 
vextq_f32(line5, line5_1, 2); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } } else { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_low_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_low_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_low_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_low_f32(kernel_31_34)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } } } } #endif
GB_binop__rminus_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rminus_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_01__rminus_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__rminus_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_03__rminus_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rminus_uint16)
// A*D function (colscale):         GB (_AxD__rminus_uint16)
// D*A function (rowscale):         GB (_DxB__rminus_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__rminus_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__rminus_uint16)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_uint16)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_uint16)
// C=scalar+B                       GB (_bind1st__rminus_uint16)
// C=scalar+B'                      GB (_bind1st_tran__rminus_uint16)
// C=A+scalar                       GB (_bind2nd__rminus_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__rminus_uint16)

// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t

// BinaryOp: cij = (bij - aij)

// The macros below configure the generic template files (#include'd into the
// functions that follow) for the RMINUS operator on uint16_t.  RMINUS is
// "reverse minus": z = y - x, i.e. the operands of MINUS swapped.

// type of the A matrix entries
#define GB_ATYPE \
    uint16_t

// type of the B matrix entries
#define GB_BTYPE \
    uint16_t

// type of the C matrix entries
#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] (GBX handles the iso-valued-matrix case)
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// access the numeric value of C at position p
#define GB_CX(p) Cx [p]

// binary operator: note the reversed operand order (y - x) for RMINUS
#define GB_BINOP(z,x,y,i,j) \
    z = (y - x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out controlled by GB_control.h)
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_UINT16 || GxB_NO_RMINUS_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, 
bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__rminus_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, 
const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
parallel-firstprivate.c
/* * parallel-firstprivate.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0; #pragma omp parallel num_threads(2) firstprivate(var) { var = 1; } fprintf(stderr, "DONE\n"); // var should still be 0! return var; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
GB_unaryop__lnot_uint32_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// Instantiates the LNOT (logical not) unary operator for uint8 input and
// uint32 output.  The loop bodies live in the shared template files included
// below; this file only supplies the type/operator macros they expand.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint32_uint8
// op(A') function:  GB_tran__lnot_uint32_uint8

// C type:   uint32_t
// A type:   uint8_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical negation (1 if x is zero, else 0)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint32_uint8
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
a.21.2.c
/* { dg-do compile } */

/* OpenMP specification example A.21.2: a deliberately NON-CONFORMING use of
 * the ordered construct, kept as a compile-only test case.  Do not "fix" it —
 * the incorrectness is the point of the example. */

void
work (int i)
{
}

void
a21_wrong (int n)
{
  int i;
#pragma omp for ordered
  for (i = 0; i < n; i++)
    {
      /* incorrect because an iteration may not execute more than one
	 ordered region */
#pragma omp ordered
      work (i);
#pragma omp ordered
      work (i + 1);
    }
}
nqueens-openmp.c
//# 601 west second street, 2nd floor, elevator B, two rights

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>
#include <unistd.h>

// Print an n*n board stored row-major as single characters.
void print_board(int n, char *board){
  for (int r = 0; r < n; r++) {
    for (int c = 0; c < n; c++) {
      printf("%c ", board[r*n+c]);
    }
    printf("\n");
  }
  printf("\n");
}

// Row of a flat index.
int calc_row(int n, int index){
  return floor(index / n);
}

// Column of a flat index.
int calc_col(int n, int index){
  return index % n;
}

// Flat index of (row, col).
int calc_index(int n, int row, int col) {
  return row * n + col;
}

// Mark every free square in the row of `index` as attacked ('*').
void remove_rows(int n, int index, char *board){
  int row = calc_row(n, index);
  int col_start = calc_index(n, row, 0);
  int col_end = calc_index(n, row, n);
  for(int i = col_start; i < col_end; i++) {
    if (board[i] != 'q') {
      board[i] = '*';
    }
  }
}

// Mark every free square in the column of `index` as attacked.
void remove_cols(int n, int index, char *board){
  int col = calc_col(n, index);
  int row_start = calc_index(n, 0, col);
  int row_end = calc_index(n, n, col);
  for(int i = row_start; i < row_end; i += n){
    if (board[i] != 'q') {
      board[i] = '*';
    }
  }
}

// Mark all four diagonals through `index` as attacked.
void remove_diagonals(int n, int index, char *board){
  int row;
  int col_start;
  int diag;

  // Down and Right
  row = calc_row(n, index);
  col_start = calc_col(n, index) + 1;
  for (int col = col_start; col < n; col++){
    if (row + 1 < n){
      row += 1;
      diag = calc_index(n, row, col);
      if (board[diag] != 'q'){
        board[diag] = '*';
      }
    }
  }

  // Down and Left
  row = calc_row(n, index);
  col_start = calc_col(n, index) - 1;
  for (int col = col_start; col > -1; col--){
    if (row + 1 < n){
      row += 1;
      diag = calc_index(n, row, col);
      if (board[diag] != 'q'){
        board[diag] = '*';
      }
    }
  }

  // Up and Right
  row = calc_row(n, index);
  col_start = calc_col(n, index) + 1;
  for (int col = col_start; col < n; col++){
    if (row - 1 >= 0){
      row -= 1;
      diag = calc_index(n, row, col);
      if (board[diag] != 'q'){
        board[diag] = '*';
      }
    }
  }

  // Up and Left
  row = calc_row(n, index);
  col_start = calc_col(n, index) - 1;
  for (int col = col_start; col > -1; col--){
    if (row - 1 >= 0){
      row -= 1;
      diag = calc_index(n, row, col);
      if (board[diag] != 'q'){
        board[diag] = '*';
      }
    }
  }
}

// Place a queen at `index` and mark its row/column/diagonals as attacked.
void place_piece(int n, int index, char *board){
  board[index] = 'q';
  remove_rows(n, index, board);
  remove_cols(n, index, board);
  remove_diagonals(n, index, board);
}

// Count squares still marked '_' (free).
int remaining_moves(int n, char *board){
  int moves = 0;
  for (int i = 0; i < n*n; i++) {
    if (board[i] == '_'){
      moves += 1;
    }
  }
  return moves;
}

// Copy `board` into solutions[s_top].  (Currently unused: main inlines this
// under its critical section.)
void new_solution(int n, char *board, int s_top, char **solutions){
  for (int i = 0; i < n*n; i++) {
    solutions[s_top][i] = board[i];
  }
}

// A board is a solution iff it holds exactly n queens.
int validate_board(int n, char *board){
  int queens = 0;
  for (int i = 0; i < n*n; i++) {
    if (board[i] == 'q') {
      queens += 1;
    }
  }
  if (queens == n) {
    return 1;
  } else {
    return 0;
  }
}

// Restore `board` from the stack entry at `top`.
void new_board(int n, int top, char *board, char **stack){
  for (int i = 0; i < n*n; i++) {
    board[i] = stack[top][i];
  }
}

// Grow the backtracking stack if `top` has reached its capacity `*max`.
//
// FIX: takes `char ***stack` (was `char **stack` by value) so the pointer
// returned by realloc is propagated to the caller — previously a moved
// allocation left the caller holding a dangling pointer.  New rows are now
// allocated starting at the OLD capacity (was `top`), which previously
// overwrote — and leaked — live row allocations.
void validate_stack_size(int n, int top, int *max, char ***stack){
  if (top >= *max){
    int old_max = *max;
    // growth formula kept from the original (roughly doubles, plus n)
    *max = *max + (int)floor(*max + n);
    *stack = realloc(*stack, (*max)*sizeof(char*));
    for (int i = old_max; i < (*max); i++) {
      (*stack)[i] = malloc((n * n)*sizeof(char));
    }
  }
}

// Push a copy of `board` (with `index` masked out) so the search can later
// resume from the next candidate square.
void backtrack(int n, int index, char *board, int *top, char **stack){
  // if only one move is left, backtracking is not needed
  if (remaining_moves(n, board) > 1) {
    // set the place holder for the current move
    board[index] = '*';
    // copy the backtracking array to stack
    for (int i = 0; i < n*n; i++) {
      stack[*top][i] = board[i];
    }
    // move to the next item in the stack
    *top += 1;
  }
}

int main() {
  system("clear");
  system("reset");

  // The size of the board, make this an argument
  int n = 10;

  int s_max = n;   // Max solutions
  int s_top = 0;   // Current solution
  int sols = 0;    // Total number of solutions found

  // Create a 2D array that will hold N*N board arrays
  char **istack = (char**) malloc(n*n*sizeof(char*));
  for (int i = 0; i < n*n; i++) {
    istack[i] = (char*) malloc((n*n)*sizeof(char));
  }

  // Create a 2D array that will hold n solutions.
  char **solutions = (char**) malloc(n*sizeof(char*));
  for (int i = 0; i < n; i++) {
    solutions[i] = (char*) malloc((n*n)*sizeof(char));
  }

  // initialize istack with every possible first move
  #pragma omp parallel for
  for (int o = 0; o < n*n; o++){
    for (int i = 0; i < n*n; i++) {
      if (i < o){
        istack[o][i] = '*';
      } else {
        istack[o][i] = '_';
      }
    }
  }

  #pragma omp parallel shared(n, s_top, s_max, sols, solutions)
  {
    #pragma omp for
    for (int init=0; init < n*n; init++) {
      #pragma omp task
      {
        // Thread specific variables
        int top = 0;
        int max = n*n;
        int index = 0;
        int matches = 0;
        char match_found = 'f';
        char successful_move = 'f';
        char *board = (char*) malloc((n*n)*sizeof(char));
        new_board(n, init, board, istack);

        // Create a stack to manage backtracking
        // This is a 2D array that holds boards
        // Top is the latest board to be added
        // Max is the maximum number of boards that can fit in the
        // memory allocation
        char **stack = (char**) malloc(n*n*sizeof(char*));
        for (int i = 0; i < n*n; i++) {
          stack[i] = (char*) malloc((n*n)*sizeof(char));
        }

        while (top >= 0) {
          if (remaining_moves(n, board) > 0){
            index = 0;
            successful_move = 'f';
            // take the first still-free square, pushing a resume point first
            while (index < n*n && successful_move == 'f') {
              if (board[index] == '_') {
                validate_stack_size(n, top, &max, &stack);
                backtrack(n, index, board, &top, stack);
                place_piece(n, index, board);
                successful_move = 't';
              } else {
                index += 1;
              }
            }
          } else {
            // dead end: record the board if it is a new complete solution
            #pragma omp critical
            {
              if (validate_board(n, board) == 1){
                if (s_top == 0){
                  for (int i=0; i < n*n; i++){
                    solutions[s_top][i] = board[i];
                  }
                  s_top += 1;
                  sols += 1;
                } else {
                  // deduplicate against every recorded solution
                  match_found = 'f';
                  for (int s=0; s < s_top; s++){
                    if (match_found == 'f'){
                      matches = 0;
                      for (int c=0; c < n*n; c++){
                        if (solutions[s][c]==board[c]){
                          matches += 1;
                        }
                      }
                      if (matches == n*n){
                        match_found = 't';
                      }
                    }
                  }
                  if (match_found == 'f'){
                    if (s_top >= s_max){
                      int old_s_max = s_max;
                      s_max = s_max + floor(s_max / 2);
                      solutions = realloc(solutions, (s_max)*sizeof(char*));
                      for (int i = old_s_max; i < (s_max); i++) {
                        solutions[i] = malloc((n*n)*sizeof(char));
                      }
                    }
                    for (int i=0; i < n*n; i++){
                      solutions[s_top][i] = board[i];
                    }
                    s_top += 1;
                    sols += validate_board(n, board);
                  }
                }
              }
            }
            // pop the most recent resume point, if any
            top -= 1;
            if (top >= 0) {
              new_board(n, top, board, stack);
            }
          }
        }

        // free the task-local stack rows (previously leaked) and the board
        for (int i = 0; i < max; i++) {
          free(stack[i]);
        }
        free(stack);
        free(board);
      }
    }
  }

  printf("Solutions: %d\n\n", sols);

  // free row allocations (previously leaked) before the spine arrays
  for (int i = 0; i < n*n; i++) {
    free(istack[i]);
  }
  free(istack);
  for (int i = 0; i < s_max; i++) {
    free(solutions[i]);
  }
  free(solutions);
}
convolution_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // author:BUG1989 (https://github.com/BUG1989/) Long-term support. // author:FuGuangping (https://github.com/fu1899) Implemented the first version of INT8 quantization on ARMv7. // // Copyright (C) 2019 BUG1989. All rights reserved. // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd23_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch) { Mat kernel_tm(4*4, inch, outch, 2ul); // G const short ktm[4][3] = { { 2, 0, 0}, { 1, 1, 1}, { 1, -1, 1}, { 0, 0, 2} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[4][3]; for (int i=0; i<4; i++) { tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j=0; j<4; j++) { short* tmpp = &tmp[j][0]; for (int i=0; i<4; i++) { kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * 
ktm[i][2]; } } } } for (int r=0; r<4; r++) { Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u); int p = 0; for (; p+7<outch; p+=8) { const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16; const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16; const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16; const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16; const short* kernel4 = (const short*)kernel_tm + (p+4)*inch*16; const short* kernel5 = (const short*)kernel_tm + (p+5)*inch*16; const short* kernel6 = (const short*)kernel_tm + (p+6)*inch*16; const short* kernel7 = (const short*)kernel_tm + (p+7)*inch*16; short* ktmp = kernel_tm_test.channel(p/8); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp[16] = kernel4[r*4+0]; ktmp[17] = kernel4[r*4+1]; ktmp[18] = kernel4[r*4+2]; ktmp[19] = kernel4[r*4+3]; ktmp[20] = kernel5[r*4+0]; ktmp[21] = kernel5[r*4+1]; ktmp[22] = kernel5[r*4+2]; ktmp[23] = kernel5[r*4+3]; ktmp[24] = kernel6[r*4+0]; ktmp[25] = kernel6[r*4+1]; ktmp[26] = kernel6[r*4+2]; ktmp[27] = kernel6[r*4+3]; ktmp[28] = kernel7[r*4+0]; ktmp[29] = kernel7[r*4+1]; ktmp[30] = kernel7[r*4+2]; ktmp[31] = kernel7[r*4+3]; ktmp += 32; kernel0 += 16; kernel1 += 16; kernel2 += 16; kernel3 += 16; kernel4 += 16; kernel5 += 16; kernel6 += 16; kernel7 += 16; } } for (; p+3<outch; p+=4) { const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16; const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16; const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16; const short* kernel3 = (const short*)kernel_tm + 
(p+3)*inch*16; short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp += 16; kernel0 += 16; kernel1 += 16; kernel2 += 16; kernel3 += 16; } } for (; p<outch; p++) { const short* kernel0 = (const short*)kernel_tm + p*inch*16; short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp += 4; kernel0 += 16; } } kernel_tm2.push_back(kernel_tm_test); } } static void conv3x3s1_winograd23_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 2n+2, winograd F(2,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 1) / 2 * 2; outh = (outh + 1) / 2 * 2; w = outw + 2; h = outh + 2; Option opt_b = opt; opt_b.blob_allocator = opt.workspace_allocator; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*4, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {1.0f, 0.0f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 1.00f, 0.0f}, // {0.0f, 
-1.0f, 0.00f, 1.0f} // }; #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j=0; j<nColBlocks; j++) { const signed char* r0 = img + w * j * 2; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; for (int i = 0; i<nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON #if __aarch64__ asm volatile( // load "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v1.8b}, [%1] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.8b}, [%2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v3.8b}, [%3] \n" // w = B_t * d, trans int8 to int16 "ssubl v4.8h, v0.8b, v2.8b \n" // d4 "saddl v5.8h, v1.8b, v2.8b \n" // d6 "ssubl v6.8h, v2.8b, v1.8b \n" // d8 "ssubl v7.8h, v3.8b, v1.8b \n" // d10 // transpose w to w_t "trn1 v8.4h, v4.4h, v5.4h \n" "trn2 v9.4h, v4.4h, v5.4h \n" "trn1 v10.4h, v6.4h, v7.4h \n" "trn2 v11.4h, v6.4h, v7.4h \n" "trn1 v0.2s, v8.2s, v10.2s \n" "trn2 v2.2s, v8.2s, v10.2s \n" "trn1 v1.2s, v9.2s, v11.2s \n" "trn2 v3.2s, v9.2s, v11.2s \n" // U = B_t * d_t "sub v4.4h, v0.4h, v2.4h \n" "add v5.4h, v1.4h, v2.4h \n" "sub v6.4h, v2.4h, v1.4h \n" "sub v7.4h, v3.4h, v1.4h \n" // save "st1 {v4.4h}, [%4] \n" "st1 {v5.4h}, [%5] \n" "st1 {v6.4h}, [%6] \n" "st1 {v7.4h}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else asm 
volatile( // load "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "pld [%1, #64] \n" "vld1.s8 {d1}, [%1] \n" "pld [%2, #64] \n" "vld1.s8 {d2}, [%2] \n" "pld [%3, #64] \n" "vld1.s8 {d3}, [%3] \n" // w = B_t * d, trans int8 to int16 "vsubl.s8 q2, d0, d2 \n" // d4 "vaddl.s8 q3, d1, d2 \n" // d6 "vsubl.s8 q4, d2, d1 \n" // d8 "vsubl.s8 q5, d3, d1 \n" // d10 // transpose w to w_t "vtrn.s16 d4, d6 \n" "vtrn.s16 d8, d10 \n" "vtrn.s32 d4, d8 \n" "vtrn.s32 d6, d10 \n" // U = B_t * d_t "vsub.s16 d11, d4, d8 \n" "vadd.s16 d12, d6, d8 \n" "vsub.s16 d13, d8, d6 \n" "vsub.s16 d14, d10, d6 \n" // save "vst1.s32 {d11}, [%4] \n" "vst1.s32 {d12}, [%5] \n" "vst1.s32 {d13}, [%6] \n" "vst1.s32 {d14}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #endif // __aarch64__ #else short d0[4],d1[4],d2[4],d3[4]; short w0[4],w1[4],w2[4],w3[4]; short t0[4],t1[4],t2[4],t3[4]; // load for (int n = 0; n < 4; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; } // w = B_t * d for (int n = 0; n < 4; n++) { w0[n] = d0[n] - d2[n]; w1[n] = d1[n] + d2[n]; w2[n] = d2[n] - d1[n]; w3[n] = d3[n] - d1[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; } // U = B_t * d_t for (int n = 0; n < 4; n++) { d0[n] = t0[n] - t2[n]; d1[n] = t1[n] + t2[n]; d2[n] = t2[n] - t1[n]; d3[n] = t3[n] - t1[n]; } // save to out_tm for (int n = 0; n < 4; n++) { out_tm0[n] = d0[n]; out_tm1[n] = d1[n]; out_tm2[n] = d2[n]; out_tm3[n] = d3[n]; } #endif r0 += 2; r1 += 2; r2 += 2; r3 += 2; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 
2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<4; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, 
#16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; output4_tm += 16; output5_tm += 16; output6_tm += 16; output7_tm += 16; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) //"prfm pldl1keep, [%2, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%1] \n" "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif output0_tm += 16; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform 
output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[2][4] = { // {1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 1.0f} // }; int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; #if __ARM_NEON int32x2_t _shift = vdup_n_s32(-2); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); int* outRow0 = top_blob_bordered.channel(p); int* outRow1 = outRow0 + outw; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "add v0.4s, v0.4s, v1.4s \n" // s0 = s0 + s1 + s2; "sub v1.4s, v1.4s, v2.4s \n" "add v0.4s, v0.4s, v2.4s \n" // s1 = s1 - s2 + s3; "add v1.4s, v1.4s, v3.4s \n" "trn1 v4.4s, v0.4s, v1.4s \n" "trn2 v5.4s, v0.4s, v1.4s \n" "dup v6.2d, v4.d[1] \n" "dup v7.2d, v5.d[1] \n" "add v0.2s, v4.2s, v5.2s \n" // o0 = d0 + d1 + d2; "sub v1.2s, v5.2s, v6.2s \n" "add v0.2s, v0.2s, v6.2s \n" // o1 = d1 - d2 + d3; "add v1.2s, v1.2s, v7.2s \n" "sshl v0.2s, v0.2s, %6.2s \n" // o0 = o0 >> 2 "sshl v1.2s, v1.2s, %6.2s \n" // o1 = o1 >> 2 "st1 {v0.2s}, [%1], #8 \n" "st1 {v1.2s}, [%2], #8 \n" : "=r"(out_tile), // %0 "=r"(outRow0), // %1 "=r"(outRow1) // %2 : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "vaddq.s32 q0, q0, q1 \n" // s0 = s0 + s1 + s2; "vsubq.s32 q1, q1, q2 \n" "vaddq.s32 q0, q0, q2 \n" // s1 = s1 - s2 + s3; "vaddq.s32 q1, q1, q3 \n" "vtrn.s32 q0, q1 \n" "vadd.s32 d8, d0, d2 \n" // o0 = d0 + d1 + d2; "vsub.s32 d9, d2, d1 \n" "vadd.s32 d8, d8, d1 \n" // o1 = d1 - d2 + d3; "vadd.s32 d9, d9, d3 \n" "vshl.s32 d8, d8, %P6 \n" // o0 = o0 >> 2 "vshl.s32 d9, 
d9, %P6 \n" // o1 = o1 >> 2
                "vst1.s32 {d8}, [%1]! \n"
                "vst1.s32 {d9}, [%2]! \n"
                : "=r"(out_tile), // %0
                "=r"(outRow0), // %1
                "=r"(outRow1) // %2
                : "0"(out_tile),
                "1"(outRow0),
                "2"(outRow1),
                "w"(_shift) // %6
                : "cc", "memory", "q0", "q1", "q2", "q3", "q4"
            );
#endif // __aarch64__
#else
            // Plain-C fallback of the F(2,3) output transform: turn one 4x4
            // tile of 32-bit dot-product results into a 2x2 output patch,
            // Y = A_T * tile * A, computed as two 1-D passes with a transpose
            // in between (mirrors the NEON paths above).
            int s0[4],s1[4],s2[4],s3[4]; // the four rows of the 4x4 input tile
            int w0[4],w1[4];             // w = A_T * W (4 rows -> 2 rows)
            int d0[2],d1[2],d2[2],d3[2]; // columns of w after transpose
            int o0[2],o1[2];             // final 2x2 output values
            // load
            for (int n = 0; n < 4; n++)
            {
                s0[n] = out_tile[n];
                s1[n] = out_tile[n+ 4];
                s2[n] = out_tile[n+ 8];
                s3[n] = out_tile[n+12];
            }
            // w = A_T * W
            for (int n = 0; n < 4; n++)
            {
                w0[n] = s0[n] + s1[n] + s2[n];
                w1[n] = s1[n] - s2[n] + s3[n];
            }
            // transpose w to w_t
            {
                d0[0] = w0[0]; d0[1] = w1[0];
                d1[0] = w0[1]; d1[1] = w1[1];
                d2[0] = w0[2]; d2[1] = w1[2];
                d3[0] = w0[3]; d3[1] = w1[3];
            }
            // Y = A_T * w_t
            for (int n = 0; n < 2; n++)
            {
                o0[n] = d0[n] + d1[n] + d2[n];
                o1[n] = d1[n] - d2[n] + d3[n];
            }
            // Store the 2x2 patch. The >> 2 undoes the scaling applied in the
            // kernel transform (per the original author's note, G' = G*2, so
            // the product carries an extra factor of 4).
            outRow0[0] = o0[0] >> 2;
            outRow0[1] = o0[1] >> 2;
            outRow1[0] = o1[0] >> 2;
            outRow1[1] = o1[1] >> 2;
            out_tile += 16;   // advance to the next 4x4 tile
            outRow0 += 2;     // advance two output columns
            outRow1 += 2;
#endif // __ARM_NEON
            }
            // step down to the next pair of output rows
            outRow0 += outw;
            outRow1 += outw;
        }
    }
    }
    // END transform output

    // cut result pad: crop the rounded-up bordered output back to the
    // caller's requested top_blob size
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Transform an int8 3x3 kernel into its Winograd F(4,3) 6x6 form
// (U = G * g * G_T, using an integer-scaled G so everything stays in int16)
// and repack it, per 4-element sub-tile r (9 sub-tiles cover the 36 shorts of
// each 6x6 kernel), into the 8/4/1 output-channel interleaved layout that
// conv3x3s1_winograd43_int8_neon reads via kernel_tm_test[r].channel(...).
// kernel_tm2 receives 9 Mats, one per sub-tile index r.
static void conv3x3s1_winograd43_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch)
{
    // 6x6 transformed kernel per (inch, outch) pair, 2-byte (short) elements
    Mat kernel_tm(6*6, inch, outch, 2ul);

    // G
    // const float ktm[6][3] = {
    //     {  1.0f/4,     0.0f,    0.0f},
    //     { -1.0f/6,  -1.0f/6, -1.0f/6},
    //     { -1.0f/6,   1.0f/6, -1.0f/6},
    //     { 1.0f/24,  1.0f/12,  1.0f/6},
    //     { 1.0f/24, -1.0f/12,  1.0f/6},
    //     {    0.0f,     0.0f,    1.0f}
    // };
    // Integer-scaled version of the float G above (common multiple applied
    // per row) so the transform can run in short arithmetic; the inverse
    // scaling is applied in the output transform.
    const short ktm[6][3] = {
        {  6,  0,  0},
        { -4, -4, -4},
        { -4,  4, -4},
        {  1,  2,  4},
        {  1, -2,  4},
        {  0,  0,  6}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;      // row 0 of the 3x3 kernel
            const signed char* k1 = kernel0 + 3;  // row 1
            const signed char* k2 = kernel0 + 6;  // row 2

            // h = G * g  (6x3 intermediate)
            short tmp[6][3];
            for (int i=0; i<6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G_T  (6x6 result, stored row-major)
            for (int j=0; j<6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i=0; i<6; i++)
                {
                    kernel_tm0[j*6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Repack: for each 4-short sub-tile r, interleave output channels in
    // blocks of 8, then 4, then singles. Channel index arithmetic
    // (p/8 + (p%8)/4 + p%4) matches the consumer's indexing in the dot loop.
    for (int r=0; r<9; r++)
    {
        // channels: outch/8 eight-wide groups + (outch%8)/4 four-wide group
        // + outch%4 single channels
        Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u);

        int p = 0;
        // blocks of 8 output channels: 32 shorts (8 channels x 4 lanes) per q
        for (; p+7<outch; p+=8)
        {
            const short* kernel0 = (const short*)kernel_tm.channel(p);
            const short* kernel1 = (const short*)kernel_tm.channel(p+1);
            const short* kernel2 = (const short*)kernel_tm.channel(p+2);
            const short* kernel3 = (const short*)kernel_tm.channel(p+3);
            const short* kernel4 = (const short*)kernel_tm.channel(p+4);
            const short* kernel5 = (const short*)kernel_tm.channel(p+5);
            const short* kernel6 = (const short*)kernel_tm.channel(p+6);
            const short* kernel7 = (const short*)kernel_tm.channel(p+7);

            short* ktmp = kernel_tm_test.channel(p/8);

            for (int q=0; q<inch; q++)
            {
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];

                ktmp[4] = kernel1[r*4+0];
                ktmp[5] = kernel1[r*4+1];
                ktmp[6] = kernel1[r*4+2];
                ktmp[7] = kernel1[r*4+3];

                ktmp[8] = kernel2[r*4+0];
                ktmp[9] = kernel2[r*4+1];
                ktmp[10] = kernel2[r*4+2];
                ktmp[11] = kernel2[r*4+3];

                ktmp[12] = kernel3[r*4+0];
                ktmp[13] = kernel3[r*4+1];
                ktmp[14] = kernel3[r*4+2];
                ktmp[15] = kernel3[r*4+3];

                ktmp[16] = kernel4[r*4+0];
                ktmp[17] = kernel4[r*4+1];
                ktmp[18] = kernel4[r*4+2];
                ktmp[19] = kernel4[r*4+3];

                ktmp[20] = kernel5[r*4+0];
                ktmp[21] = kernel5[r*4+1];
                ktmp[22] = kernel5[r*4+2];
                ktmp[23] = kernel5[r*4+3];

                ktmp[24] = kernel6[r*4+0];
                ktmp[25] = kernel6[r*4+1];
                ktmp[26] = kernel6[r*4+2];
                ktmp[27] = kernel6[r*4+3];

                ktmp[28] = kernel7[r*4+0];
                ktmp[29] = kernel7[r*4+1];
                ktmp[30] = kernel7[r*4+2];
                ktmp[31] = kernel7[r*4+3];

                ktmp += 32;
                // advance each source channel to its next input-channel row
                // (36 shorts = one whole 6x6 transformed kernel)
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }
        // blocks of 4 output channels: 16 shorts per q
        for (; p+3<outch; p+=4)
        {
            const short* kernel0 = (const short*)kernel_tm.channel(p);
            const short* kernel1 = (const short*)kernel_tm.channel(p+1);
            const short* kernel2 = (const short*)kernel_tm.channel(p+2);
            const short* kernel3 = (const short*)kernel_tm.channel(p+3);

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4);

            for (int q=0; q<inch; q++)
            {
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];

                ktmp[4] = kernel1[r*4+0];
                ktmp[5] = kernel1[r*4+1];
                ktmp[6] = kernel1[r*4+2];
                ktmp[7] = kernel1[r*4+3];

                ktmp[8] = kernel2[r*4+0];
                ktmp[9] = kernel2[r*4+1];
                ktmp[10] = kernel2[r*4+2];
                ktmp[11] = kernel2[r*4+3];

                ktmp[12] = kernel3[r*4+0];
                ktmp[13] = kernel3[r*4+1];
                ktmp[14] = kernel3[r*4+2];
                ktmp[15] = kernel3[r*4+3];

                ktmp += 16;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
            }
        }
        // remaining single output channels: 4 shorts per q
        for (; p<outch; p++)
        {
            const short* kernel0 = (const short*)kernel_tm.channel(p);

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4);

            for (int q=0; q<inch; q++)
            {
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];

                ktmp += 4;
                kernel0 += 36;
            }
        }
        kernel_tm2.push_back(kernel_tm_test);
    }
}

static void conv3x3s1_winograd43_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2, winograd F(4,3): round the output up to a multiple of 4,
    // input needs 2 extra columns/rows of border for the 6-wide tiles
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    Option opt_b = opt;
    // border copy is scratch data, keep it in workspace memory
    opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q); short* out_tm8 = 
bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { _t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5]; _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5]; _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5]; _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5]; _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5]; _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5]; } // d = B_t * d_t 
_n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, _4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); _n2 = vmlaq_s16(_n2, _t3, _1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3]; out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1]; out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5]; out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3]; out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1]; out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5]; out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3]; out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1]; out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5]; #else short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6]; short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6]; short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4*d0[n] - 5*d2[n] + d4[n]; w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n]; w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n]; w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n]; w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n]; w5[n] = 4*d1[n] - 5*d3[n] + d5[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; 
t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5]; t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5]; t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4*t0[n] - 5*t2[n] + t4[n]; d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n]; d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n]; d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n]; d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n]; d5[n] = 4*t1[n] - 5*t3[n] + t5[n]; } // save to out_tm { out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3]; out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1]; out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5]; out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3]; out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1]; out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5]; out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3]; out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1]; out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int 
p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += 
(a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = 
top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); int* outRow0 = top_blob_bordered.channel(p); int* outRow1 = outRow0 + outw; int* outRow2 = outRow0 + outw * 2; int* outRow3 = outRow0 + outw * 3; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w1, _w2, _w3; int32x2_t _w0n, _w1n, _w2n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o1, _o2, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile+4); _s1 = vld1q_s32(out_tile+6); _s1n = vld1_s32(out_tile+10); _s2 = vld1q_s32(out_tile+12); _s2n = vld1_s32(out_tile+16); _s3 = vld1q_s32(out_tile+18); _s3n = vld1_s32(out_tile+22); _s4 = vld1q_s32(out_tile+24); _s4n = 
vld1_s32(out_tile+28); _s5 = vld1q_s32(out_tile+30); _s5n = vld1_s32(out_tile+34); // w = A_T * W int32x2_t _tp0 = {1, 4}; int32x2_t _tp1 = {2, 8}; // 4*s5[n] int32x4_t _s5x4 = vshlq_n_s32(_s5, 2); int32x2_t _s5x4n = vshl_n_s32(_s5n, 2); int32x4_t _t1p2 = vaddq_s32(_s1, _s2); int32x2_t _t1p2n = vadd_s32 (_s1n, _s2n); int32x4_t _t3p4 = vaddq_s32(_s3, _s4); int32x2_t _t3p4n = vadd_s32 (_s3n, _s4n); int32x4_t _t1s2 = vsubq_s32(_s1, _s2); int32x2_t _t1s2n = vsub_s32 (_s1n, _s2n); int32x4_t _t3s4 = vsubq_s32(_s3, _s4); int32x2_t _t3s4n = vsub_s32 (_s3n, _s4n); _w0 = vaddq_s32(_s0, _t1p2); _w0n = vadd_s32 (_s0n, _t1p2n); _w0 = vaddq_s32(_w0, _t3p4); _w0n = vadd_s32 (_w0n, _t3p4n); _w0n = vmul_s32(_w0n, _tp0); // _w2,_w2n _t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1); _t1p2n = vmla_lane_s32 (_t1p2n, _t3p4n, _tp0, 1); _t1p2n = vmul_s32(_t1p2n, _tp0); _w3 = vaddq_s32(_s5x4, _t1s2); _w3n = vadd_s32 (_s5x4n, _t1s2n); _w3 = vmlaq_lane_s32(_w3, _t3s4, _tp1, 1); _w3n = vmla_lane_s32 (_w3n, _t3s4n, _tp1, 1); _w3n = vmul_s32(_w3n, _tp0); // _w1, _w1n _t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0); _t1s2n = vmla_lane_s32 (_t1s2n, _t3s4n, _tp1, 0); _t1s2n = vmul_s32(_t1s2n, _tp0); int32x4_t _w02n = vcombine_s32(_w0n, _t1p2n); int32x4_t _w13n = vcombine_s32(_t1s2n, _w3n); // transpose w to w_t #if __aarch64__ int32x4_t _wt0 = vtrn1q_s32(_w0, _t1s2); int32x4_t _wt1 = vtrn2q_s32(_w0, _t1s2); int32x4_t _wt2 = vtrn1q_s32(_t1p2, _w3); int32x4_t _wt3 = vtrn2q_s32(_t1p2, _w3); int64x2_t _dt0 = vtrn1q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2)); int64x2_t _dt2 = vtrn2q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2)); int64x2_t _dt1 = vtrn1q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3)); int64x2_t _dt3 = vtrn2q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3)); _d0 = vreinterpretq_s32_s64(_dt0); _d1 = vreinterpretq_s32_s64(_dt1); _d2 = vreinterpretq_s32_s64(_dt2); _d3 = vreinterpretq_s32_s64(_dt3); _d4 = vtrn1q_s32(_w02n, _w13n); _d5 
= vtrn2q_s32(_w02n, _w13n); #else asm volatile( "vtrn.32 %q[_w0], %q[_w1] \n" "vtrn.32 %q[_w2], %q[_w3] \n" "vswp %f[_w0], %e[_w2] \n" "vswp %f[_w1], %e[_w3] \n" "vtrn.32 %q[_w02n], %q[_w13n] \n" : [_w0]"+w"(_w0), [_w1]"+w"(_t1s2), [_w2]"+w"(_t1p2), [_w3]"+w"(_w3), [_w02n]"+w"(_w02n), [_w13n]"+w"(_w13n) : : "cc", "memory" ); _d0 = _w0; _d1 = _t1s2; _d2 = _t1p2; _d3 = _w3; _d4 = _w02n; _d5 = _w13n; #endif // Y = A_T * w_t _t1p2 = vaddq_s32(_d1, _d2); _t3p4 = vaddq_s32(_d3, _d4); _t1s2 = vsubq_s32(_d1, _d2); _t3s4 = vsubq_s32(_d3, _d4); _o0 = vaddq_s32(_d0, _t1p2); _o0 = vaddq_s32(_o0, _t3p4); // _o2 _t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1); _o3 = vaddq_s32(_d5, _t1s2); _o3 = vmlaq_lane_s32(_o3, _t3s4, _tp1, 1); // _o1 _t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0); // save to top blob tm float32x4_t _ot0 = vcvtq_f32_s32(_o0); float32x4_t _ot1 = vcvtq_f32_s32(_t1s2); float32x4_t _ot2 = vcvtq_f32_s32(_t1p2); float32x4_t _ot3 = vcvtq_f32_s32(_o3); _ot0 = vmulq_n_f32(_ot0, 0.0017361112); _ot1 = vmulq_n_f32(_ot1, 0.0017361112); _ot2 = vmulq_n_f32(_ot2, 0.0017361112); _ot3 = vmulq_n_f32(_ot3, 0.0017361112); _o0 = vcvtq_s32_f32(_ot0); _o1 = vcvtq_s32_f32(_ot1); _o2 = vcvtq_s32_f32(_ot2); _o3 = vcvtq_s32_f32(_ot3); vst1q_s32(outRow0, _o0); vst1q_s32(outRow1, _o1); vst1q_s32(outRow2, _o2); vst1q_s32(outRow3, _o3); #else int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6]; int w0[6],w1[6],w2[6],w3[6]; int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4]; int o0[4],o1[4],o2[4],o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 6]; s2[n] = out_tile[n+12]; s3[n] = out_tile[n+18]; s4[n] = out_tile[n+24]; s5[n] = out_tile[n+30]; } // w = A_T * W for (int n = 0; n < 5; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n]; w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n]; w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + 4*s5[n]; } for (int n = 5; n < 6; n++) { w0[n] = 4*(s0[n] + s1[n] + s2[n] + s3[n] + s4[n]); w1[n] = 4*(s1[n] - s2[n] + 
2*s3[n] - 2*s4[n]); w2[n] = 4*(s1[n] + s2[n] + 4*s3[n] + 4*s4[n]); w3[n] = 4*(s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + 4*s5[n]); } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n]; o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n]; o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] / 576; outRow1[n] = o1[n] / 576; outRow2[n] = o2[n] / 576; outRow3[n] = o3[n] / 576; } #endif // __ARM_NEON out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw * 3; outRow1 += outw * 3; outRow2 += outw * 3; outRow3 += outw * 3; } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd43_dequant_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // pad to 4n+2, winograd F(4,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; Option opt_b = opt; opt_b.blob_allocator = opt.workspace_allocator; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); // BEGIN transform input Mat 
bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q); short* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; 
int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { _t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5]; _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5]; _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5]; _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5]; _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5]; _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5]; } // d = B_t * d_t _n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, 
_4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); _n2 = vmlaq_s16(_n2, _t3, _1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3]; out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1]; out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5]; out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3]; out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1]; out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5]; out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3]; out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1]; out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5]; #else short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6]; short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6]; short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4*d0[n] - 5*d2[n] + d4[n]; w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n]; w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n]; w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n]; w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n]; w5[n] = 4*d1[n] - 5*d3[n] + d5[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5]; t0[2]=w2[0]; t1[2]=w2[1]; 
t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5]; t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5]; t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4*t0[n] - 5*t2[n] + t4[n]; d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n]; d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n]; d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n]; d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n]; d5[n] = 4*t1[n] - 5*t3[n] + t5[n]; } // save to out_tm { out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3]; out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1]; out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5]; out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3]; out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1]; out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5]; out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3]; out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1]; out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* 
output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result 
to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = 
top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); float* outRow0 = top_blob_bordered.channel(p); float* outRow1 = outRow0 + outw; float* outRow2 = outRow0 + outw * 2; float* outRow3 = outRow0 + outw * 3; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_dequant0 = scales_dequant[p]; const float scale0 = scale_dequant0 / 576.0; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w1, _w2, _w3; int32x2_t _w0n, _w1n, _w2n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o1, _o2, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile+4); _s1 = vld1q_s32(out_tile+6); _s1n = vld1_s32(out_tile+10); _s2 = vld1q_s32(out_tile+12); _s2n = vld1_s32(out_tile+16); _s3 = vld1q_s32(out_tile+18); _s3n = vld1_s32(out_tile+22); _s4 = vld1q_s32(out_tile+24); _s4n = vld1_s32(out_tile+28); _s5 = vld1q_s32(out_tile+30); _s5n = vld1_s32(out_tile+34); // w = A_T * W int32x2_t _tp0 = {1, 4}; int32x2_t _tp1 = {2, 8}; // 4*s5[n] int32x4_t _s5x4 = vshlq_n_s32(_s5, 2); int32x2_t _s5x4n = vshl_n_s32(_s5n, 2); int32x4_t _t1p2 = vaddq_s32(_s1, _s2); int32x2_t _t1p2n = vadd_s32 (_s1n, _s2n); int32x4_t _t3p4 = vaddq_s32(_s3, _s4); int32x2_t _t3p4n = vadd_s32 (_s3n, _s4n); int32x4_t _t1s2 = vsubq_s32(_s1, _s2); int32x2_t _t1s2n = vsub_s32 (_s1n, _s2n); int32x4_t _t3s4 = vsubq_s32(_s3, _s4); int32x2_t _t3s4n = vsub_s32 (_s3n, _s4n); _w0 = vaddq_s32(_s0, _t1p2); _w0n = vadd_s32 (_s0n, _t1p2n); _w0 = vaddq_s32(_w0, _t3p4); _w0n = vadd_s32 (_w0n, _t3p4n); _w0n = vmul_s32(_w0n, _tp0); // _w2,_w2n _t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1); _t1p2n = vmla_lane_s32 (_t1p2n, _t3p4n, _tp0, 1); _t1p2n = vmul_s32(_t1p2n, _tp0); _w3 = vaddq_s32(_s5x4, _t1s2); _w3n = vadd_s32 (_s5x4n, _t1s2n); _w3 = vmlaq_lane_s32(_w3, _t3s4, _tp1, 1); _w3n = vmla_lane_s32 (_w3n, _t3s4n, _tp1, 1); _w3n = vmul_s32(_w3n, _tp0); // _w1, _w1n _t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0); _t1s2n = vmla_lane_s32 (_t1s2n, _t3s4n, _tp1, 0); _t1s2n = vmul_s32(_t1s2n, _tp0); int32x4_t _w02n = vcombine_s32(_w0n, _t1p2n); int32x4_t _w13n = vcombine_s32(_t1s2n, _w3n); // transpose w to w_t #if 
__aarch64__ int32x4_t _wt0 = vtrn1q_s32(_w0, _t1s2); int32x4_t _wt1 = vtrn2q_s32(_w0, _t1s2); int32x4_t _wt2 = vtrn1q_s32(_t1p2, _w3); int32x4_t _wt3 = vtrn2q_s32(_t1p2, _w3); int64x2_t _dt0 = vtrn1q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2)); int64x2_t _dt2 = vtrn2q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2)); int64x2_t _dt1 = vtrn1q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3)); int64x2_t _dt3 = vtrn2q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3)); _d0 = vreinterpretq_s32_s64(_dt0); _d1 = vreinterpretq_s32_s64(_dt1); _d2 = vreinterpretq_s32_s64(_dt2); _d3 = vreinterpretq_s32_s64(_dt3); _d4 = vtrn1q_s32(_w02n, _w13n); _d5 = vtrn2q_s32(_w02n, _w13n); #else asm volatile( "vtrn.32 %q[_w0], %q[_w1] \n" "vtrn.32 %q[_w2], %q[_w3] \n" "vswp %f[_w0], %e[_w2] \n" "vswp %f[_w1], %e[_w3] \n" "vtrn.32 %q[_w02n], %q[_w13n] \n" : [_w0]"+w"(_w0), [_w1]"+w"(_t1s2), [_w2]"+w"(_t1p2), [_w3]"+w"(_w3), [_w02n]"+w"(_w02n), [_w13n]"+w"(_w13n) : : "cc", "memory" ); _d0 = _w0; _d1 = _t1s2; _d2 = _t1p2; _d3 = _w3; _d4 = _w02n; _d5 = _w13n; #endif // Y = A_T * w_t _t1p2 = vaddq_s32(_d1, _d2); _t3p4 = vaddq_s32(_d3, _d4); _t1s2 = vsubq_s32(_d1, _d2); _t3s4 = vsubq_s32(_d3, _d4); _o0 = vaddq_s32(_d0, _t1p2); _o0 = vaddq_s32(_o0, _t3p4); // _o2 _t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1); _o3 = vaddq_s32(_d5, _t1s2); _o3 = vmlaq_lane_s32(_o3, _t3s4, _tp1, 1); // _o1 _t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0); // save to top blob tm float32x4_t _scale0 = vdupq_n_f32(scale0); float32x4_t _out0_f32 = vdupq_n_f32(bias0); float32x4_t _out1_f32 = vdupq_n_f32(bias0); float32x4_t _out2_f32 = vdupq_n_f32(bias0); float32x4_t _out3_f32 = vdupq_n_f32(bias0); _out0_f32 = vmlaq_f32(_out0_f32, vcvtq_f32_s32(_o0), _scale0); _out1_f32 = vmlaq_f32(_out1_f32, vcvtq_f32_s32(_t1s2), _scale0); _out2_f32 = vmlaq_f32(_out2_f32, vcvtq_f32_s32(_t1p2), _scale0); _out3_f32 = vmlaq_f32(_out3_f32, vcvtq_f32_s32(_o3), _scale0); vst1q_f32(outRow0, 
_out0_f32); vst1q_f32(outRow1, _out1_f32); vst1q_f32(outRow2, _out2_f32); vst1q_f32(outRow3, _out3_f32); #else int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6]; int w0[6],w1[6],w2[6],w3[6]; int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4]; int o0[4],o1[4],o2[4],o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 6]; s2[n] = out_tile[n+12]; s3[n] = out_tile[n+18]; s4[n] = out_tile[n+24]; s5[n] = out_tile[n+30]; } // w = A_T * W for (int n = 0; n < 5; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n]; w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n]; w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + 4*s5[n]; } for (int n = 5; n < 6; n++) { w0[n] = 4*(s0[n] + s1[n] + s2[n] + s3[n] + s4[n]); w1[n] = 4*(s1[n] - s2[n] + 2*s3[n] - 2*s4[n]); w2[n] = 4*(s1[n] + s2[n] + 4*s3[n] + 4*s4[n]); w3[n] = 4*(s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + 4*s5[n]); } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n]; o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n]; o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = (float)o0[n] * scale0 + bias0; outRow1[n] = (float)o1[n] * scale0 + bias0; outRow2[n] = (float)o2[n] * scale0 + bias0; outRow3[n] = (float)o3[n] * scale0 + bias0; } #endif // __ARM_NEON out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw * 3; outRow1 += outw * 3; outRow2 += outw * 3; outRow3 += outw * 3; } } } // END transform output // cut result pad 
// crop the zero-padded result back to the requested output size
// (the winograd path rounds outw/outh up to a multiple of 4; trim bottom/right excess)
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Repack an int8 3x3 kernel (outch x inch x 9 coefficients) into the layout
// consumed by conv3x3s2_packed_int8_neon.
// Output channels are processed 8 at a time, so the coefficients of 8 output
// channels are interleaved tap by tap: for each input channel the packed
// layout is k0[0] k1[0] ... k7[0], k0[1] ... k7[1], ..., k0[8] ... k7[8]
// (8*9 = 72 bytes per input channel per group of 8 output channels).
// Leftover output channels (outch % 8) are copied through unpacked, 9 bytes
// per input channel.
static void conv3x3s2_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    // one destination channel per group of 8 output channels,
    // plus one per leftover output channel
    kernel_tm.create(8*9, inch, outch/8 + outch%8, (size_t)1u);

    const signed char* kernel = _kernel;

    int p=0;
    // full groups of 8 output channels: interleave the 8 kernels tap by tap
    for (; p+7<outch; p+=8)
    {
        const signed char* k0 = kernel + (p+0)*inch*9;
        const signed char* k1 = kernel + (p+1)*inch*9;
        const signed char* k2 = kernel + (p+2)*inch*9;
        const signed char* k3 = kernel + (p+3)*inch*9;
        const signed char* k4 = kernel + (p+4)*inch*9;
        const signed char* k5 = kernel + (p+5)*inch*9;
        const signed char* k6 = kernel + (p+6)*inch*9;
        const signed char* k7 = kernel + (p+7)*inch*9;

        signed char* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
            {
                // one byte from each of the 8 output channels for tap k
                ktmp[0] = k0[k];
                ktmp[1] = k1[k];
                ktmp[2] = k2[k];
                ktmp[3] = k3[k];
                ktmp[4] = k4[k];
                ktmp[5] = k5[k];
                ktmp[6] = k6[k];
                ktmp[7] = k7[k];
                ktmp += 8;
            }

            // advance each source kernel to its next input channel (9 taps each)
            k0 += 9;
            k1 += 9;
            k2 += 9;
            k3 += 9;
            k4 += 9;
            k5 += 9;
            k6 += 9;
            k7 += 9;
        }
    }
    // remaining output channels (outch % 8): stored one channel at a time
    for (; p<outch; p++)
    {
        const signed char* k0 = kernel + (p+0)*inch*9;

        // channel index p/8 + p%8 lands just past the outch/8 group channels,
        // matching the outch/8 + outch%8 allocation above, because this loop
        // starts at p = (outch/8)*8
        signed char* ktmp = kernel_tm.channel(p/8 + p%8);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
            {
                ktmp[k] = k0[k];
            }
            ktmp += 9;
            k0 += 9;
        }
    }
}

// 3x3 stride-2 int8 convolution using the kernel repacked above.
// Accumulates int32 results into top_blob, 8 output channels per iteration
// with NEON/asm fast paths and a scalar fallback (bodies continue below).
static void conv3x3s2_packed_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // stride 2: 2*outw input pixels are consumed per output row, so at the end
    // of a row skip the leftover of this row plus one whole input row
    // (presumably applied to the r0/r1/r2 row pointers — defined in the
    // continuation of this function)
    const int tailstep = w - 2*outw + w;

    // process output channels 8 at a time; the remainder is handled after
    int nn_outch = outch >> 3;
    int remain_outch_start = nn_outch << 3;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 8;

        Mat out0 = top_blob.channel(p+0);
        Mat out1 = top_blob.channel(p+1);
        Mat out2 = top_blob.channel(p+2);
        Mat out3 = top_blob.channel(p+3);
        Mat out4 = top_blob.channel(p+4);
        Mat out5 = top_blob.channel(p+5);
        Mat out6 = top_blob.channel(p+6);
        Mat out7 =
top_blob.channel(p+7); out0.fill(0); out1.fill(0); out2.fill(0); out3.fill(0); out4.fill(0); out5.fill(0); out6.fill(0); out7.fill(0); const signed char* ktmp = _kernel.channel(p/8); for (int q=0; q<inch; q++) { int* outptr0 = out0; int* outptr1 = out1; int* outptr2 = out2; int* outptr3 = out3; int* outptr4 = out4; int* outptr5 = out5; int* outptr6 = out6; int* outptr7 = out7; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%9], #16 \n"//r0-r2 "ld2 {v5.8b, v6.8b}, [%9] \n" "ld1 {v8.4s, v9.4s}, [%1] \n"//out0 "ld1 {v10.4s, v11.4s}, [%2] \n"//out1 "ld1 {v12.4s, v13.4s}, [%3] \n"//out2 "ld1 {v14.4s, v15.4s}, [%4] \n"//out3 "ld1 {v16.4s, v17.4s}, [%5] \n"//out4 "ld1 {v18.4s, v19.4s}, [%6] \n"//out5 "ld1 {v20.4s, v21.4s}, [%7] \n"//out6 "ld1 {v22.4s, v23.4s}, [%8] \n"//out7 "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k00-k70) "sshll v1.8h, v1.8b, #0 \n"//(k01-k71) "sshll v2.8h, v2.8b, #0 \n"//(k02-k72) "sshll v3.8h, v3.8b, #0 \n"// r0 "sshll v4.8h, v4.8b, #0 \n"// r1 "sshll v7.8h, v7.8b, #0 \n"// r2 // r0 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r00-r07)*k00 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r00-r07)*k10 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r00-r07)*k20 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r00-r07)*k30 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r00-r07)*k40 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// 
out5 += (r00-r07)*k50 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r00-r07)*k60 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r00-r07)*k70 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r1 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r10-r17)*k01 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r10-r17)*k11 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r10-r17)*k21 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r10-r17)*k31 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r10-r17)*k41 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r10-r17)*k51 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r10-r17)*k61 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r10-r17)*k71 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r2 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r20-r27)*k02 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r20-r27)*k12 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r20-r27)*k22 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r20-r27)*k32 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r20-r27)*k42 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r20-r27)*k52 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r20-r27)*k62 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r20-r27)*k72 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%10], #16 \n"//r3-r5 "ld2 {v5.8b, v6.8b}, [%10] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k03-k73) "sshll v1.8h, v1.8b, #0 \n"//(k04-k74) "sshll v2.8h, v2.8b, #0 
\n"//(k05-k75) "sshll v3.8h, v3.8b, #0 \n"// r3 "sshll v4.8h, v4.8b, #0 \n"// r4 "sshll v7.8h, v7.8b, #0 \n"// r5 // r3 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r30-r37)*k03 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r30-r37)*k13 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r30-r37)*k23 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r30-r37)*k33 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r30-r37)*k43 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r30-r37)*k53 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r30-r37)*k63 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r30-r37)*k73 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r4 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r40-r47)*k04 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r40-r47)*k14 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r40-r47)*k24 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r40-r47)*k34 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r40-r47)*k44 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r40-r47)*k54 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r40-r47)*k64 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r40-r47)*k74 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r5 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r50-r57)*k05 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r50-r57)*k15 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r50-r57)*k25 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r50-r57)*k35 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal 
v16.4s, v7.4h, v2.h[4] \n"// out4 += (r50-r57)*k45 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r50-r57)*k55 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r50-r57)*k65 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r50-r57)*k75 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%11], #16 \n"//r6-r8 "ld2 {v5.8b, v6.8b}, [%11] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k06-k76) "sshll v1.8h, v1.8b, #0 \n"//(k07-k77) "sshll v2.8h, v2.8b, #0 \n"//(k08-k78) "sshll v3.8h, v3.8b, #0 \n"// r6 "sshll v4.8h, v4.8b, #0 \n"// r7 "sshll v7.8h, v7.8b, #0 \n"// r8 // r6 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r60-r67)*k06 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r60-r67)*k16 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r60-r67)*k26 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r60-r67)*k36 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r60-r67)*k46 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r60-r67)*k56 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r60-r67)*k66 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r60-r67)*k76 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r7 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r70-r77)*k07 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r70-r77)*k17 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r70-r77)*k27 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r70-r77)*k37 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r70-r77)*k47 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += 
(r70-r77)*k57 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r70-r77)*k67 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r70-r77)*k77 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r8 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r80-r87)*k08 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r80-r87)*k18 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r80-r87)*k28 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r80-r87)*k38 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r80-r87)*k48 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r80-r87)*k58 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r80-r87)*k68 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r80-r87)*k78 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "st1 {v16.4s, v17.4s}, [%5], #32 \n" "st1 {v18.4s, v19.4s}, [%6], #32 \n" "st1 {v20.4s, v21.4s}, [%7], #32 \n" "st1 {v22.4s, v23.4s}, [%8], #32 \n" "subs %w0, %w0, #1 \n" "sub %12, %12, #72 \n"// reset ktmp "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: 
\n" "pld [%1, #128] \n" "vld1.s32 {d16-d17}, [%1] \n"// out0 "pld [%2, #128] \n" "vld1.s32 {d18-d19}, [%2] \n"// out1 "pld [%3, #128] \n" "vld1.s32 {d20-d21}, [%3] \n"// out2 "pld [%4, #128] \n" "vld1.s32 {d22-d23}, [%4] \n"// out3 // r0 "pld [%9, #64] \n" "vld2.s8 {d8-d9}, [%9] \n"// d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015) "add %9, #8 \n" "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k00-k70) d1(k01-k71) d2(k02-k72) "pld [%5, #128] \n" "vld1.s32 {d24-d25}, [%5] \n"// out4 "pld [%6, #128] \n" "vld1.s32 {d26-d27}, [%6] \n"// out5 "vmovl.s8 q2, d2 \n"// q2(k02-k72) "vmovl.s8 q1, d1 \n"// q1(k01-k71) "vmovl.s8 q0, d0 \n"// q0(k00-k70) "vext.s8 d12, d8, d8, #1 \n"// d12(a02 a04 a06 a08 x x x x) "pld [%7, #128] \n" "vld1.s32 {d28-d29}, [%7] \n"// out6 "vmovl.s8 q5, d9 \n"// q5(a01 a03 a05 a07 a09 a011 a013 a015) d11 "vmovl.s8 q4, d8 \n"// q4(a00 a02 a04 a06 a08 a010 a012 a014) d9 "vmovl.s8 q6, d12 \n"// q6(a02 a04 a06 a08 a010 a012 a014 a016) d13 "pld [%8, #128] \n" "vld1.s32 {d30-d31}, [%8] \n"// out7 "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a00 a02 a04 a06) * k00 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a00 a02 a04 a06) * k10 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a00 a02 a04 a06) * k20 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a00 a02 a04 a06) * k30 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a00 a02 a04 a06) * k40 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a00 a02 a04 a06) * k50 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a00 a02 a04 a06) * k60 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a00 a02 a04 a06) * k70 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a01-a07) * k01 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a01-a07) * k11 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a01-a07) * k21 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a01-a07) * k31 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a01-a07) * k41 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a01-a07) * k51 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a01-a07) * k61 "vmlal.s16 q15, d10, d3[3] \n"// 
sum7 += (a01-a07) * k71 "pld [%10, #64] \n" "vld2.s8 {d8-d9}, [%10] \n"// d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115) "add %10, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a02-a08) * k02 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a02-a08) * k12 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a02-a08) * k22 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a02-a08) * k32 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k03-k73) d1(k04-k74) d2(k05-k75) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a02-a08) * k42 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a02-a08) * k52 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a02-a08) * k62 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a02-a08) * k72 // r1 "vext.s8 d12, d8, d8, #1 \n"// d12(a12 a14 a16 a18 x x x x) "vmovl.s8 q2, d2 \n"// q2(k05-k75) "vmovl.s8 q1, d1 \n"// q1(k04-k74) "vmovl.s8 q0, d0 \n"// q0(k03-k73) "vmovl.s8 q5, d9 \n"// q5(a11-a115) "vmovl.s8 q4, d8 \n"// q4(a10-a114) "vmovl.s8 q6, d12 \n"// q6(a12-a116) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a10-a16) * k03 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a10-a16) * k13 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a10-a16) * k23 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a10-a16) * k33 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a10-a16) * k43 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a10-a16) * k53 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a10-a16) * k63 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a10-a16) * k73 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a11-a17) * k04 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a11-a17) * k14 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a11-a17) * k24 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a11-a17) * k34 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a11-a17) * k44 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a11-a17) * k54 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a11-a17) * k64 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a11-a17) * k74 "pld [%11, #64] \n" "vld2.s8 {d8-d9}, [%11] \n"// d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 
a29 a211 a213 a215) "add %11, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a12-a18) * k05 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a12-a18) * k15 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a12-a18) * k25 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a12-a18) * k35 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k06-k76) d1(k07-k77) d2(k08-k78) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a12-a18) * k45 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a12-a18) * k55 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a12-a18) * k65 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a12-a18) * k75 // r2 "vext.s8 d12, d8, d8, #1 \n"// d12(a22 a24 a26 a28 x x x x) "vmovl.s8 q2, d2 \n"// q2(k08-k78) "vmovl.s8 q1, d1 \n"// q1(k07-k77) "vmovl.s8 q0, d0 \n"// q0(k06-k76) "vmovl.s8 q5, d9 \n"// q5(a21-a215) "vmovl.s8 q4, d8 \n"// q4(a20-a214) "vmovl.s8 q6, d12 \n"// q6(a22-a216) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a20-a26) * k06 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a20-a26) * k16 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a20-a26) * k26 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a20-a26) * k36 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a20-a26) * k46 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a20-a26) * k56 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a20-a26) * k66 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a20-a26) * k76 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a21-a27) * k07 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a21-a27) * k17 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a21-a27) * k27 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a21-a27) * k37 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a21-a27) * k47 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a21-a27) * k57 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a21-a27) * k67 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a21-a27) * k77 "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a22-a28) * k08 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a22-a28) * k18 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a22-a28) * k28 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a22-a28) * k38 "vmlal.s16 q12, 
d12, d5[0] \n"// sum4 += (a22-a28) * k48 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a22-a28) * k58 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a22-a28) * k68 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a22-a28) * k78 // save s32 to memory "sub %12, %12, #72 \n" "vst1.s32 {d16-d17}, [%1]! \n"// out0 "vst1.s32 {d18-d19}, [%2]! \n"// out1 "vst1.s32 {d20-d21}, [%3]! \n"// out2 "vst1.s32 {d22-d23}, [%4]! \n"// out3 "subs %0, #1 \n" "vst1.s32 {d24-d25}, [%5]! \n"// out4 "vst1.s32 {d26-d27}, [%6]! \n"// out5 "vst1.s32 {d28-d29}, [%7]! \n"// out6 "vst1.s32 {d30-d31}, [%8]! \n"// out7 "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ int8x8_t _r0_s8 = vld1_s8(r0);// (a00 a01 a02 ....) int8x8_t _r1_s8 = vld1_s8(r1);// (a10 a11 a12 ....) int8x8_t _r2_s8 = vld1_s8(r2);// (a20 a21 a22 ....) 
int16x8_t _r0 = vmovl_s8(_r0_s8); int16x8_t _r1 = vmovl_s8(_r1_s8); int16x8_t _r2 = vmovl_s8(_r2_s8); int32x4_t _sum03, _sum47; _sum03 = vld1q_lane_s32(outptr0, _sum03, 0);// out0 _sum03 = vld1q_lane_s32(outptr1, _sum03, 1);// out1 _sum03 = vld1q_lane_s32(outptr2, _sum03, 2);// out2 _sum03 = vld1q_lane_s32(outptr3, _sum03, 3);// out3 _sum47 = vld1q_lane_s32(outptr4, _sum47, 0);// out4 _sum47 = vld1q_lane_s32(outptr5, _sum47, 1);// out5 _sum47 = vld1q_lane_s32(outptr6, _sum47, 2);// out6 _sum47 = vld1q_lane_s32(outptr7, _sum47, 3);// out7 // k0 - k2 int8x8_t _k0_8 = vld1_s8(ktmp); //(k00-k70) int8x8_t _k1_8 = vld1_s8(ktmp+8); //(k01-k71) int8x8_t _k2_8 = vld1_s8(ktmp+16); //(k02-k72) int16x8_t _k0 = vmovl_s8(_k0_8); int16x8_t _k1 = vmovl_s8(_k1_8); int16x8_t _k2 = vmovl_s8(_k2_8); int32x4_t _sum0 = vmull_laneq_s16(vget_low_s16(_k0), _r0, 0); int32x4_t _sum0n = vmull_laneq_s16(vget_high_s16(_k0), _r0, 0); int32x4_t _sum1 = vmull_laneq_s16(vget_low_s16(_k1), _r0, 1); int32x4_t _sum1n = vmull_laneq_s16(vget_high_s16(_k1), _r0, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r0, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r0, 2); // k3 - k5 _k0_8 = vld1_s8(ktmp+24); //(k03-k73) _k1_8 = vld1_s8(ktmp+32); //(k04-k74) _k2_8 = vld1_s8(ktmp+40); //(k05-k75) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r1, 0); _sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r1, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r1, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r1, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r1, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r1, 2); // k6 - k8 _k0_8 = vld1_s8(ktmp+48); //(k06-k76) _k1_8 = vld1_s8(ktmp+56); //(k07-k77) _k2_8 = vld1_s8(ktmp+64); //(k08-k78) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r2, 0); _sum0n = 
vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r2, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r2, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r2, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r2, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r2, 2); _sum0 = vaddq_s32(_sum0, _sum1); _sum0n = vaddq_s32(_sum0n, _sum1n); _sum03 = vaddq_s32(_sum03, _sum0); _sum47 = vaddq_s32(_sum47, _sum0n); vst1q_lane_s32(outptr0, _sum03, 0); vst1q_lane_s32(outptr1, _sum03, 1); vst1q_lane_s32(outptr2, _sum03, 2); vst1q_lane_s32(outptr3, _sum03, 3); vst1q_lane_s32(outptr4, _sum47, 0); vst1q_lane_s32(outptr5, _sum47, 1); vst1q_lane_s32(outptr6, _sum47, 2); vst1q_lane_s32(outptr7, _sum47, 3); outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #else // __aarch64__ asm volatile( "pld [%8, #64] \n" "vld1.s8 {d0}, [%8] \n"// d0(a00 a01 a02 ....) "pld [%9, #64] \n" "vld1.s8 {d2}, [%9] \n"// d2(a10 a11 a12 ....) "pld [%10, #64] \n" "vld1.s8 {d4}, [%10] \n"// d4(a20 a21 a22 ....) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n"// d6(k00-k70) d7(k01-k71) d8(k02-k72) "vmovl.s8 q0, d0 \n"// d0(a00 a01 a02 x) "vmovl.s8 q1, d2 \n"// d2(a10 a11 a12 x) "vmovl.s8 q2, d4 \n"// d4(a20 a21 a22 x) "vmovl.s8 q5, d8 \n"// d10(k02-k32) d11(k42-k72) "vmovl.s8 q4, d7 \n"// d8(k01-k31) d9(k41-k71) "vmovl.s8 q3, d6 \n"// d6(k00-k30) d7(k40-k70) "vld1.s32 {d20[0]}, [%0] \n"// out0 q10 "vld1.s32 {d20[1]}, [%1] \n"// out1 "vld1.s32 {d21[0]}, [%2] \n"// out2 "vld1.s32 {d21[1]}, [%3] \n"// out3 "pld [%11, #64] \n" "vld1.s8 {d24-d26}, [%11]! 
\n" "vmovl.s8 q14, d26 \n"// d28(k05-k35) d29(k45-k75) "vmovl.s8 q13, d25 \n"// d26(k04-k34) d27(k44-k74) "vmovl.s8 q12, d24 \n"// d24(k03-k33) d25(k43-k73) "vld1.s32 {d22[0]}, [%4] \n"// out4 q11 "vld1.s32 {d22[1]}, [%5] \n"// out5 "vld1.s32 {d23[0]}, [%6] \n"// out6 "vld1.s32 {d23[1]}, [%7] \n"// out7 "vmull.s16 q6, d6, d0[0] \n"// a00 x (k00-k30) "vmull.s16 q7, d7, d0[0] \n"// a00 x (k40-k70) "vmull.s16 q8, d8, d0[1] \n"// a01 x (k01-k31) "vmull.s16 q9, d9, d0[1] \n"// a01 x (k41-k71) "vmlal.s16 q10, d10, d0[2] \n"// a02 x (k02-k32) "vmlal.s16 q11, d11, d0[2] \n"// a02 x (k42-k72) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n" "vmovl.s8 q5, d8 \n"// d10(k08-k38) d11(k48-k78) "vmovl.s8 q4, d7 \n"// d8(k07-k37) d9(k47-k77) "vmovl.s8 q3, d6 \n"// d6(k06-k36) d7(k46-k76) "vmlal.s16 q6, d24, d2[0] \n"// a10 x (k03-k33) "vmlal.s16 q7, d25, d2[0] \n"// a10 x (k43-k73) "vmlal.s16 q8, d26, d2[1] \n"// a11 x (k04-k34) "vmlal.s16 q9, d27, d2[1] \n"// a11 x (k44-k74) "vmlal.s16 q10, d28, d2[2] \n"// a12 x (k05-k35) "vmlal.s16 q11, d29, d2[2] \n"// a12 x (k45-k75) "vmlal.s16 q6, d6, d4[0] \n"// a20 x (k06-k36) "vmlal.s16 q7, d7, d4[0] \n"// a20 x (k46-k76) "vmlal.s16 q8, d8, d4[1] \n"// a21 x (k07-k37) "vmlal.s16 q9, d9, d4[1] \n"// a21 x (k47-k77) "vmlal.s16 q10, d10, d4[2] \n"// a22 x (k08-k38) "vmlal.s16 q11, d11, d4[2] \n"// a22 x (k48-k78) "vadd.s32 q8, q8, q6 \n" "vadd.s32 q9, q9, q7 \n" "sub %11, %11, #72 \n" "vadd.s32 q10, q10, q8 \n" "vadd.s32 q11, q11, q9 \n" "vst1.s32 {d20[0]}, [%0]! \n"// out0 "vst1.s32 {d20[1]}, [%1]! \n"// out1 "vst1.s32 {d21[0]}, [%2]! \n"// out2 "vst1.s32 {d21[1]}, [%3]! \n"// out3 "vst1.s32 {d22[0]}, [%4]! \n"// out4 "vst1.s32 {d22[1]}, [%5]! \n"// out5 "vst1.s32 {d23[0]}, [%6]! \n"// out6 "vst1.s32 {d23[1]}, [%7]! 
\n"// out7 : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else // __ARM_NEON int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; sum0 += (int)r0[0] * ktmp[0]; sum1 += (int)r0[0] * ktmp[1]; sum2 += (int)r0[0] * ktmp[2]; sum3 += (int)r0[0] * ktmp[3]; sum4 += (int)r0[0] * ktmp[4]; sum5 += (int)r0[0] * ktmp[5]; sum6 += (int)r0[0] * ktmp[6]; sum7 += (int)r0[0] * ktmp[7]; ktmp += 8; sum0 += (int)r0[1] * ktmp[0]; sum1 += (int)r0[1] * ktmp[1]; sum2 += (int)r0[1] * ktmp[2]; sum3 += (int)r0[1] * ktmp[3]; sum4 += (int)r0[1] * ktmp[4]; sum5 += (int)r0[1] * ktmp[5]; sum6 += (int)r0[1] * ktmp[6]; sum7 += (int)r0[1] * ktmp[7]; ktmp += 8; sum0 += (int)r0[2] * ktmp[0]; sum1 += (int)r0[2] * ktmp[1]; sum2 += (int)r0[2] * ktmp[2]; sum3 += (int)r0[2] * ktmp[3]; sum4 += (int)r0[2] * ktmp[4]; sum5 += (int)r0[2] * ktmp[5]; sum6 += (int)r0[2] * ktmp[6]; sum7 += (int)r0[2] * ktmp[7]; ktmp += 8; sum0 += (int)r1[0] * ktmp[0]; sum1 += (int)r1[0] * ktmp[1]; sum2 += (int)r1[0] * ktmp[2]; sum3 += (int)r1[0] * ktmp[3]; sum4 += (int)r1[0] * ktmp[4]; sum5 += (int)r1[0] * ktmp[5]; sum6 += (int)r1[0] * ktmp[6]; sum7 += (int)r1[0] * ktmp[7]; ktmp += 8; sum0 += (int)r1[1] * ktmp[0]; sum1 += (int)r1[1] * ktmp[1]; sum2 += (int)r1[1] * ktmp[2]; sum3 += (int)r1[1] * ktmp[3]; sum4 += (int)r1[1] * ktmp[4]; sum5 += (int)r1[1] * ktmp[5]; sum6 += (int)r1[1] * ktmp[6]; sum7 += (int)r1[1] * ktmp[7]; ktmp += 8; sum0 += (int)r1[2] * ktmp[0]; sum1 += (int)r1[2] * ktmp[1]; sum2 += 
(int)r1[2] * ktmp[2]; sum3 += (int)r1[2] * ktmp[3]; sum4 += (int)r1[2] * ktmp[4]; sum5 += (int)r1[2] * ktmp[5]; sum6 += (int)r1[2] * ktmp[6]; sum7 += (int)r1[2] * ktmp[7]; ktmp += 8; sum0 += (int)r2[0] * ktmp[0]; sum1 += (int)r2[0] * ktmp[1]; sum2 += (int)r2[0] * ktmp[2]; sum3 += (int)r2[0] * ktmp[3]; sum4 += (int)r2[0] * ktmp[4]; sum5 += (int)r2[0] * ktmp[5]; sum6 += (int)r2[0] * ktmp[6]; sum7 += (int)r2[0] * ktmp[7]; ktmp += 8; sum0 += (int)r2[1] * ktmp[0]; sum1 += (int)r2[1] * ktmp[1]; sum2 += (int)r2[1] * ktmp[2]; sum3 += (int)r2[1] * ktmp[3]; sum4 += (int)r2[1] * ktmp[4]; sum5 += (int)r2[1] * ktmp[5]; sum6 += (int)r2[1] * ktmp[6]; sum7 += (int)r2[1] * ktmp[7]; ktmp += 8; sum0 += (int)r2[2] * ktmp[0]; sum1 += (int)r2[2] * ktmp[1]; sum2 += (int)r2[2] * ktmp[2]; sum3 += (int)r2[2] * ktmp[3]; sum4 += (int)r2[2] * ktmp[4]; sum5 += (int)r2[2] * ktmp[5]; sum6 += (int)r2[2] * ktmp[6]; sum7 += (int)r2[2] * ktmp[7]; ktmp += 8; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; ktmp -= 8*9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8*9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char* ktmp = _kernel.channel(p/8 + p%8); for (int q=0; q<inch; q++) { int* outptr = out; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b}, [%5] \n"//ktmp "ld2 {v2.8b, v3.8b}, [%2], #16 \n"//r0-r2 "ld2 {v4.8b, v5.8b}, 
[%2] \n" "ld2 {v6.8b, v7.8b}, [%3], #16 \n"//r3-r5 "ld2 {v8.8b, v9.8b}, [%3] \n" "ld2 {v10.8b, v11.8b}, [%4], #16 \n"//r6-r8 "ld2 {v12.8b, v13.8b}, [%4] \n" "ld1 {v14.4s, v15.4s}, [%1] \n"//out0 "ext v4.8b, v2.8b, v4.8b, #1 \n" "ext v8.8b, v6.8b, v8.8b, #1 \n" "ext v12.8b, v10.8b, v12.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k0-k7) "sshll v1.8h, v1.8b, #0 \n"//(k8) "sshll v2.8h, v2.8b, #0 \n"// r0 "sshll v3.8h, v3.8b, #0 \n"// r1 "sshll v4.8h, v4.8b, #0 \n"// r2 "sshll v6.8h, v6.8b, #0 \n"// r3 "sshll v7.8h, v7.8b, #0 \n"// r4 "sshll v8.8h, v8.8b, #0 \n"// r5 "sshll v10.8h, v10.8b, #0 \n"// r6 "sshll v11.8h, v11.8b, #0 \n"// r7 "sshll v12.8h, v12.8b, #0 \n"// r8 // r0 "smull v16.4s, v2.4h, v0.h[0] \n"// out = r0*k0 "smull2 v17.4s, v2.8h, v0.h[0] \n" "smull v18.4s, v3.4h, v0.h[1] \n"// outn = r1*k1 "smull2 v19.4s, v3.8h, v0.h[1] \n" "smlal v16.4s, v4.4h, v0.h[2] \n"// out = r2*k2 "smlal2 v17.4s, v4.8h, v0.h[2] \n" "smlal v18.4s, v6.4h, v0.h[3] \n"// outn = r3*k3 "smlal2 v19.4s, v6.8h, v0.h[3] \n" "smlal v16.4s, v7.4h, v0.h[4] \n"// out = r4*k4 "smlal2 v17.4s, v7.8h, v0.h[4] \n" "smlal v18.4s, v8.4h, v0.h[5] \n"// outn = r5*k5 "smlal2 v19.4s, v8.8h, v0.h[5] \n" "smlal v16.4s, v10.4h, v0.h[6] \n"// out = r6*k6 "smlal2 v17.4s, v10.8h, v0.h[6] \n" "smlal v18.4s, v11.4h, v0.h[7] \n"// outn = r7*k7 "smlal2 v19.4s, v11.8h, v0.h[7] \n" "smlal v16.4s, v12.4h, v1.h[0] \n"// out = r8*k8 "smlal2 v17.4s, v12.8h, v1.h[0] \n" "add v8.4s, v16.4s, v18.4s \n" "add v9.4s, v17.4s, v19.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); } #else if (nn > 0) { asm volatile( "vld1.s8 {d0-d1}, [%5] \n"// d0(k0 - k7) d1(k8 ...) 
"vmovl.s8 q1, d1 \n"// d2(k8 ...) "vmovl.s8 q0, d0 \n"// d0(k0 - k3) d1(k4 - k7) "0: \n" "pld [%2, #192] \n" "vld2.s8 {d4-d5}, [%2]! \n"// r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015) "vld2.s8 {d8-d9}, [%2] \n"// d8(a016 ....) "vld2.s8 {d10-d11}, [%3]! \n"// r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115) "vld2.s8 {d14-d15}, [%3] \n"// d14(a116 ....) "vld2.s8 {d16-d17}, [%4]! \n"// r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215) "vld2.s8 {d20-d21}, [%4] \n"// d20(a216 ....) "vld1.s32 {d22-d25}, [%1] \n"// q11(out0 - out3) q12(out4 - out7) "vext.s8 d8, d4, d8, #1 \n"// d8(a02 a04 ... a016) "vext.s8 d14, d10, d14, #1 \n"// d14(a12 a14 ... a116) "vext.s8 d20, d16, d20, #1 \n"// d20(a22 a24 ... a216) "vmovl.s8 q3, d5 \n"// q3(a01 a03 ... a015) "vmovl.s8 q2, d4 \n"// q2(a00 a02 ... a014) "vmovl.s8 q4, d8 \n"// q4(a02 a04 ... a016) "vmovl.s8 q6, d11 \n"// q6(a11 a13 ... a115) "vmovl.s8 q5, d10 \n"// q5(a10 a12 ... a114) "vmovl.s8 q7, d14 \n"// q7(a12 a14 ... a116) "vmovl.s8 q9, d17 \n"// q9(a21 a23 ... a215) "vmovl.s8 q8, d16 \n"// q8(a20 a22 ... a214) "vmovl.s8 q10, d20 \n"// q10(a22 a24 ... a216) "vmlal.s16 q11, d4, d0[0] \n"// k0 "vmlal.s16 q12, d5, d0[0] \n" "vmull.s16 q13, d6, d0[1] \n"// k1 "vmull.s16 q14, d7, d0[1] \n" "vmlal.s16 q11, d8, d0[2] \n"// k2 "vmlal.s16 q12, d9, d0[2] \n" "vmlal.s16 q13, d12, d1[0] \n"// k4 "vmlal.s16 q14, d13, d1[0] \n" "vmlal.s16 q11, d10, d0[3] \n"// k3 "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q13, d14, d1[1] \n"// k5 "vmlal.s16 q14, d15, d1[1] \n" "vmlal.s16 q11, d16, d1[2] \n"// k6 "vmlal.s16 q12, d17, d1[2] \n" "vmlal.s16 q13, d18, d1[3] \n"// k7 "vmlal.s16 q14, d19, d1[3] \n" "vmlal.s16 q11, d20, d2[0] \n"// k8 "vmlal.s16 q12, d21, d2[0] \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vst1.32 {d22-d25}, [%1]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON if (remain > 0) { #if __ARM_NEON int8x8_t _k01234567s8 = vld1_s8(ktmp); int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp+8); int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3); int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6); int16x8_t _k0123_s16 = vmovl_s8(_k01234567s8); int16x8_t _k3456_s16 = vmovl_s8(_k34567xxxs8); int16x8_t _k678x_s16 = vmovl_s8(_k678xxxxxs8); #endif for (; remain>0; remain--) { #if __ARM_NEON int8x8_t _r00s8 = vld1_s8(r0); int8x8_t _r10s8 = vld1_s8(r1); int8x8_t _r20s8 = vld1_s8(r2); int16x8_t _r00s16 = vmovl_s8(_r00s8); int16x8_t _r10s16 = vmovl_s8(_r10s8); int16x8_t _r20s16 = vmovl_s8(_r20s8); int32x4_t _sum = vmull_s16(vget_low_s16(_r00s16), vget_low_s16(_k0123_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r10s16), vget_low_s16(_k3456_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r20s16), vget_low_s16(_k678x_s16)); _sum = vsetq_lane_s32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_s32(_sum); #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); *outptr = vget_lane_s32(_ss, 0); #endif // __aarch64__ #else int sum = 0; sum += (int)r0[0] * ktmp[0]; sum += (int)r0[1] * ktmp[1]; sum += (int)r0[2] * ktmp[2]; sum += (int)r1[0] * ktmp[3]; sum += (int)r1[1] * ktmp[4]; sum += (int)r1[2] * ktmp[5]; sum += (int)r2[0] * ktmp[6]; sum += (int)r2[1] * ktmp[7]; sum += (int)r2[2] * ktmp[8]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 9; } } } static void conv3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& 
opt) { int kernel_w = 3; int kernel_h = 3; int stride_w = 1; int stride_h = 1; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); } static void conv3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int kernel_w = 3; int kernel_h = 3; int stride_w = 2; int stride_h = 2; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); }
GroupDegree.h
/* * GroupDegree.h * * Created on: 20.04.2018 * Author: Eugenio Angriman */ #ifndef GROUPDEGREE_H_ #define GROUPDEGREE_H_ #include <omp.h> #include "../auxiliary/BucketPQ.h" #include "../base/Algorithm.h" #include "../graph/Graph.h" namespace NetworKit { /** * @ingroup centrality */ class GroupDegree : public Algorithm { public: /** * Finds the group with the highest group degree centrality according to the * definition proposed in 'The centrality of groups and classes' by Everett et * al. (The Journal of mathematical sociology, 1999). This is a submodular but * non monotone function so the algorithm can find a solution that is at least * 1/2 of the optimum. Worst-case running time is quadratic, but usually * faster in real-world networks. * The 'countGroupNodes' option also count the nodes inside the group in the * score, this make the group degree monotone and submodular and the algorithm * is guaranteed to return a (1 - 1/e)-approximation of the optimal solution. * * @param G A graph. * @param k Size of the group of nodes * @param countGroupNodes if nodes inside the group should be counted in the * centrality score. */ GroupDegree(const Graph &G, count k = 1, bool countGroupNodes = true); /** * Computes the group with maximum degree centrality of the graph passed in * the constructor. */ void run() override; /** * Returns the group with maximum degree centrality. */ std::vector<node> groupMaxDegree(); /** * Returns the score of the group with maximum degree centrality (i.e. the * number of nodes outside the group that can be reached in one hop from at * least one node in the group). */ count getScore(); /** * Returns the score of the given group. 
*/ count scoreOfGroup(const std::vector<node> &group) const; protected: Graph G; const count k; const bool countGroupNodes; count n; std::vector<node> group; std::vector<int64_t> gain; std::vector<bool> reachable; std::vector<bool> affected; std::vector<bool> inGroup; Aux::BucketPQ queue; count groupScore; void init(); void updateQueue(); void updateGroup(); void computeScore(); void checkGroup(const std::vector<node> &group) const; }; inline std::vector<node> GroupDegree::groupMaxDegree() { assureFinished(); return group; } inline count GroupDegree::getScore() { assureFinished(); return groupScore; } inline void GroupDegree::computeScore() { groupScore = std::count(reachable.begin(), reachable.end(), true); if (!countGroupNodes) { groupScore -= k; } } inline void GroupDegree::checkGroup(const std::vector<node> &group) const { const count z = G.upperNodeIdBound(); std::vector<bool> check(z, false); #pragma omp parallel for for (omp_index i = 0; i < static_cast<omp_index>(group.size()); ++i) { node u = group[i]; if (u >= z) { std::stringstream ss; ss << "Error: node " << u << " is not in the graph.\n"; throw std::runtime_error(ss.str()); } if (check[u]) { std::stringstream ss; ss << "Error: the group contains duplicates of node " << u << ".\n"; throw std::runtime_error(ss.str()); } check[u] = true; } } inline count GroupDegree::scoreOfGroup(const std::vector<node> &group) const { checkGroup(group); std::vector<bool> touched(n, false); std::vector<bool> inGroup(n, false); for (count i = 0; i < group.size(); ++i) { inGroup[group[i]] = true; } auto processNeighbor = [&](const node u, const node v) { if (inGroup[u]) { touched[v] = true; } }; G.forNodes([&](node v) { if (!inGroup[v]) { G.forInNeighborsOf(v, [&](node u) { processNeighbor(u, v); }); } }); count result = std::count(touched.begin(), touched.end(), true); if (countGroupNodes) { result += group.size(); } return result; } } // namespace NetworKit #endif
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) { for (t4=max(max(max(0,ceild(3*t1-511,512)),ceild(24*t2-Nz-2044,2048)),ceild(8*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(12*t1+Nx+21,2048)),floord(24*t2+Nx+20,2048)),floord(8*t3+Nx+4,2048)),floord(24*t1-24*t2+Nz+Nx+19,2048));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),2048*t4+2046),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(2048*t4,t5+1); ubv=min(2048*t4+2047,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } 
PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
parallelize.h
#pragma once #include <vector> #include <omp.h> #include <gms/common/types.h> #include "gms/third_party/gapbs/gapbs.h" #include <gms/common/cli/cli.h> namespace GMS::KClique { class CLCliqueApp : public GMS::CLI::GapbsCompat { protected: int clique_size_ = 8; public: CLCliqueApp(const GMS::CLI::Args &args, const GMS::CLI::Param &clique_size) : GMS::CLI::GapbsCompat(args) { clique_size_ = clique_size.to_int(); } int clique_size() const { return clique_size_;} }; namespace Parallelize { /** * @brief Simple parallelization over the nodes/vertices of a graph * * @tparam Builder_T Able to construct a subgraph given a graph and a node * @tparam Counter_T Able to count the k-cliques within a (sub-)graph * @param g The complete graph * @param cli Command line interface with relevant parameters * @return unsigned long long Total of counted k-cliques */ template<typename Builder_T, typename Counter_T, typename CGraph = CSRGraph> unsigned long long node(CGraph& g, const CLApp& cli) { const CLCliqueApp& dcli = dynamic_cast<const CLCliqueApp&>(cli); const int cliqueSize = dcli.clique_size(); if(cliqueSize == 1) return g.num_nodes(); if(cliqueSize == 2) return g.num_edges(); uint coreNumber = 0; for(NodeId node = 0; node < g.num_nodes(); node++) { coreNumber = coreNumber > g.out_degree(node) ? coreNumber : g.out_degree(node); } unsigned long long count = 0; #pragma omp parallel reduction(+:count) { Builder_T builder(g, (uint)coreNumber); Counter_T counter(cliqueSize -1, coreNumber); #pragma omp for schedule(dynamic, 1) nowait for(NodeId node = 0; node < g.num_nodes(); node++) { CGraph graph = builder.buildSubGraph(node); count += counter.count(graph); } } return count; } /** * @brief Parallelization over the edges. Avoids storing an edge list by * iterating over the adjacency list of the graph and keeping track of the * starting vertex through a counter per thread. Finds the max-degree (=core- * number for degeneracy ordering) before starting the actual iteration. 
* * @tparam Builder_T Able to construct a subgraph given a graph and two * adjacent nodes * @tparam Counter_T Able to count the k-cliques within a (sub-)graph * @param g The complete graph * @param cli Command line interface with relevant parameters * @return unsigned long long Total of counted k-cliques */ template<typename Builder_T, typename Counter_T, typename CGraph = CSRGraph> unsigned long long edge( CGraph& g, const CLApp& cli) { const CLCliqueApp& dcli = dynamic_cast<const CLCliqueApp&>(cli); const int cliqueSize = dcli.clique_size(); if(cliqueSize == 1) return g.num_nodes(); if(cliqueSize == 2) return g.num_edges(); uint coreNumber = 0; for(NodeId node = 0; node < g.num_nodes(); node++) { coreNumber = coreNumber > g.out_degree(node) ? coreNumber : g.out_degree(node); } unsigned long long count = 0; const NodeId first = 0; const NodeId last = g.num_nodes() -1; #pragma omp parallel reduction(+:count) { Builder_T builder(g, coreNumber); Counter_T counter(cliqueSize-2, coreNumber); NodeId u_counter = 0; #pragma omp for schedule(dynamic, 1) nowait for(auto it = g.out_neigh(first).begin(); it < g.out_neigh(last).end(); it++) { while(it >= g.out_neigh(u_counter).end()) { u_counter++; } CGraph graph = builder.buildSubGraph(u_counter, *it); count += counter.count(graph); } } return count; } /** * @brief Parallelization over the edges using omp tasks. Requires one builder * and one counter per thread (not per task). Finds the max-degree (=core- * number for degeneracy ordering) before starting the actual iteration. * * @tparam Builder_T Able to construct a subgraph given a graph and two * adjacent nodes. 
* @tparam Counter_T Able to count the k-cliques within a (sub-)graph * @param g The complete graph * @param cli Command line interface with relevant parameters * @return unsigned long long Total count of k-cliques */ template<typename Builder_T, typename Counter_T, typename CGraph = CSRGraph> unsigned long long edge_tasks(CGraph& g, const CLApp& cli) { const CLCliqueApp& dcli = dynamic_cast<const CLCliqueApp&>(cli); const int cliqueSize = dcli.clique_size(); if(cliqueSize == 1) return g.num_nodes(); if(cliqueSize == 2) return g.num_edges(); uint coreNumber = 0; for(NodeId node = 0; node < g.num_nodes(); node++) { coreNumber = coreNumber > g.out_degree(node) ? coreNumber : g.out_degree(node); } unsigned long long count = 0; int length = 1; #ifdef _OPENMP #pragma omp parallel { length = omp_get_max_threads(); } #endif std::vector<unsigned long long> thread_count = std::vector<unsigned long long>(length, 0); std::vector<Builder_T*> builders = std::vector<Builder_T*>(length, nullptr); std::vector<Counter_T*> counters = std::vector<Counter_T*>(length, nullptr); #pragma omp parallel reduction(+:count) { #ifdef _OPENMP int id = omp_get_thread_num(); #endif #ifndef _OPENMP int id = 0; #endif builders[id] = new Builder_T(g, coreNumber); counters[id] = new Counter_T(cliqueSize-2, coreNumber); #pragma omp barrier #pragma omp single { #pragma omp task untied for(NodeId node = 0; node < g.num_nodes(); node++) { for(NodeId neigh : g.out_neigh(node)) { #pragma omp task firstprivate(node, neigh) { #ifdef _OPENMP int iid = omp_get_thread_num(); #endif #ifndef _OPENMP int iid = 0; #endif CGraph graph = builders[iid]->buildSubGraph(node, neigh); thread_count[iid] += counters[iid]->count(graph); } } } } #pragma omp taskwait delete builders[id]; delete counters[id]; count += thread_count[id]; } return count; } /** * @brief Simple parallelization over the edges. Very fast parallel * loop, but needs to build and store an edge list first. 
* * @tparam Builder_T Able to construct a subgraph given a graph and two * adjacent nodes * @tparam Counter_T Able to count the k-cliques within a (sub-)graph * @param g The complete graph * @param cli Command line interface with relevant parameters * @return unsigned long long Total count of k-cliques */ template<typename Builder_T, typename Counter_T, typename CGraph = CSRGraph> unsigned long long edge_simple(CGraph& g, const CLApp& cli) { const CLCliqueApp& dcli = dynamic_cast<const CLCliqueApp&>(cli); const int cliqueSize = dcli.clique_size(); if(cliqueSize == 1) return g.num_nodes(); if(cliqueSize == 2) return g.num_edges(); uint coreNumber = 0; NodeId* start = new NodeId[g.num_edges()]; NodeId* target = new NodeId[g.num_edges()]; size_t idx = 0; for(NodeId node = 0; node < g.num_nodes(); node++) { coreNumber = coreNumber > g.out_degree(node) ? coreNumber : g.out_degree(node); for(NodeId neigh : g.out_neigh(node)) { start[idx] = node; target[idx] = neigh; idx++; } } unsigned long long count = 0; int length = 1; #ifdef _OPENMP { length = omp_get_max_threads(); } #endif std::vector<unsigned long long> thread_count = std::vector<unsigned long long>(length, 0); #pragma omp parallel reduction(+:count) { Builder_T builder(g, coreNumber); Counter_T counter(cliqueSize-2, coreNumber); #pragma omp for schedule(dynamic, 1) nowait for(size_t i = 0; i < g.num_edges(); i++) { CGraph graph = builder.buildSubGraph(start[i], target[i]); #ifdef _OPENMP int id = omp_get_thread_num(); #endif #ifndef _OPENMP int id = 0; #endif thread_count[id] += counter.count(graph); } #ifdef _OPENMP int id = omp_get_thread_num(); #endif #ifndef _OPENMP int id = 0; #endif count += thread_count[id]; } delete[] start; delete[] target; return count; } /** * @brief Provides mixed parallelization over the edges and nodes of a graph, * based on a simple heuristic. 
* IMPORTANT: efficiency/speed is untested * * @tparam Builder_T Able to construct a subgraph given a graph and * either a node or two adjecent nodes * @tparam Counter_T Able to count the k-cliques within a (sub-)graph * @param cli Command line interface with relevant parameters * @param g The complete graph * @param coreNumber The corenumber (=max degree in a graph with * degeneracy ordering) * @return unsigned long long Total count of k-cliques */ template<typename Builder_T, typename Counter_T, typename CGraph = CSRGraph> unsigned long long mixed(const CLApp& cli, CGraph& g, int coreNumber=0) { const int cliqueSize = 1; if(cliqueSize == 1) return g.num_nodes(); if(cliqueSize == 2) return g.num_edges(); if (coreNumber == 0) { for(NodeId node = 0; node < g.num_nodes(); node++) { coreNumber = coreNumber > g.out_degree(node) ? coreNumber : g.out_degree(node); } } unsigned long long count = 0; const NodeId first = 0; const NodeId last = g.num_nodes() -1; #pragma omp parallel reduction(+:count) { Builder_T builder(g, coreNumber); Counter_T counter(cliqueSize -1, coreNumber); #pragma omp for schedule(dynamic, 1) nowait for(NodeId node = 0; node < g.num_nodes(); node++) { if(g.out_degree(node) > 3*cliqueSize) { for(NodeId neigh : g.out_neigh(node)) { #pragma omp task { CGraph graph = builder.buildSubGraph(node, neigh); count += counter.count(graph); } } } else { #pragma omp task { CGraph graph = builder.buildSubGraph(node); count += counter.count(graph); } } } } #pragma omp taskwait return count; } } namespace Serial { /** * @brief Does not provide any parallelization. 
Serves as a proxy to have * the same structure as with the parallelization routines * * @tparam Counter_T Able to count the k-cliques within a (sub-)graph * @param g The complete graph * @param cli Command line interface with relevant parameters * @return unsigned long long Total count of k-cliques */ template<typename Counter_T, typename CGraph = CSRGraph> unsigned long long standard(CGraph& g, const CLApp& cli) { const CLCliqueApp& dcli = dynamic_cast<const CLCliqueApp&>(cli); const int cliqueSize = dcli.clique_size(); if(cliqueSize == 1) return g.num_nodes(); if(cliqueSize == 2) return g.num_edges(); Counter_T counter(cliqueSize, g); return counter.count(g); } } }
convolution_3x3_pack8to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd43_transform_kernel_pack8to4_int8_msa(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt) { // winograd43 transform kernel Mat kernel_tm(6 * 6, inch, outch, (size_t)2u); const short ktm[6][3] = { {6, 0, 0}, {-4, -4, -4}, {-4, 4, -4}, {1, 2, 4}, {1, -2, 4}, {0, 0, 6} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { short* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = 4b-8a-inch/8a-36-outch/4b kernel_tm_pack8.create(inch / 8, 36, outch / 4, 
(size_t)2u * 32, 32); int q = 0; for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); Mat kernel_tm = kernel_tm_pack8.channel(q / 4); for (int k = 0; k < 36; k++) { short* g00 = kernel_tm.row<short>(k); for (int p = 0; p + 7 < inch; p += 8) { for (int i = 0; i < 8; i++) { const short* k00 = k0.row<const short>(p + i); const short* k10 = k1.row<const short>(p + i); const short* k20 = k2.row<const short>(p + i); const short* k30 = k3.row<const short>(p + i); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00 += 4; } } } } } static void conv3x3s1_winograd43_pack8to4_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; // size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * 
r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); short tmp[6][6][8]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { v16i8 _r00_01 = __msa_ld_b(r0, 0); v16i8 _r02_03 = __msa_ld_b(r0 + 16, 0); v16i8 _r04_05 = __msa_ld_b(r0 + 32, 0); v16i8 _extr0001 = __msa_clti_s_b(_r00_01, 0); v16i8 _extr0203 = __msa_clti_s_b(_r02_03, 0); v16i8 _extr0405 = __msa_clti_s_b(_r04_05, 0); v8i16 _r00 = (v8i16)__msa_ilvr_b(_extr0001, _r00_01); v8i16 _r01 = (v8i16)__msa_ilvl_b(_extr0001, _r00_01); v8i16 _r02 = (v8i16)__msa_ilvr_b(_extr0203, _r02_03); v8i16 _r03 = (v8i16)__msa_ilvl_b(_extr0203, _r02_03); v8i16 _r04 = (v8i16)__msa_ilvr_b(_extr0405, _r04_05); v8i16 _r05 = (v8i16)__msa_ilvl_b(_extr0405, _r04_05); v8i16 _v5 = __msa_fill_h(5); v8i16 _tmp0m = __msa_subv_h(__msa_addv_h(__msa_slli_h(_r00, 2), _r04), __msa_mulv_h(_r02, _v5)); v8i16 _tmp1m = __msa_subv_h(__msa_addv_h(_r04, _r03), __msa_slli_h(__msa_addv_h(_r01, _r02), 2)); v8i16 _tmp2m = __msa_addv_h(__msa_subv_h(_r04, _r03), __msa_slli_h(__msa_subv_h(_r01, _r02), 2)); v8i16 _tmp3m = __msa_subv_h(__msa_subv_h(_r04, _r02), __msa_slli_h(__msa_subv_h(_r01, _r03), 1)); v8i16 _tmp4m = __msa_addv_h(__msa_subv_h(_r04, _r02), __msa_slli_h(__msa_subv_h(_r01, _r03), 1)); v8i16 _tmp5m = __msa_subv_h(__msa_addv_h(__msa_slli_h(_r01, 2), _r05), __msa_mulv_h(_r03, _v5)); __msa_st_h(_tmp0m, tmp[0][m], 0); __msa_st_h(_tmp1m, tmp[1][m], 0); __msa_st_h(_tmp2m, tmp[2][m], 0); __msa_st_h(_tmp3m, tmp[3][m], 0); __msa_st_h(_tmp4m, tmp[4][m], 0); __msa_st_h(_tmp5m, tmp[5][m], 0); r0 += w * 8; } short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8; short* r0_tm_1 = r0_tm_0 + tiles * 8; short* r0_tm_2 = r0_tm_0 + tiles * 16; short* r0_tm_3 = r0_tm_0 + tiles * 24; short* 
r0_tm_4 = r0_tm_0 + tiles * 32; short* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { v8i16 _tmp00 = __msa_ld_h(tmp[m][0], 0); v8i16 _tmp01 = __msa_ld_h(tmp[m][1], 0); v8i16 _tmp02 = __msa_ld_h(tmp[m][2], 0); v8i16 _tmp03 = __msa_ld_h(tmp[m][3], 0); v8i16 _tmp04 = __msa_ld_h(tmp[m][4], 0); v8i16 _tmp05 = __msa_ld_h(tmp[m][5], 0); v8i16 _v5 = __msa_fill_h(5); v8i16 _r0tm0 = __msa_subv_h(__msa_addv_h(__msa_slli_h(_tmp00, 2), _tmp04), __msa_mulv_h(_tmp02, _v5)); v8i16 _r0tm1 = __msa_subv_h(__msa_addv_h(_tmp04, _tmp03), __msa_slli_h(__msa_addv_h(_tmp01, _tmp02), 2)); v8i16 _r0tm2 = __msa_addv_h(__msa_subv_h(_tmp04, _tmp03), __msa_slli_h(__msa_subv_h(_tmp01, _tmp02), 2)); v8i16 _r0tm3 = __msa_subv_h(__msa_subv_h(_tmp04, _tmp02), __msa_slli_h(__msa_subv_h(_tmp01, _tmp03), 1)); v8i16 _r0tm4 = __msa_addv_h(__msa_subv_h(_tmp04, _tmp02), __msa_slli_h(__msa_subv_h(_tmp01, _tmp03), 1)); v8i16 _r0tm5 = __msa_subv_h(__msa_addv_h(__msa_slli_h(_tmp01, 2), _tmp05), __msa_mulv_h(_tmp03, _v5)); __msa_st_h(_r0tm0, r0_tm_0, 0); __msa_st_h(_r0tm1, r0_tm_1, 0); __msa_st_h(_r0tm2, r0_tm_2, 0); __msa_st_h(_r0tm3, r0_tm_3, 0); __msa_st_h(_r0tm4, r0_tm_4, 0); __msa_st_h(_r0tm5, r0_tm_5, 0); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = 
bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 1 < tiles; i += 2) { short* tmpptr = tm2.row<short>(i / 2); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { v8i16 _r0 = __msa_ld_h(r0, 0); v8i16 _r1 = __msa_ld_h(r0 + 8, 0); __msa_st_h(_r0, tmpptr, 0); __msa_st_h(_r1, tmpptr + 8, 0); r0 += bottom_blob_tm.cstep * 8; tmpptr += 16; } } for (; i < tiles; i++) { short* tmpptr = tm2.row<short>(i / 2 + i % 2); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { v8i16 _r0 = __msa_ld_h(r0, 0); __msa_st_h(_r0, tmpptr, 0); r0 += bottom_blob_tm.cstep * 8; tmpptr += 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * 4, 4, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 1 < tiles; i += 2) { const short* r0 = bb2.row<const short>(i / 2); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 v4i32 _sum0 = __msa_fill_w(0); v4i32 _sum1 = __msa_fill_w(0); v4i32 _sum2 = __msa_fill_w(0); v4i32 _sum3 = __msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 64); __builtin_prefetch(k0 + 128); v8i16 _w0 = __msa_ld_h(k0, 0); v8i16 _w1 = __msa_ld_h(k0 + 8, 0); v8i16 _w2 = __msa_ld_h(k0 + 16, 0); v8i16 _w3 = __msa_ld_h(k0 + 24, 0); v8i16 _extw0 = __msa_clti_s_h(_w0, 0); v8i16 _extw1 = __msa_clti_s_h(_w1, 0); v8i16 _extw2 = __msa_clti_s_h(_w2, 0); v8i16 _extw3 = __msa_clti_s_h(_w3, 0); v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0); v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0); v4i32 _w1l = (v4i32)__msa_ilvr_h(_extw1, _w1); v4i32 _w1h = (v4i32)__msa_ilvl_h(_extw1, _w1); v4i32 _w2l = (v4i32)__msa_ilvr_h(_extw2, _w2); v4i32 _w2h = (v4i32)__msa_ilvl_h(_extw2, _w2); v4i32 
_w3l = (v4i32)__msa_ilvr_h(_extw3, _w3); v4i32 _w3h = (v4i32)__msa_ilvl_h(_extw3, _w3); v4i32 _val0_0 = __msa_fill_w(r0[0]); v4i32 _val0_1 = __msa_fill_w(r0[1]); v4i32 _val0_2 = __msa_fill_w(r0[2]); v4i32 _val0_3 = __msa_fill_w(r0[3]); v4i32 _val0_4 = __msa_fill_w(r0[4]); v4i32 _val0_5 = __msa_fill_w(r0[5]); v4i32 _val0_6 = __msa_fill_w(r0[6]); v4i32 _val0_7 = __msa_fill_w(r0[7]); v4i32 _val1_0 = __msa_fill_w(r0[8]); v4i32 _val1_1 = __msa_fill_w(r0[9]); v4i32 _val1_2 = __msa_fill_w(r0[10]); v4i32 _val1_3 = __msa_fill_w(r0[11]); v4i32 _val1_4 = __msa_fill_w(r0[12]); v4i32 _val1_5 = __msa_fill_w(r0[13]); v4i32 _val1_6 = __msa_fill_w(r0[14]); v4i32 _val1_7 = __msa_fill_w(r0[15]); _sum0 = __msa_maddv_w(_sum0, _w0l, _val0_0); _sum1 = __msa_maddv_w(_sum1, _w0h, _val0_1); _sum2 = __msa_maddv_w(_sum2, _w0l, _val1_0); _sum3 = __msa_maddv_w(_sum3, _w0h, _val1_1); _sum0 = __msa_maddv_w(_sum0, _w1l, _val0_2); _sum1 = __msa_maddv_w(_sum1, _w1h, _val0_3); _sum2 = __msa_maddv_w(_sum2, _w1l, _val1_2); _sum3 = __msa_maddv_w(_sum3, _w1h, _val1_3); _sum0 = __msa_maddv_w(_sum0, _w2l, _val0_4); _sum1 = __msa_maddv_w(_sum1, _w2h, _val0_5); _sum2 = __msa_maddv_w(_sum2, _w2l, _val1_4); _sum3 = __msa_maddv_w(_sum3, _w2h, _val1_5); _sum0 = __msa_maddv_w(_sum0, _w3l, _val0_6); _sum1 = __msa_maddv_w(_sum1, _w3h, _val0_7); _sum2 = __msa_maddv_w(_sum2, _w3l, _val1_6); _sum3 = __msa_maddv_w(_sum3, _w3h, _val1_7); r0 += 16; k0 += 32; } _sum0 = __msa_addv_w(_sum0, _sum1); _sum2 = __msa_addv_w(_sum2, _sum3); __msa_st_w(_sum0, output0_tm, 0); __msa_st_w(_sum2, output0_tm + 4, 0); output0_tm += 8; } for (; i < tiles; i++) { const short* r0 = bb2.row<const short>(i / 2 + i % 2); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 v4i32 _sum0 = __msa_fill_w(0); v4i32 _sum1 = __msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 32); __builtin_prefetch(k0 + 128); v8i16 _w0 = __msa_ld_h(k0, 0); v8i16 _w1 = __msa_ld_h(k0 + 8, 0); v8i16 _w2 = __msa_ld_h(k0 
+ 16, 0); v8i16 _w3 = __msa_ld_h(k0 + 24, 0); v8i16 _extw0 = __msa_clti_s_h(_w0, 0); v8i16 _extw1 = __msa_clti_s_h(_w1, 0); v8i16 _extw2 = __msa_clti_s_h(_w2, 0); v8i16 _extw3 = __msa_clti_s_h(_w3, 0); v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0); v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0); v4i32 _w1l = (v4i32)__msa_ilvr_h(_extw1, _w1); v4i32 _w1h = (v4i32)__msa_ilvl_h(_extw1, _w1); v4i32 _w2l = (v4i32)__msa_ilvr_h(_extw2, _w2); v4i32 _w2h = (v4i32)__msa_ilvl_h(_extw2, _w2); v4i32 _w3l = (v4i32)__msa_ilvr_h(_extw3, _w3); v4i32 _w3h = (v4i32)__msa_ilvl_h(_extw3, _w3); v4i32 _val0 = __msa_fill_w(r0[0]); v4i32 _val1 = __msa_fill_w(r0[1]); v4i32 _val2 = __msa_fill_w(r0[2]); v4i32 _val3 = __msa_fill_w(r0[3]); v4i32 _val4 = __msa_fill_w(r0[4]); v4i32 _val5 = __msa_fill_w(r0[5]); v4i32 _val6 = __msa_fill_w(r0[6]); v4i32 _val7 = __msa_fill_w(r0[7]); _sum0 = __msa_maddv_w(_sum0, _w0l, _val0); _sum1 = __msa_maddv_w(_sum1, _w0h, _val1); _sum0 = __msa_maddv_w(_sum0, _w1l, _val2); _sum1 = __msa_maddv_w(_sum1, _w1h, _val3); _sum0 = __msa_maddv_w(_sum0, _w2l, _val4); _sum1 = __msa_maddv_w(_sum1, _w2h, _val5); _sum0 = __msa_maddv_w(_sum0, _w3l, _val6); _sum1 = __msa_maddv_w(_sum1, _w3h, _val7); r0 += 8; k0 += 32; } _sum0 = __msa_addv_w(_sum0, _sum1); __msa_st_w(_sum0, output0_tm, 0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u * 4, 4, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = 
w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); int tmp[4][6][4]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 4; const int* output0_tm_1 = output0_tm_0 + tiles * 4; const int* output0_tm_2 = output0_tm_0 + tiles * 8; const int* output0_tm_3 = output0_tm_0 + tiles * 12; const int* output0_tm_4 = output0_tm_0 + tiles * 16; const int* output0_tm_5 = output0_tm_0 + tiles * 20; int* output0 = out0.row<int>(i * 4) + (j * 4) * 4; for (int m = 0; m < 5; m++) { v4i32 _out0tm0 = __msa_ld_w(output0_tm_0, 0); v4i32 _out0tm1 = __msa_ld_w(output0_tm_1, 0); v4i32 _out0tm2 = __msa_ld_w(output0_tm_2, 0); v4i32 _out0tm3 = __msa_ld_w(output0_tm_3, 0); v4i32 _out0tm4 = __msa_ld_w(output0_tm_4, 0); v4i32 _out0tm5 = __msa_ld_w(output0_tm_5, 0); v4i32 _tmp02a = __msa_addv_w(_out0tm1, _out0tm2); v4i32 _tmp13a = __msa_subv_w(_out0tm1, _out0tm2); v4i32 _tmp02b = __msa_addv_w(_out0tm3, _out0tm4); v4i32 _tmp13b = __msa_subv_w(_out0tm3, _out0tm4); v4i32 _tmp0m = __msa_addv_w(__msa_addv_w(_out0tm0, _tmp02a), _tmp02b); v4i32 _tmp1m = __msa_addv_w(_tmp13a, __msa_slli_w(_tmp13b, 1)); v4i32 _tmp2m = __msa_addv_w(_tmp02a, __msa_slli_w(_tmp02b, 2)); v4i32 _tmp3m = __msa_addv_w(__msa_addv_w(_tmp13a, __msa_slli_w(_out0tm5, 2)), __msa_slli_w(_tmp13b, 3)); __msa_st_w(_tmp0m, tmp[0][m], 0); __msa_st_w(_tmp1m, tmp[1][m], 0); __msa_st_w(_tmp2m, tmp[2][m], 0); __msa_st_w(_tmp3m, tmp[3][m], 0); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 5; m < 6; m++) { v4i32 _out0tm0 = __msa_ld_w(output0_tm_0, 0); v4i32 _out0tm1 = __msa_ld_w(output0_tm_1, 0); v4i32 _out0tm2 = 
__msa_ld_w(output0_tm_2, 0); v4i32 _out0tm3 = __msa_ld_w(output0_tm_3, 0); v4i32 _out0tm4 = __msa_ld_w(output0_tm_4, 0); v4i32 _out0tm5 = __msa_ld_w(output0_tm_5, 0); v4i32 _tmp02a = __msa_addv_w(_out0tm1, _out0tm2); v4i32 _tmp13a = __msa_subv_w(_out0tm1, _out0tm2); v4i32 _tmp02b = __msa_addv_w(_out0tm3, _out0tm4); v4i32 _tmp13b = __msa_subv_w(_out0tm3, _out0tm4); v4i32 _tmp0m = __msa_addv_w(__msa_addv_w(_out0tm0, _tmp02a), _tmp02b); v4i32 _tmp1m = __msa_addv_w(_tmp13a, __msa_slli_w(_tmp13b, 1)); v4i32 _tmp2m = __msa_addv_w(_tmp02a, __msa_slli_w(_tmp02b, 2)); v4i32 _tmp3m = __msa_addv_w(__msa_addv_w(_tmp13a, __msa_slli_w(_out0tm5, 2)), __msa_slli_w(_tmp13b, 3)); _tmp0m = __msa_slli_w(_tmp0m, 2); _tmp1m = __msa_slli_w(_tmp1m, 2); _tmp2m = __msa_slli_w(_tmp2m, 2); _tmp3m = __msa_slli_w(_tmp3m, 2); __msa_st_w(_tmp0m, tmp[0][m], 0); __msa_st_w(_tmp1m, tmp[1][m], 0); __msa_st_w(_tmp2m, tmp[2][m], 0); __msa_st_w(_tmp3m, tmp[3][m], 0); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 0; m < 4; m++) { v4i32 _tmp00 = __msa_ld_w(tmp[m][0], 0); v4i32 _tmp01 = __msa_ld_w(tmp[m][1], 0); v4i32 _tmp02 = __msa_ld_w(tmp[m][2], 0); v4i32 _tmp03 = __msa_ld_w(tmp[m][3], 0); v4i32 _tmp04 = __msa_ld_w(tmp[m][4], 0); v4i32 _tmp05 = __msa_ld_w(tmp[m][5], 0); v4i32 _tmp02a = __msa_addv_w(_tmp01, _tmp02); v4i32 _tmp13a = __msa_subv_w(_tmp01, _tmp02); v4i32 _tmp02b = __msa_addv_w(_tmp03, _tmp04); v4i32 _tmp13b = __msa_subv_w(_tmp03, _tmp04); v4i32 _out00 = __msa_addv_w(__msa_addv_w(_tmp00, _tmp02a), _tmp02b); v4i32 _out01 = __msa_addv_w(_tmp13a, __msa_slli_w(_tmp13b, 1)); v4i32 _out02 = __msa_addv_w(_tmp02a, __msa_slli_w(_tmp02b, 2)); v4i32 _out03 = __msa_addv_w(__msa_addv_w(_tmp05, _tmp13a), __msa_slli_w(_tmp13b, 3)); // TODO use integer trick for division by 576 v4f32 _v576 = __msa_fill_w_f32(1.0 / 576); _out00 = 
__msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out00), _v576)); _out01 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out01), _v576)); _out02 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out02), _v576)); _out03 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out03), _v576)); __msa_st_w(_out00, output0, 0); __msa_st_w(_out01, output0 + 4, 0); __msa_st_w(_out02, output0 + 8, 0); __msa_st_w(_out03, output0 + 12, 0); output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
hopscotch_hash.h
/****************************************************************************** * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /** * Hopscotch hash is modified from the code downloaded from * https://sites.google.com/site/cconcurrencypackage/hopscotch-hashing * with the following terms of usage */ //////////////////////////////////////////////////////////////////////////////// //TERMS OF USAGE //------------------------------------------------------------------------------ // // Permission to use, copy, modify and distribute this software and // its documentation for any purpose is hereby granted without fee, // provided that due acknowledgments to the authors are provided and // this permission notice appears in all copies of the software. // The software is provided "as is". There is no warranty of any kind. // //Authors: // Maurice Herlihy // Brown University // and // Nir Shavit // Tel-Aviv University // and // Moran Tzafrir // Tel-Aviv University // // Date: July 15, 2008. // //////////////////////////////////////////////////////////////////////////////// // Programmer : Moran Tzafrir (MoranTza@gmail.com) // Modified : Jongsoo Park (jongsoo.park@intel.com) // Oct 1, 2015. 
// //////////////////////////////////////////////////////////////////////////////// #ifndef hypre_HOPSCOTCH_HASH_HEADER #define hypre_HOPSCOTCH_HASH_HEADER //#include <strings.h> #include <string.h> #include <stdio.h> #include <limits.h> //#include <math.h> #ifdef HYPRE_USING_OPENMP #include <omp.h> #endif //#include "_hypre_utilities.h" // Potentially architecture specific features used here: // __sync_val_compare_and_swap #ifdef __cplusplus extern "C" { #endif /****************************************************************************** * This next section of code is here instead of in _hypre_utilities.h to get * around some portability issues with Visual Studio. By putting it here, we * can explicitly include this '.h' file in a few files in hypre and compile * them with C++ instead of C (VS does not support C99 'inline'). ******************************************************************************/ #ifdef HYPRE_USING_ATOMIC static inline HYPRE_Int hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval ) { #if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 return __sync_val_compare_and_swap(ptr, oldval, newval); //#elif defind _MSC_VER //return _InterlockedCompareExchange((long *)ptr, newval, oldval); //#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) // JSP: not many compilers have implemented this, so comment out for now //_Atomic HYPRE_Int *atomic_ptr = ptr; //atomic_compare_exchange_strong(atomic_ptr, &oldval, newval); //return oldval; #endif } static inline HYPRE_Int hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value ) { #if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 return __sync_fetch_and_add(ptr, value); //#elif defined _MSC_VER //return _InterlockedExchangeAdd((long 
*)ptr, value); //#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) // JSP: not many compilers have implemented this, so comment out for now //_Atomic HYPRE_Int *atomic_ptr = ptr; //return atomic_fetch_add(atomic_ptr, value); #endif } #else // !HYPRE_USING_ATOMIC static inline HYPRE_Int hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval ) { if (*ptr == oldval) { *ptr = newval; return oldval; } else { return *ptr; } } static inline HYPRE_Int hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value ) { HYPRE_Int oldval = *ptr; *ptr += value; return oldval; } #endif // !HYPRE_USING_ATOMIC /******************************************************************************/ // Constants ................................................................ #define HYPRE_HOPSCOTCH_HASH_HOP_RANGE (32) #define HYPRE_HOPSCOTCH_HASH_INSERT_RANGE (4*1024) #define HYPRE_HOPSCOTCH_HASH_EMPTY (0) #define HYPRE_HOPSCOTCH_HASH_BUSY (1) // Small Utilities .......................................................... static inline HYPRE_Int first_lsb_bit_indx( hypre_uint x ) { HYPRE_Int pos; #if defined(_MSC_VER) || defined(__MINGW64__) if (x == 0) { pos = 0; } else { for (pos = 1; !(x & 1); ++pos) { x >>= 1; } } #else pos = ffs(x); #endif return (pos - 1); } /** * hypre_Hash is adapted from xxHash with the following license. */ /* xxHash - Extremely Fast Hash algorithm Header File Copyright (C) 2012-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - xxHash source repository : https://github.com/Cyan4973/xxHash */ /*************************************** * Constants ***************************************/ #define HYPRE_XXH_PRIME32_1 2654435761U #define HYPRE_XXH_PRIME32_2 2246822519U #define HYPRE_XXH_PRIME32_3 3266489917U #define HYPRE_XXH_PRIME32_4 668265263U #define HYPRE_XXH_PRIME32_5 374761393U #define HYPRE_XXH_PRIME64_1 11400714785074694791ULL #define HYPRE_XXH_PRIME64_2 14029467366897019727ULL #define HYPRE_XXH_PRIME64_3 1609587929392839161ULL #define HYPRE_XXH_PRIME64_4 9650029242287828579ULL #define HYPRE_XXH_PRIME64_5 2870177450012600261ULL #define HYPRE_XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) #define HYPRE_XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) #if defined(HYPRE_MIXEDINT) || defined(HYPRE_BIGINT) static inline HYPRE_BigInt hypre_BigHash( HYPRE_BigInt input ) { hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input); hypre_ulongint k1 = input; k1 *= HYPRE_XXH_PRIME64_2; k1 = HYPRE_XXH_rotl64(k1, 31); k1 *= 
HYPRE_XXH_PRIME64_1; h64 ^= k1; h64 = HYPRE_XXH_rotl64(h64, 27) * HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4; h64 ^= h64 >> 33; h64 *= HYPRE_XXH_PRIME64_2; h64 ^= h64 >> 29; h64 *= HYPRE_XXH_PRIME64_3; h64 ^= h64 >> 32; #ifndef NDEBUG if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64) { hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY); hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64); } #endif return h64; } #else static inline HYPRE_Int hypre_BigHash(HYPRE_Int input) { hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input); // 1665863975 is added to input so that // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY. // Hence, we're fine as long as key is non-negative. h32 += (input + 1665863975) * HYPRE_XXH_PRIME32_3; h32 = HYPRE_XXH_rotl32(h32, 17) * HYPRE_XXH_PRIME32_4; h32 ^= h32 >> 15; h32 *= HYPRE_XXH_PRIME32_2; h32 ^= h32 >> 13; h32 *= HYPRE_XXH_PRIME32_3; h32 ^= h32 >> 16; //hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32); return h32; } #endif #ifdef HYPRE_BIGINT static inline HYPRE_Int hypre_Hash(HYPRE_Int input) { hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input); hypre_ulongint k1 = input; k1 *= HYPRE_XXH_PRIME64_2; k1 = HYPRE_XXH_rotl64(k1, 31); k1 *= HYPRE_XXH_PRIME64_1; h64 ^= k1; h64 = HYPRE_XXH_rotl64(h64, 27) * HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4; h64 ^= h64 >> 33; h64 *= HYPRE_XXH_PRIME64_2; h64 ^= h64 >> 29; h64 *= HYPRE_XXH_PRIME64_3; h64 ^= h64 >> 32; #ifndef NDEBUG if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64) { hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY); hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64); } #endif return h64; } #else static inline HYPRE_Int hypre_Hash(HYPRE_Int input) { hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input); // 1665863975 is added to input so that // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY. // Hence, we're fine as long as key is non-negative. 
h32 += (input + 1665863975) * HYPRE_XXH_PRIME32_3; h32 = HYPRE_XXH_rotl32(h32, 17) * HYPRE_XXH_PRIME32_4; h32 ^= h32 >> 15; h32 *= HYPRE_XXH_PRIME32_2; h32 ^= h32 >> 13; h32 *= HYPRE_XXH_PRIME32_3; h32 ^= h32 >> 16; //hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32); return h32; } #endif static inline void hypre_UnorderedIntSetFindCloserFreeBucket( hypre_UnorderedIntSet *s, #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *start_seg, #endif HYPRE_Int *free_bucket, HYPRE_Int *free_dist ) { HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1); HYPRE_Int move_free_dist; for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist) { hypre_uint start_hop_info = s->hopInfo[move_bucket]; HYPRE_Int move_new_free_dist = -1; hypre_uint mask = 1; HYPRE_Int i; for (i = 0; i < move_free_dist; ++i, mask <<= 1) { if (mask & start_hop_info) { move_new_free_dist = i; break; } } if (-1 != move_new_free_dist) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]); if (start_seg != move_segment) { omp_set_lock(&move_segment->lock); } #endif if (start_hop_info == s->hopInfo[move_bucket]) { // new_free_bucket -> free_bucket and empty new_free_bucket HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist; s->key[*free_bucket] = s->key[new_free_bucket]; s->hash[*free_bucket] = s->hash[new_free_bucket]; #ifdef HYPRE_CONCURRENT_HOPSCOTCH ++move_segment->timestamp; #pragma omp flush #endif s->hopInfo[move_bucket] |= (1U << move_free_dist); s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist); *free_bucket = new_free_bucket; *free_dist -= move_free_dist - move_new_free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (start_seg != move_segment) { omp_unset_lock(&move_segment->lock); } #endif return; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (start_seg != move_segment) { omp_unset_lock(&move_segment->lock); } #endif } ++move_bucket; } *free_bucket = -1; *free_dist = 
0; } static inline void hypre_UnorderedBigIntSetFindCloserFreeBucket( hypre_UnorderedBigIntSet *s, #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *start_seg, #endif HYPRE_Int *free_bucket, HYPRE_Int *free_dist ) { HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1); HYPRE_Int move_free_dist; for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist) { hypre_uint start_hop_info = s->hopInfo[move_bucket]; HYPRE_Int move_new_free_dist = -1; hypre_uint mask = 1; HYPRE_Int i; for (i = 0; i < move_free_dist; ++i, mask <<= 1) { if (mask & start_hop_info) { move_new_free_dist = i; break; } } if (-1 != move_new_free_dist) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]); if (start_seg != move_segment) { omp_set_lock(&move_segment->lock); } #endif if (start_hop_info == s->hopInfo[move_bucket]) { // new_free_bucket -> free_bucket and empty new_free_bucket HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist; s->key[*free_bucket] = s->key[new_free_bucket]; s->hash[*free_bucket] = s->hash[new_free_bucket]; #ifdef HYPRE_CONCURRENT_HOPSCOTCH ++move_segment->timestamp; #pragma omp flush #endif s->hopInfo[move_bucket] |= (1U << move_free_dist); s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist); *free_bucket = new_free_bucket; *free_dist -= move_free_dist - move_new_free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (start_seg != move_segment) { omp_unset_lock(&move_segment->lock); } #endif return; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (start_seg != move_segment) { omp_unset_lock(&move_segment->lock); } #endif } ++move_bucket; } *free_bucket = -1; *free_dist = 0; } static inline void hypre_UnorderedIntMapFindCloserFreeBucket( hypre_UnorderedIntMap *m, #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *start_seg, #endif hypre_HopscotchBucket **free_bucket, HYPRE_Int *free_dist) { hypre_HopscotchBucket* move_bucket = 
*free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1); HYPRE_Int move_free_dist; for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist) { hypre_uint start_hop_info = move_bucket->hopInfo; HYPRE_Int move_new_free_dist = -1; hypre_uint mask = 1; HYPRE_Int i; for (i = 0; i < move_free_dist; ++i, mask <<= 1) { if (mask & start_hop_info) { move_new_free_dist = i; break; } } if (-1 != move_new_free_dist) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]); if (start_seg != move_segment) { omp_set_lock(&move_segment->lock); } #endif if (start_hop_info == move_bucket->hopInfo) { // new_free_bucket -> free_bucket and empty new_free_bucket hypre_HopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist; (*free_bucket)->data = new_free_bucket->data; (*free_bucket)->key = new_free_bucket->key; (*free_bucket)->hash = new_free_bucket->hash; #ifdef HYPRE_CONCURRENT_HOPSCOTCH ++move_segment->timestamp; #pragma omp flush #endif move_bucket->hopInfo |= (1U << move_free_dist); move_bucket->hopInfo &= ~(1U << move_new_free_dist); *free_bucket = new_free_bucket; *free_dist -= move_free_dist - move_new_free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (start_seg != move_segment) { omp_unset_lock(&move_segment->lock); } #endif return; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (start_seg != move_segment) { omp_unset_lock(&move_segment->lock); } #endif } ++move_bucket; } *free_bucket = NULL; *free_dist = 0; } static inline void hypre_UnorderedBigIntMapFindCloserFreeBucket( hypre_UnorderedBigIntMap *m, #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *start_seg, #endif hypre_BigHopscotchBucket **free_bucket, HYPRE_Int *free_dist) { hypre_BigHopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1); HYPRE_Int move_free_dist; for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist) { hypre_uint 
start_hop_info = move_bucket->hopInfo; HYPRE_Int move_new_free_dist = -1; hypre_uint mask = 1; HYPRE_Int i; for (i = 0; i < move_free_dist; ++i, mask <<= 1) { if (mask & start_hop_info) { move_new_free_dist = i; break; } } if (-1 != move_new_free_dist) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]); if (start_seg != move_segment) { omp_set_lock(&move_segment->lock); } #endif if (start_hop_info == move_bucket->hopInfo) { // new_free_bucket -> free_bucket and empty new_free_bucket hypre_BigHopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist; (*free_bucket)->data = new_free_bucket->data; (*free_bucket)->key = new_free_bucket->key; (*free_bucket)->hash = new_free_bucket->hash; #ifdef HYPRE_CONCURRENT_HOPSCOTCH ++move_segment->timestamp; #pragma omp flush #endif move_bucket->hopInfo |= (1U << move_free_dist); move_bucket->hopInfo &= ~(1U << move_new_free_dist); *free_bucket = new_free_bucket; *free_dist -= move_free_dist - move_new_free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (start_seg != move_segment) { omp_unset_lock(&move_segment->lock); } #endif return; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (start_seg != move_segment) { omp_unset_lock(&move_segment->lock); } #endif } ++move_bucket; } *free_bucket = NULL; *free_dist = 0; } void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel); void hypre_UnorderedBigIntSetCreate( hypre_UnorderedBigIntSet *s, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel); void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel); void hypre_UnorderedBigIntMapCreate( hypre_UnorderedBigIntMap *m, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel); void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s ); void hypre_UnorderedBigIntSetDestroy( hypre_UnorderedBigIntSet *s ); void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m ); 
void hypre_UnorderedBigIntMapDestroy( hypre_UnorderedBigIntMap *m );

// Query Operations .........................................................

/*
 * Lock-free membership test: returns 1 if key is in the set, 0 otherwise.
 *
 * hopInfo[bucket] is a bitmap: bit i set means slot bucket+i holds an entry
 * whose home bucket is `bucket` (see the Put functions, which set
 * hopInfo[bucket] |= 1U << free_dist).  The common cases (empty bucket,
 * single entry at distance 0) are answered without scanning.
 *
 * Under HYPRE_CONCURRENT_HOPSCOTCH the scan takes no lock; instead the
 * segment timestamp is sampled before the scan and rechecked after a miss.
 * If it changed, a concurrent displacement may have moved the key while we
 * scanned, so we fall back to a linear probe of the whole hop range.
 */
static inline HYPRE_Int hypre_UnorderedIntSetContains( hypre_UnorderedIntSet *s,
                                                       HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif

   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
#endif
   HYPRE_Int bucket = hash & s->bucketMask;
   hypre_uint hopInfo = s->hopInfo[bucket];

   if (0 == hopInfo)
   {
      /* No entry hashes to this bucket. */
      return 0;
   }
   else if (1 == hopInfo )
   {
      /* Exactly one entry, stored in the home bucket itself. */
      if (hash == s->hash[bucket] && key == s->key[bucket])
      {
         return 1;
      }
      else
      {
         return 0;
      }
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Sample the segment timestamp before the optimistic (unlocked) scan. */
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   /* Visit each set bit of hopInfo, lowest first. */
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;

      if (hash == s->hash[currElm] && key == s->key[currElm])
      {
         return 1;
      }
      hopInfo &= ~(1U << i);
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Timestamp unchanged: no concurrent displacement happened, miss is real. */
   if (segment->timestamp == startTimestamp)
   {
      return 0;
   }
#endif

   /* Timestamp changed (or serial fall-through): a displacement may have
      moved the key during the scan, so probe the full hop range linearly. */
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
   {
      if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
      {
         return 1;
      }
   }
   return 0;
}

/*
 * Big-integer variant of hypre_UnorderedIntSetContains; identical protocol,
 * but keys/hashes are HYPRE_BigInt and mask results are narrowed to
 * HYPRE_Int for indexing.
 */
static inline HYPRE_Int hypre_UnorderedBigIntSetContains( hypre_UnorderedBigIntSet *s,
                                                          HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif

   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[(HYPRE_Int)(hash & s->segmentMask)];
#endif
   HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask);
   hypre_uint hopInfo = s->hopInfo[bucket];

   if (0 == hopInfo)
   {
      return 0;
   }
   else if (1 == hopInfo )
   {
      if (hash == s->hash[bucket] && key == s->key[bucket])
      {
         return 1;
      }
      else
      {
         return 0;
      }
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;

      if (hash == s->hash[currElm] && key == s->key[currElm])
      {
         return 1;
      }
      hopInfo &= ~(1U << i);
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   if (segment->timestamp == startTimestamp)
   {
      return 0;
   }
#endif

   /* Fallback probe after a concurrent displacement (see Int variant). */
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
   {
      if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
      {
         return 1;
      }
   }
   return 0;
}

/**
 * Lock-free lookup of the value stored under `key`.
 * Same optimistic timestamp protocol as hypre_UnorderedIntSetContains, but
 * over an array of hypre_HopscotchBucket structs instead of parallel arrays.
 *
 * @ret -1 if key doesn't exist
 */
static inline HYPRE_Int hypre_UnorderedIntMapGet( hypre_UnorderedIntMap *m,
                                                  HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif

   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
#endif
   hypre_HopscotchBucket *elmAry = &(m->table[hash & m->bucketMask]);
   hypre_uint hopInfo = elmAry->hopInfo;

   if (0 == hopInfo)
   {
      return -1;
   }
   else if (1 == hopInfo )
   {
      if (hash == elmAry->hash && key == elmAry->key)
      {
         return elmAry->data;
      }
      else
      {
         return -1;
      }
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_HopscotchBucket* currElm = elmAry + i;

      if (hash == currElm->hash && key == currElm->key)
      {
         return currElm->data;
      }
      hopInfo &= ~(1U << i);
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   if (segment->timestamp == startTimestamp)
   {
      return -1;
   }
#endif

   /* Timestamp moved: re-probe the whole hop range linearly. */
   hypre_HopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]);
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
   {
      if (hash == currBucket->hash && key == currBucket->key)
      {
         return currBucket->data;
      }
   }
   return -1;
}

/*
 * Big-integer variant of hypre_UnorderedIntMapGet; returns -1 when the key
 * is absent.  Note the final fallback indexes with (hash & m->bucketMask)
 * without the explicit HYPRE_Int cast used above — same value, implicit
 * narrowing.
 */
static inline HYPRE_Int hypre_UnorderedBigIntMapGet( hypre_UnorderedBigIntMap *m,
                                                     HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif

   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[(HYPRE_Int)(hash & m->segmentMask)];
#endif
   hypre_BigHopscotchBucket *elmAry = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]);
   hypre_uint hopInfo = elmAry->hopInfo;

   if (0 == hopInfo)
   {
      return -1;
   }
   else if (1 == hopInfo )
   {
      if (hash == elmAry->hash && key == elmAry->key)
      {
         return elmAry->data;
      }
      else
      {
         return -1;
      }
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_BigHopscotchBucket* currElm = elmAry + i;

      if (hash == currElm->hash && key == currElm->key)
      {
         return currElm->data;
      }
      hopInfo &= ~(1U << i);
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   if (segment->timestamp == startTimestamp)
   {
      return -1;
   }
#endif

   hypre_BigHopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]);
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
   {
      if (hash == currBucket->hash && key == currBucket->key)
      {
         return currBucket->data;
      }
   }
   return -1;
}

//status Operations .........................................................
/*
 * Number of occupied slots in the set: scans every slot (bucketMask+1
 * buckets plus the insert-range overflow tail) and counts non-EMPTY hashes.
 * O(capacity); not synchronized — the result is only meaningful when no
 * concurrent modification is in flight.
 */
static inline HYPRE_Int hypre_UnorderedIntSetSize( hypre_UnorderedIntSet *s )
{
   HYPRE_Int counter = 0;
   HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int i;
   for (i = 0; i < n; ++i)
   {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i])
      {
         ++counter;
      }
   }
   return counter;
}

/* Big-integer set variant of hypre_UnorderedIntSetSize (n is HYPRE_BigInt
   because bucketMask is; the loop index stays HYPRE_Int). */
static inline HYPRE_Int hypre_UnorderedBigIntSetSize( hypre_UnorderedBigIntSet *s )
{
   HYPRE_Int counter = 0;
   HYPRE_BigInt n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int i;
   for (i = 0; i < n; ++i)
   {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i])
      {
         ++counter;
      }
   }
   return counter;
}

/* Map variant: same full-table scan over the bucket-struct array. */
static inline HYPRE_Int hypre_UnorderedIntMapSize( hypre_UnorderedIntMap *m )
{
   HYPRE_Int counter = 0;
   HYPRE_Int n = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int i;
   for (i = 0; i < n; ++i)
   {
      if ( HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[i].hash )
      {
         ++counter;
      }
   }
   return counter;
}

/* Big-integer map variant of hypre_UnorderedIntMapSize. */
static inline HYPRE_Int hypre_UnorderedBigIntMapSize( hypre_UnorderedBigIntMap *m )
{
   HYPRE_Int counter = 0;
   HYPRE_Int n = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int i;
   for (i = 0; i < n; ++i)
   {
      if ( HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[i].hash )
      {
         ++counter;
      }
   }
   return counter;
}

HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len );
HYPRE_BigInt *hypre_UnorderedBigIntSetCopyToArray( hypre_UnorderedBigIntSet *s, HYPRE_Int *len );

//modification Operations ...................................................

/*
 * Insert `key` into the set (no-op if already present).
 *
 * Protocol: (1) lock the key's segment; (2) scan hopInfo — if the key is
 * already there, unlock and return; (3) linearly claim the first EMPTY slot
 * within INSERT_RANGE via compare-and-swap, marking it BUSY so concurrent
 * inserters skip it; (4) if the claimed slot is within HOP_RANGE of the home
 * bucket, publish key/hash and set the hopInfo bit; otherwise repeatedly
 * displace nearer entries (FindCloserFreeBucket) until it is, or give up.
 * On failure the table would need resizing, which is not implemented:
 * the process exits.
 */
static inline void hypre_UnorderedIntSetPut( hypre_UnorderedIntSet *s,
                                             HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif

   //LOCK KEY HASH ENTRY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
   omp_set_lock(&segment->lock);
#endif
   HYPRE_Int bucket = hash & s->bucketMask;

   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = s->hopInfo[bucket];
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;

      if (hash == s->hash[currElm] && key == s->key[currElm])
      {
         /* Already present: nothing to do. */
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return;
      }
      hopInfo &= ~(1U << i);
   }

   //LOOK FOR FREE BUCKET ....................
   HYPRE_Int free_bucket = bucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      /* Cheap read first, then CAS to atomically claim the slot (EMPTY->BUSY)
         so a concurrent inserter in another segment cannot take it too. */
      if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) &&
           (HYPRE_HOPSCOTCH_HASH_EMPTY ==
            hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket],
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
      {
         break;
      }
   }

   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            /* Claimed slot is reachable from the home bucket: publish. */
            s->key[free_bucket]  = key;
            s->hash[free_bucket] = hash;
            s->hopInfo[bucket]  |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return;
         }
         /* Too far: displace an entry closer to its own home bucket to move
            the free slot nearer ours; sets free_bucket = -1 on failure. */
         hypre_UnorderedIntSetFindCloserFreeBucket(s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                   segment,
#endif
                                                   &free_bucket, &free_dist);
      } while (-1 != free_bucket);
   }

   //NEED TO RESIZE ..........................
   hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return;
}

/*
 * Big-integer variant of hypre_UnorderedIntSetPut; same locking/CAS/
 * displacement protocol.
 * NOTE(review): the CAS casts &s->hash[free_bucket] to (HYPRE_Int *) even in
 * this BigInt variant — correct only if the hash array element type matches
 * HYPRE_Int width here; the struct declaration is outside this view, confirm.
 */
static inline void hypre_UnorderedBigIntSetPut( hypre_UnorderedBigIntSet *s,
                                                HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif

   //LOCK KEY HASH ENTRY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
   omp_set_lock(&segment->lock);
#endif
   HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask);

   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = s->hopInfo[bucket];
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;

      if (hash == s->hash[currElm] && key == s->key[currElm])
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return;
      }
      hopInfo &= ~(1U << i);
   }

   //LOOK FOR FREE BUCKET ....................
   HYPRE_Int free_bucket = bucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) &&
           (HYPRE_HOPSCOTCH_HASH_EMPTY ==
            hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket],
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
      {
         break;
      }
   }

   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            s->key[free_bucket]  = key;
            s->hash[free_bucket] = hash;
            s->hopInfo[bucket]  |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return;
         }
         hypre_UnorderedBigIntSetFindCloserFreeBucket(s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                      segment,
#endif
                                                      &free_bucket, &free_dist);
      } while (-1 != free_bucket);
   }

   //NEED TO RESIZE ..........................
   hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return;
}

/*
 * Insert (key, data) if the key is absent.
 * Returns HYPRE_HOPSCOTCH_HASH_EMPTY when the pair was inserted, or the
 * value already stored under `key` when it was present (the map is left
 * unchanged in that case).  Same segment-lock + CAS-claim + displacement
 * protocol as the Set Put functions, operating on bucket structs.
 */
static inline HYPRE_Int hypre_UnorderedIntMapPutIfAbsent( hypre_UnorderedIntMap *m,
                                                          HYPRE_Int key,
                                                          HYPRE_Int data )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif

   //LOCK KEY HASH ENTRY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
   omp_set_lock(&segment->lock);
#endif
   hypre_HopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]);

   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = startBucket->hopInfo;
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_HopscotchBucket* currElm = startBucket + i;

      if (hash == currElm->hash && key == currElm->key)
      {
         /* Key present: return its existing value, do not overwrite. */
         HYPRE_Int rc = currElm->data;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return rc;
      }
      hopInfo &= ~(1U << i);
   }

   //LOOK FOR FREE BUCKET ....................
   hypre_HopscotchBucket* free_bucket = startBucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) &&
           (HYPRE_HOPSCOTCH_HASH_EMPTY ==
            hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
      {
         break;
      }
   }

   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            /* data/key are written before hash, which un-BUSYs the slot last. */
            free_bucket->data = data;
            free_bucket->key  = key;
            free_bucket->hash = hash;
            startBucket->hopInfo |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return HYPRE_HOPSCOTCH_HASH_EMPTY;
         }
         /* Sets free_bucket = NULL on failure (pointer variant). */
         hypre_UnorderedIntMapFindCloserFreeBucket(m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                   segment,
#endif
                                                   &free_bucket, &free_dist);
      } while (NULL != free_bucket);
   }

   //NEED TO RESIZE ..........................
   hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return HYPRE_HOPSCOTCH_HASH_EMPTY;
}

/*
 * Big-integer variant of hypre_UnorderedIntMapPutIfAbsent.
 * NOTE(review): as in the BigInt set, the CAS casts &free_bucket->hash to
 * (HYPRE_Int *) — valid only if the bucket's hash field is HYPRE_Int-sized;
 * confirm against the struct declaration earlier in this header.
 */
static inline HYPRE_Int hypre_UnorderedBigIntMapPutIfAbsent( hypre_UnorderedBigIntMap *m,
                                                             HYPRE_BigInt key,
                                                             HYPRE_Int data)
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif

   //LOCK KEY HASH ENTRY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
   omp_set_lock(&segment->lock);
#endif
   hypre_BigHopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]);

   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = startBucket->hopInfo;
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_BigHopscotchBucket* currElm = startBucket + i;

      if (hash == currElm->hash && key == currElm->key)
      {
         HYPRE_Int rc = currElm->data;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return rc;
      }
      hopInfo &= ~(1U << i);
   }

   //LOOK FOR FREE BUCKET ....................
   hypre_BigHopscotchBucket* free_bucket = startBucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) &&
           (HYPRE_HOPSCOTCH_HASH_EMPTY ==
            hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
      {
         break;
      }
   }

   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            free_bucket->data = data;
            free_bucket->key  = key;
            free_bucket->hash = hash;
            startBucket->hopInfo |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return HYPRE_HOPSCOTCH_HASH_EMPTY;
         }
         hypre_UnorderedBigIntMapFindCloserFreeBucket(m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                      segment,
#endif
                                                      &free_bucket, &free_dist);
      } while (NULL != free_bucket);
   }

   //NEED TO RESIZE ..........................
   hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return HYPRE_HOPSCOTCH_HASH_EMPTY;
}

#ifdef __cplusplus
} // extern "C"
#endif

#endif // hypre_HOPSCOTCH_HASH_HEADER
/* threshold.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/property.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/xml-tree.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ static const char *MinimalThresholdMap = "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image, % const size_t width,const size_t height, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o offset: the mean offset. % % o exception: return any errors or warnings in this structure. 
%
*/
/*
  AdaptiveThresholdImage: per-pixel local threshold.  For each pixel, the
  mean intensity of its width x height neighborhood (plus `offset`) is the
  threshold; each channel is snapped to 0 or QuantumRange against that mean.
  Uses a sliding-window column update: channel_sum holds the running window
  total and channel_bias the trailing column to subtract as the window moves
  one pixel to the right.
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const ssize_t offset,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    number_pixels;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Result is a deep clone so the source image is never modified. */
  threshold_image=CloneImage(image,0,0,MagickTrue,exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&threshold_image->exception);
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Local adaptive threshold.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  number_pixels=(MagickRealType) (width*height);
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      channel_bias,
      channel_sum;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    register IndexPacket
      *magick_restrict threshold_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      u,
      v;

    if (status == MagickFalse)
      continue;
    /* Virtual pixels: the window is centered on (x, y), so the view starts
       width/2 left and height/2 above the row and is padded by `width`. */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      height/2L,image->columns+width,height,exception);
    q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view);
    /* Seed the window sum for x == 0; channel_bias collects the window's
       last column (u == width-1) so it can be retired on the next step. */
    channel_bias=zero;
    channel_sum=zero;
    r=p;
    for (v=0; v < (ssize_t) height; v++)
    {
      for (u=0; u < (ssize_t) width; u++)
      {
        if (u == (ssize_t) (width-1))
          {
            channel_bias.red+=r[u].red;
            channel_bias.green+=r[u].green;
            channel_bias.blue+=r[u].blue;
            channel_bias.opacity+=r[u].opacity;
            if (image->colorspace == CMYKColorspace)
              /* NOTE(review): '=' here while the color channels use '+=' —
                 only the last row's index survives; looks inconsistent,
                 confirm against upstream. */
              channel_bias.index=(MagickRealType) GetPixelIndex(indexes+
                (r-p)+u);
          }
        channel_sum.red+=r[u].red;
        channel_sum.green+=r[u].green;
        channel_sum.blue+=r[u].blue;
        channel_sum.opacity+=r[u].opacity;
        if (image->colorspace == CMYKColorspace)
          /* NOTE(review): same '=' vs '+=' inconsistency as above. */
          channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u);
      }
      r+=image->columns+width;
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        mean;

      mean=zero;
      r=p;
      /* Slide the window right one pixel: drop the column recorded in
         channel_bias, then add the incoming rightmost column. */
      channel_sum.red-=channel_bias.red;
      channel_sum.green-=channel_bias.green;
      channel_sum.blue-=channel_bias.blue;
      channel_sum.opacity-=channel_bias.opacity;
      channel_sum.index-=channel_bias.index;
      channel_bias=zero;
      for (v=0; v < (ssize_t) height; v++)
      {
        /* r[0] is the window's new leftmost column: it becomes the bias to
           subtract on the following iteration. */
        channel_bias.red+=r[0].red;
        channel_bias.green+=r[0].green;
        channel_bias.blue+=r[0].blue;
        channel_bias.opacity+=r[0].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+
            (r-p)+0);
        channel_sum.red+=r[width-1].red;
        channel_sum.green+=r[width-1].green;
        channel_sum.blue+=r[width-1].blue;
        channel_sum.opacity+=r[width-1].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+
            width-1);
        r+=image->columns+width;
      }
      /* Window mean plus caller-supplied offset is the local threshold. */
      mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset);
      mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset);
      mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset);
      mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+
        offset);
      if (image->colorspace == CMYKColorspace)
        mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset);
      SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ?
        0 : QuantumRange);
      SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ?
        0 : QuantumRange);
      SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ?
        0 : QuantumRange);
      SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ?
        0 : QuantumRange);
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex(
          threshold_indexes+x) <= mean.index) ? 0 : QuantumRange));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(threshold_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress++ on a shared variable inside the OpenMP
           region without atomic/critical — a data race (the count is only
           cosmetic, but upstream guards this with #pragma omp critical;
           confirm). */
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically selects a threshold and replaces each
%  pixel in the image with a black pixel if the image intensity is less than
%  the selected threshold otherwise white.
%
%  The format of the AutoThresholdImage method is:
%
%      MagickBooleanType AutoThresholdImage(Image *image,
%        const AutoThresholdMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-threshold.
%
%    o method: choose from Kapur, OTSU, or Triangle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Kapur's maximum-entropy threshold over a 256-bin normalized histogram.
  Returns the threshold as a percentage in [0, 100], or -1.0 on allocation
  failure (exception is raised).  O(MaxIntensity^2): for each candidate bin j
  the entropies of the sub-histograms below and above j are recomputed.
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) ||
      (white_entropy == (double *) NULL))
    {
      /* Partial-allocation cleanup before raising the resource error. */
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Entropy for black and white parts of the histogram.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  /* epsilon guards the log()/division against (near-)empty partitions. */
  epsilon=MagickMinimumValue;
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy: bins [0, j], probabilities renormalized by the
      cumulative mass of that partition.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy: bins (j, MaxIntensity].
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
  */
  maximum_entropy=black_entropy[0]+white_entropy[0];
  threshold=0;
  for (j=1; j <= MaxIntensity; j++)
    if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
      {
        maximum_entropy=black_entropy[j]+white_entropy[j];
        threshold=(size_t) j;
      }
  /*
    Free resources.
  */
  white_entropy=(double *) RelinquishMagickMemory(white_entropy);
  black_entropy=(double *) RelinquishMagickMemory(black_entropy);
  cumulative_histogram=(double *)
    RelinquishMagickMemory(cumulative_histogram);
  return(100.0*threshold/MaxIntensity);
}

/*
  Otsu's method: choose the bin that maximizes the inter-class variance of
  the background/foreground split.  Returns the threshold as a percentage in
  [0, 100], or -1.0 on allocation failure.
*/
static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    max_sigma,
    *myu,
    *omega,
    *probability,
    *sigma,
    threshold;

  register ssize_t
    i;

  /*
    Compute optimal threshold from maximization of inter-class variance.
  */
  myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
  omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
  probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*probability));
  sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
  if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
      (probability == (double *) NULL) || (sigma == (double *) NULL))
    {
      if (sigma != (double *) NULL)
        sigma=(double *) RelinquishMagickMemory(sigma);
      if (probability != (double *) NULL)
        probability=(double *) RelinquishMagickMemory(probability);
      if (omega != (double *) NULL)
        omega=(double *) RelinquishMagickMemory(omega);
      if (myu != (double *) NULL)
        myu=(double *) RelinquishMagickMemory(myu);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Calculate probability density.
  */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    probability[i]=histogram[i];
  /*
    Generate probability of graylevels and mean value for separation:
    omega[i] is the cumulative probability up to bin i, myu[i] the
    cumulative (intensity-weighted) mean.
  */
  omega[0]=probability[0];
  myu[0]=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
  {
    omega[i]=omega[i-1]+probability[i];
    myu[i]=myu[i-1]+i*probability[i];
  }
  /*
    Sigma maximization: inter-class variance and compute optimal threshold.
    Note the loop deliberately stops before the last bin (i < MaxIntensity):
    splitting at the top bin puts everything in one class.
  */
  threshold=0;
  max_sigma=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    sigma[i]=0.0;
    if ((omega[i] != 0.0) && (omega[i] != 1.0))
      sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
        omega[i]));
    if (sigma[i] > max_sigma)
      {
        max_sigma=sigma[i];
        threshold=(double) i;
      }
  }
  /*
    Free resources.
  */
  myu=(double *) RelinquishMagickMemory(myu);
  omega=(double *) RelinquishMagickMemory(omega);
  probability=(double *) RelinquishMagickMemory(probability);
  sigma=(double *) RelinquishMagickMemory(sigma);
  return(100.0*threshold/MaxIntensity);
}

/*
  Triangle method: draw a line from the histogram peak to the far non-empty
  end and pick the bin with the largest perpendicular offset from that line.
  Returns the threshold as a percentage in [0, 100].
*/
static double TriangleThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    a,
    b,
    c,
    count,
    distance,
    inverse_ratio,
    max_distance,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Compute optimal threshold with triangle algorithm.
  */
  (void) exception;
  start=0;  /* find start bin, first bin not zero count */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* find end bin, last bin not zero count */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* find max bin, bin with largest count */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Compute threshold at split point.
    Line a*x + b*y + c = 0 runs from the peak (x1, y1) to the longer tail's
    endpoint (x2, 0).
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  /* NOTE(review): true point-line distance normalizes by sqrt(a*a+b*b);
     including c here only rescales all distances by a constant, so the
     argmax below is unaffected — confirm this matches upstream intent. */
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  /* Scan the long side of the peak; the sign test keeps only bins on the
     expected side of the line. */
  if (x2 == (double) start)
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}

/*
  Build a normalized 256-bin intensity histogram of `image`, pick a global
  threshold with the requested method (Kapur, Otsu [default], or Triangle),
  record it in the "auto-threshold:threshold" image property, and bilevel
  the image at that threshold.  Returns MagickFalse if the histogram cannot
  be allocated or the method fails.
*/
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Bin by 8-bit intensity regardless of quantum depth. */
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  /* PerceptibleReciprocal avoids a division by zero for an empty image. */
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(image,histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /* The method helpers signal failure by returning a negative threshold. */
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property);
  /* threshold is a percentage; rescale to the quantum range for BilevelImage. */
  return(BilevelImage(image,QuantumRange*threshold/100.0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that give is set to it maximum or QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImageChannel method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold)
%      MagickBooleanType BilevelImageChannel(Image *image,
%        const ChannelType channel,const double threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o threshold: define the threshold values.
%
%  Aside: You can get the same results as operator using LevelImageChannels()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold)
{
  MagickBooleanType
    status;

  /* convenience wrapper: threshold the default channels */
  status=BilevelImageChannel(image,DefaultChannels,threshold);
  return(status);
}

MagickExport MagickBooleanType BilevelImageChannel(Image *image,
  const ChannelType channel,const double threshold)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* another row already failed; skip remaining rows */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      {
        /* default channels: threshold on gray intensity, replicate to RGB */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIntensity(image,q) <= threshold ? 0 :
            QuantumRange);
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
      }
    else
      /* per-channel thresholding */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
                threshold ? 0 : QuantumRange);
            else
              /* with a matte, opacity is interpreted as alpha */
              SetPixelAlpha(q,(MagickRealType) GetPixelAlpha(q) <= threshold ?
                OpaqueOpacity : TransparentOpacity);
          }
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
            threshold ? 0 : QuantumRange);
        q++;
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,const char *threshold)
%      MagickBooleanType BlackThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *threshold)
{
  MagickBooleanType
    status;

  /* convenience wrapper: threshold the default channels */
  status=BlackThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* a NULL threshold string is a no-op, not an error */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse the geometry-style threshold string; unspecified channels
    default to the red (first) value.
  */
  GetMagickPixelPacket(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      /* values given as percentages of the quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* pixels strictly below the threshold are forced to black (0) */
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) < threshold.red))
        SetPixelRed(q,0);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) < threshold.green))
        SetPixelGreen(q,0);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) < threshold.blue))
        SetPixelBlue(q,0);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) < threshold.opacity))
        SetPixelOpacity(q,0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index))
        SetPixelIndex(indexes+x,0);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() set each pixel whose value is below zero to zero and any the
%  pixel whose value is above the
quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImageChannel method is:
%
%      MagickBooleanType ClampImage(Image *image)
%      MagickBooleanType ClampImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image)
{
  MagickBooleanType
    status;

  /* convenience wrapper: clamp the default channels */
  status=ClampImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType ClampImageChannel(Image *image,
  const ChannelType channel)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      /* pseudo-class images: clamp the colormap entries, then sync pixels */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampPixel((MagickRealType) GetPixelIndex(
          indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocate the given ThresholdMap
%
%  The format of the ListThresholdMaps method is:
%
%      ThresholdMap
*DestroyThresholdMap(Threshold *map) % % A description of each parameter follows. % % o map: Pointer to the Threshold map to destroy % */ MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map) { assert(map != (ThresholdMap *) NULL); if (map->map_id != (char *) NULL) map->map_id=DestroyString(map->map_id); if (map->description != (char *) NULL) map->description=DestroyString(map->description); if (map->levels != (ssize_t *) NULL) map->levels=(ssize_t *) RelinquishMagickMemory(map->levels); map=(ThresholdMap *) RelinquishMagickMemory(map); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMapFile() look for a given threshold map name or alias in the % given XML file data, and return the allocated the map when found. % % The format of the ListThresholdMaps method is: % % ThresholdMap *GetThresholdMap(const char *xml,const char *filename, % const char *map_id,ExceptionInfo *exception) % % A description of each parameter follows. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
  const char *filename,const char *map_id,ExceptionInfo *exception)
{
  const char
    *attribute,
    *content;

  double
    value;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  map = (ThresholdMap *) NULL;
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(map);
  /* search <threshold> elements for a matching "map" or "alias" attribute */
  for (threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    attribute=GetXMLTreeAttribute(threshold, "map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold, "alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /* The map has been found -- allocate a Threshold Map to return */
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  /*
    Assign basic attributes.
  */
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels, "divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Allocate threshold levels array.
  */
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  {
    char
      *p;

    register ssize_t
      i;

    /*
      Parse levels into integer array.
    */
    for (i=0; i< (ssize_t) (map->width*map->height); i++)
    {
      map->levels[i]=(ssize_t) strtol(content,&p,10);
      /* strtol leaves p == content when no digits were consumed */
      if (p == content)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
            (double) map->levels[i],map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      content=p;
    }
    /* any trailing parsable value means the map declared too many levels */
    value=(double) strtol(content,&p,10);
    (void) value;
    if (p != content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
  }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() load and search one or more threshold map files for the
%  a map matching the given name or alias.
% % The format of the GetThresholdMap method is: % % ThresholdMap *GetThresholdMap(const char *map_id, % ExceptionInfo *exception) % % A description of each parameter follows. % % o map_id: ID of the map to look for. % % o exception: return any errors or warnings in this structure. % */ MagickExport ThresholdMap *GetThresholdMap(const char *map_id, ExceptionInfo *exception) { const StringInfo *option; LinkedListInfo *options; ThresholdMap *map; map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception); if (map != (ThresholdMap *) NULL) return(map); options=GetConfigureOptions(ThresholdsFilename,exception); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { map=GetThresholdMapFile((const char *) GetStringInfoDatum(option), GetStringInfoPath(option),map_id,exception); if (map != (ThresholdMap *) NULL) break; option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L i s t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMapFile() lists the threshold maps and their descriptions % in the given XML file data. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,const char*xml, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to the output FILE. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  XMLTreeInfo
    *thresholds,
    *threshold,
    *description;

  const char
    *map,
    *alias,
    *content;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  /* print one table row per <threshold> element */
  for( threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    map = GetXMLTreeAttribute(threshold, "map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias = GetXMLTreeAttribute(threshold, "alias");
    /* alias is optional, no if test needed */
    description=GetXMLTreeChild(threshold,"description");
    if ( description == (XMLTreeInfo *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if ( content == (char *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t T h r e s h o l d M a p s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  An pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n   Threshold Maps for Ordered Dither Operations\n");
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    /* accumulate success across files with a bitwise-and of boolean status */
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d D i t h e r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() uses the ordered dithering technique of reducing color
%  images to monochrome using positional information to retain as much
%  information as possible.
%
%  WARNING: This function is deprecated, and is now just a call to
%  the more powerful OrderedPosterizeImage(); function.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image)
%      MagickBooleanType OrderedDitherImageChannel(Image *image,
%        const ChannelType channel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image)
{
  MagickBooleanType
    status;

  /* deprecated wrapper: dither the default channels */
  status=OrderedDitherImageChannel(image,DefaultChannels,&image->exception);
  return(status);
}

MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Call the augmented function OrderedPosterizeImage() with the default
    8x8 ordered-dither map.
  */
  status=OrderedPosterizeImageChannel(image,channel,"o8x8",exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d P o s t e r i z e I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedPosterizeImage() will perform a ordered dither based on a number
%  of pre-defined dithering threshold maps, but over multiple intensity
%  levels, which can be different for different channels, according to the
%  input argument.
%
%  The format of the OrderedPosterizeImage method is:
%
%      MagickBooleanType OrderedPosterizeImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%      MagickBooleanType OrderedPosterizeImageChannel(Image *image,
%        const ChannelType channel,const char *threshold_map,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedPosterizeImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /* convenience wrapper: posterize the default channels */
  status=OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map,
    exception);
  return(status);
}

MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image,
  const ChannelType channel,const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  LongPixelPacket
    levels;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  ThresholdMap
    *map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* a NULL map specification is a no-op, not an error */
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  {
    char
      token[MaxTextExtent];

    register const char
      *p;

    /*
      Extract the leading map name (up to whitespace or comma) from the
      threshold_map argument and look it up.
    */
    p=(char *)threshold_map;
    while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
           (*p != '\0'))
      p++;
    threshold_map=p;
    while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
           (*p != '\0'))
    {
      if ((p-threshold_map) >= (MaxTextExtent-1))
        break;
      token[p-threshold_map] = *p;
      p++;
    }
    token[p-threshold_map] = '\0';
    map = GetThresholdMap(token, exception);
    if ( map == (ThresholdMap *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
        return(MagickFalse);
      }
  }
  /*
    Set channel levels from extra comma separated arguments
    Default to 2, the single value given, or individual channel values
  */
#if 1
  {
    /* parse directly as a comma separated list of integers */
    char
      *p;

    p = strchr((char *) threshold_map,',');
    if ( p != (char *) NULL && isdigit((int) ((unsigned char) *(++p))) )
      levels.index = (unsigned int) strtoul(p, &p, 10);
    else
      levels.index = 2;
    /* the single (or default) value applies to every selected channel */
    levels.red     = ((channel & RedChannel  )   != 0) ? levels.index : 0;
    levels.green   = ((channel & GreenChannel)   != 0) ? levels.index : 0;
    levels.blue    = ((channel & BlueChannel)    != 0) ? levels.index : 0;
    levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0;
    levels.index   = ((channel & IndexChannel) != 0
        && (image->colorspace == CMYKColorspace)) ? levels.index : 0;
    /* if more than a single number, each channel has a separate value */
    if ( p != (char *) NULL && *p == ',' )
      {
        p=strchr((char *) threshold_map,',');
        p++;
        /* comma-operator idiom: read value, then skip a trailing comma */
        if ((channel & RedChannel) != 0)
          levels.red = (unsigned int) strtoul(p, &p, 10),
            (void)(*p == ',' && p++);
        if ((channel & GreenChannel) != 0)
          levels.green = (unsigned int) strtoul(p, &p, 10),
            (void)(*p == ',' && p++);
        if ((channel & BlueChannel) != 0)
          levels.blue = (unsigned int) strtoul(p, &p, 10),
            (void)(*p == ',' && p++);
        if ((channel & IndexChannel) != 0 &&
            image->colorspace == CMYKColorspace)
          levels.index=(unsigned int) strtoul(p, &p, 10),
            (void)(*p == ',' && p++);
        if ((channel & OpacityChannel) != 0)
          levels.opacity = (unsigned int) strtoul(p, &p, 10),
            (void)(*p == ',' && p++);
      }
  }
#else
  /* Parse level values as a geometry */
  /*
    This is difficult!
    How to map GeometryInfo structure elements into
    LongPixelPacket structure elements, but according to channel?
    Note the channels list may skip elements!!!!
    EG  -channel BA  -ordered-dither map,2,3
    will need to map  g.rho -> l.blue, and g.sigma -> l.opacity
    A simpler way is needed, probably converting geometry to a temporary
    array, then using channel to advance the index into ssize_t pixel packet.
  */
#endif
#if 0
  printf("DEBUG levels  r=%u g=%u b=%u a=%u i=%u\n",
    levels.red, levels.green, levels.blue, levels.opacity, levels.index);
#endif
  {
    /* Do the posterized ordered dithering of the image */
    ssize_t
      d;

    /* d = number of pseudo-level divisions added between color levels */
    d = map->divisor-1;

    /* reduce levels to levels - 1 */
    levels.red     = levels.red     ? levels.red-1     : 0;
    levels.green   = levels.green   ? levels.green-1   : 0;
    levels.blue    = levels.blue    ? levels.blue-1    : 0;
    levels.opacity = levels.opacity ? levels.opacity-1 : 0;
    levels.index   = levels.index   ? levels.index-1   : 0;

    if (SetImageStorageClass(image,DirectClass) == MagickFalse)
      {
        InheritException(exception,&image->exception);
        return(MagickFalse);
      }
    status=MagickTrue;
    progress=0;
    image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register IndexPacket
        *magick_restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          threshold,
          t,
          l;

        /*
          Figure out the dither threshold for this pixel
          This must be a integer from 1 to map->divisor-1
        */
        threshold = map->levels[(x%map->width) +map->width*(y%map->height)];
        /*
          Dither each channel in the image as appropriate
          Notes on the integer Math...
              total number of divisions = (levels-1)*(divisor-1)+1)
              t1 = this colors pseudo_level =
                      q->red * total_divisions / (QuantumRange+1)
              l = posterization level       0..levels
              t = dither threshold level    0..divisor-1  NB: 0 only on last
              Each color_level is of size   QuantumRange / (levels-1)
              NB: All input levels and divisor are already had 1 subtracted
              Opacity is inverted so 'off' represents transparent.
        */
        if (levels.red)
          {
            t = (ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1));
            l = t/d;
            t = t-l*d;
            SetPixelRed(q,ClampToQuantum((MagickRealType)
              ((l+(t >= threshold))*(MagickRealType) QuantumRange/
              levels.red)));
          }
        if (levels.green)
          {
            t = (ssize_t) (QuantumScale*GetPixelGreen(q)*(levels.green*d+1));
            l = t/d;
            t = t-l*d;
            SetPixelGreen(q,ClampToQuantum((MagickRealType)
              ((l+(t >= threshold))*(MagickRealType) QuantumRange/
              levels.green)));
          }
        if (levels.blue)
          {
            t = (ssize_t) (QuantumScale*GetPixelBlue(q)*(levels.blue*d+1));
            l = t/d;
            t = t-l*d;
            SetPixelBlue(q,ClampToQuantum((MagickRealType)
              ((l+(t >= threshold))*(MagickRealType) QuantumRange/
              levels.blue)));
          }
        if (levels.opacity)
          {
            /* opacity is inverted: 1.0-scale so 'off' maps to transparent */
            t = (ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))*
              (levels.opacity*d+1));
            l = t/d;
            t = t-l*d;
            SetPixelOpacity(q,ClampToQuantum((MagickRealType)
              ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/
              levels.opacity)));
          }
        if (levels.index)
          {
            t = (ssize_t) (QuantumScale*GetPixelIndex(indexes+x)*
              (levels.index*d+1));
            l = t/d;
            t = t-l*d;
            SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+
              (t>=threshold))*(MagickRealType) QuantumRange/levels.index)));
          }
        q++;
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,progress++,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    image_view=DestroyCacheView(image_view);
  }
  map=DestroyThresholdMap(map);
  /*
    NOTE(review): returns MagickTrue unconditionally; per-row failures
    recorded in `status` are discarded.  Confirm against upstream before
    changing to return(status).
  */
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P e r c e p t i b l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PerceptibleImage() set each pixel whose value is less than |epsilon| to
%  epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
%  unchanged.
%
%  The format of the PerceptibleImageChannel method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon)
%      MagickBooleanType PerceptibleImageChannel(Image *image,
%        const ChannelType channel,const double epsilon)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
*/

/*
  PerceptibleThreshold() pushes a quantum whose magnitude is below epsilon
  out to +/-epsilon, preserving its sign; larger magnitudes pass through.
  NOTE(review): when Quantum is an unsigned type the cast keeps quantum >= 0
  and sign is always 1.0; the negative branch only matters for signed/HDRI
  quantum builds -- confirm against the build's Quantum typedef.
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

/*
  PerceptibleImage() applies PerceptibleThreshold() to the default channels;
  convenience wrapper for PerceptibleImageChannel().
*/
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon)
{
  MagickBooleanType
    status;

  status=PerceptibleImageChannel(image,DefaultChannels,epsilon);
  return(status);
}

/*
  PerceptibleImageChannel() clamps tiny quantum values of the selected
  channels to +/-epsilon.  For PseudoClass images only the colormap is
  rewritten (note: the `channel` argument is ignored on that path).

  NOTE(review): `progress`/`status` are updated from inside the OpenMP
  parallel region without atomic protection; progress counts may race.
*/
MagickExport MagickBooleanType PerceptibleImageChannel(Image *image,
  const ChannelType channel,const double epsilon)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      /* PseudoClass: threshold the colormap entries, then sync pixels */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x),
          epsilon));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R a n d o m T h r e s h o l d I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a
random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const char *thresholds,ExceptionInfo *exception)
%      MagickBooleanType RandomThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *thresholds,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o thresholds: a geometry string containing low,high thresholds.  If the
%      string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4
%      is performed instead.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  RandomThresholdImage() thresholds the default channels; convenience
  wrapper for RandomThresholdImageChannel().
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=RandomThresholdImageChannel(image,DefaultChannels,thresholds,
    exception);
  return(status);
}

/*
  RandomThresholdImageChannel() binarizes each selected channel: values
  below `low` always go to 0, values above `high` always go to QuantumRange,
  and values in between are compared against a per-pixel pseudo random
  threshold.  Small non-percent thresholds fall through to the legacy
  ordered-dither behavior.  With channel == CompositeChannels the image is
  reduced to a two entry colormap based on pixel intensity.

  NOTE(review): `progress`/`status` are updated from inside the OpenMP
  parallel regions without atomic protection; progress counts may race.
*/
MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickRealType
    min_threshold,
    max_threshold;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  GetMagickPixelPacket(image,&threshold);
  min_threshold=0.0;
  max_threshold=(MagickRealType) QuantumRange;
  flags=ParseGeometry(thresholds,&geometry_info);
  min_threshold=geometry_info.rho;
  max_threshold=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    max_threshold=min_threshold;
  if (strchr(thresholds,'%') != (char *) NULL)
    {
      /* percentage thresholds: scale into the quantum range */
      max_threshold*=(MagickRealType) (0.01*QuantumRange);
      min_threshold*=(MagickRealType) (0.01*QuantumRange);
    }
  else
    if (((max_threshold == min_threshold) || (max_threshold == 1)) &&
        (min_threshold <= 8))
      {
        /*
          Backward Compatibility -- ordered-dither -- IM v 6.2.9-6.
        */
        status=OrderedPosterizeImageChannel(image,channel,thresholds,exception);
        return(status);
      }
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  if (channel == CompositeChannels)
    {
      /* composite path: build a 2-color colormap and threshold intensity */
      if (AcquireImageColormap(image,2) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      random_info=AcquireRandomInfoThreadSet();
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickBooleanType
          sync;

        register IndexPacket
          *magick_restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          IndexPacket
            index;

          MagickRealType
            intensity;

          /* threshold.index is used here as per-pixel scratch */
          intensity=GetPixelIntensity(image,q);
          if (intensity < min_threshold)
            threshold.index=min_threshold;
          else
            if (intensity > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType)(QuantumRange*
                GetPseudoRandomValue(random_info[id]));
          index=(IndexPacket) (intensity <= threshold.index ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,ThresholdImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      random_info=DestroyRandomInfoThreadSet(random_info);
      return(status);
    }
  /* per-channel path: threshold each selected channel independently */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          if ((MagickRealType) GetPixelRed(q) < min_threshold)
            threshold.red=min_threshold;
          else
            if ((MagickRealType) GetPixelRed(q) > max_threshold)
              threshold.red=max_threshold;
            else
              threshold.red=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & GreenChannel) != 0)
        {
          if ((MagickRealType) GetPixelGreen(q) < min_threshold)
            threshold.green=min_threshold;
          else
            if ((MagickRealType) GetPixelGreen(q) > max_threshold)
              threshold.green=max_threshold;
            else
              threshold.green=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & BlueChannel) != 0)
        {
          if ((MagickRealType) GetPixelBlue(q) < min_threshold)
            threshold.blue=min_threshold;
          else
            if ((MagickRealType) GetPixelBlue(q) > max_threshold)
              threshold.blue=max_threshold;
            else
              threshold.blue=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & OpacityChannel) != 0)
        {
          if ((MagickRealType) GetPixelOpacity(q) < min_threshold)
            threshold.opacity=min_threshold;
          else
            if ((MagickRealType) GetPixelOpacity(q) > max_threshold)
              threshold.opacity=max_threshold;
            else
              threshold.opacity=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold)
            threshold.index=min_threshold;
          else
            if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      /* apply the chosen per-channel thresholds */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ? 0 :
          QuantumRange);
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ?
          0 : QuantumRange);
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ? 0 :
          QuantumRange);
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
          threshold.opacity ? 0 : QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
          threshold.index ? 0 : QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W h i t e T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
%  the threshold into white while leaving all pixels at or below the threshold
%  unchanged.
%
%  The format of the WhiteThresholdImage method is:
%
%      MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold)
%      MagickBooleanType WhiteThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  WhiteThresholdImage() thresholds the default channels, reporting errors
  into the image's own exception; wrapper for WhiteThresholdImageChannel().
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *threshold)
{
  MagickBooleanType
    status;

  status=WhiteThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

/*
  WhiteThresholdImageChannel() forces every selected-channel value strictly
  above its threshold to QuantumRange, leaving other values untouched.
  `thresholds` is a geometry string: rho[,sigma[,xi[,psi[,chi]]]] mapped to
  red,green,blue,opacity,index; omitted components default to the red value,
  and a '%' suffix scales all thresholds by QuantumRange/100.

  NOTE(review): `progress`/`status` are updated from inside the OpenMP
  parallel region without atomic protection; progress counts may race.
*/
MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  flags=ParseGeometry(thresholds,&geometry_info);
  GetMagickPixelPacket(image,&threshold);
  /* per-channel thresholds; any component not given falls back to red */
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /* a non-gray threshold on a gray image needs a color image to land in */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) > threshold.red))
        SetPixelRed(q,QuantumRange);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) > threshold.green))
        SetPixelGreen(q,QuantumRange);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) > threshold.blue))
        SetPixelBlue(q,QuantumRange);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) > threshold.opacity))
        SetPixelOpacity(q,QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x)) > threshold.index)
        SetPixelIndex(indexes+x,QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
stewart_aslanidi_noble_2009.c
#include <stdio.h>
#include "stewart_aslanidi_noble_2009.h"

/*
  Stewart-Aslanidi-Noble 2009 Purkinje cell model (CPU solver).
  State vector has NEQ entries; sv[0] is the transmembrane potential.
*/

/* Report the model's initial potential and equation count to the caller. */
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

/* Load the initial state vector for one cell (prints a banner once). */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    /* first_call guards the one-time banner; NOTE(review): a static flag is
       not thread-safe if this initializer is ever called concurrently */
    static bool first_call = true;

    if(first_call)
    {
#ifdef _WIN32
        printf("Using Stewart-Aslanidi-Noble 2009 CPU model\n");
#else
        print_to_stdout_and_file("Using Stewart-Aslanidi-Noble 2009 CPU model\n");
#endif
        first_call = false;
    }

    // TODO: Describe the parameters ...

    // Default initial conditions from the CellML
    /*
    sv[0] = -69.1370441635924;
    sv[1] = 136.781894160227;
    sv[2] = 8.80420286531673;
    sv[3] = 0.000101878186157052;
    sv[4] = 0.0457562667986602;
    sv[5] = 0.00550281999719088;
    sv[6] = 0.313213286437995;
    sv[7] = 0.00953708522974789;
    sv[8] = 0.0417391656294997;
    sv[9] = 0.190678733735145;
    sv[10] = 0.238219836154029;
    sv[11] = 0.000446818714055411;
    sv[12] = 0.000287906256206415;
    sv[13] = 0.989328560287987;
    sv[14] = 0.995474890442185;
    sv[15] = 0.999955429598213;
    sv[16] = 0.96386101799501;
    sv[17] = 0.00103618091196912;
    sv[18] = 3.10836886659417;
    sv[19] = 0.991580051907845;
    */

    // Steady-State after 10000ms in a 1cm cable --> BCL = 500ms
    sv[0] = -74.391220;
    sv[1] = 136.781891;
    sv[2] = 9.046331;
    sv[3] = 0.000147;
    sv[4] = 0.011298;
    sv[5] = 0.239796;
    sv[6] = 0.361989;
    sv[7] = 0.020117;
    sv[8] = 0.015747;
    sv[9] = 0.351714;
    sv[10] = 0.142322;
    sv[11] = 0.000474;
    sv[12] = 0.000143;
    sv[13] = 0.730152;
    sv[14] = 0.943502;
    sv[15] = 0.994737;
    sv[16] = 0.964095;
    sv[17] = 0.000698;
    sv[18] = 3.899154;
    sv[19] = 0.894456;
}

/*
  Advance every requested cell by num_steps Euler steps of size dt.
  cells_to_solve (optional) maps loop index -> state-vector id; when NULL
  the loop index itself is the id.  Cells are independent, so the outer
  loop is parallelized; sv_id is per-thread private.
*/
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    uint32_t sv_id;
    int i;

    // uint32_t *mapping = ((uint32_t*)extra_data);

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

/* One explicit (forward) Euler step for a single cell's state vector. */
void solve_model_ode_cpu(real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    // Save old value of the state vector
    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    // Solve Right-hand-side of the ODE's
    RHS_cpu(rY, rDY, stim_current);

    // Solve model using Forward Euler
    for(int i = 0; i < NEQ; i++)
        sv[i] = dt*rDY[i] + rY[i];
}

/*
  Right-hand side of the Stewart-Aslanidi-Noble 2009 ODE system
  (auto-generated CellML export style: CONSTANTS/ALGEBRAIC/RATES arrays).
  sv holds the current state, rDY_ receives d(state)/dt, and stim_current
  is added to the membrane-potential equation (RATES[0]).
*/
void RHS_cpu(const real *sv, real *rDY_, real stim_current)
{
    // States
    real STATES[NEQ];
    for (int i = 0; i < NEQ; i++)
        STATES[i] = sv[i];

    // This statement if to avoid instability problems when we have a transmembrane potential below -70mV,
    // which generates NaN on the solution from the ODEs
    //if (STATES[0] < INITIAL_V)
    //    STATES[0] = INITIAL_V;

    // Constants
    // NOTE(review): values match the published model's R (8314.472),
    // T (310), F (96485.3415) etc. -- confirm against the CellML export.
    real CONSTANTS[52];
    CONSTANTS[0] = 8314.472;
    CONSTANTS[1] = 310;
    CONSTANTS[2] = 96485.3415;
    CONSTANTS[3] = 0.185;
    CONSTANTS[4] = 0.016404;
    CONSTANTS[5] = 0.03;
    CONSTANTS[6] = 5.4;
    CONSTANTS[7] = 140;
    CONSTANTS[8] = 2;
    CONSTANTS[9] = 0.0145654;
    CONSTANTS[10] = 0.0234346;
    CONSTANTS[11] = 0.065;
    CONSTANTS[12] = 0.0918;
    CONSTANTS[13] = 0.2352;
    CONSTANTS[14] = 130.5744;
    CONSTANTS[15] = 0.00029;
    CONSTANTS[16] = 3.98e-5;
    CONSTANTS[17] = 0.000592;
    CONSTANTS[18] = 0.08184;
    CONSTANTS[19] = 0.0227;
    CONSTANTS[20] = 2.724;
    CONSTANTS[21] = 1;
    CONSTANTS[22] = 40;
    CONSTANTS[23] = 1000;
    CONSTANTS[24] = 0.1;
    CONSTANTS[25] = 2.5;
    CONSTANTS[26] = 0.35;
    CONSTANTS[27] = 1.38;
    CONSTANTS[28] = 87.5;
    CONSTANTS[29] = 0.1238;
    CONSTANTS[30] = 0.0005;
    CONSTANTS[31] = 0.0146;
    CONSTANTS[32] = 0.15;
    CONSTANTS[33] = 0.045;
    CONSTANTS[34] = 0.06;
    CONSTANTS[35] = 0.005;
    CONSTANTS[36] = 1.5;
    CONSTANTS[37] = 2.5;
    CONSTANTS[38] = 1;
    CONSTANTS[39] = 0.102;
    CONSTANTS[40] = 0.0038;
    CONSTANTS[41] = 0.00025;
    CONSTANTS[42] = 0.00036;
    CONSTANTS[43] = 0.006375;
    CONSTANTS[44] = 0.2;
    CONSTANTS[45] = 0.001;
    CONSTANTS[46] = 10;
    CONSTANTS[47] = 0.3;
    CONSTANTS[48] = 0.4;
    CONSTANTS[49] = 0.00025;
    CONSTANTS[50] = 0.001094;
    CONSTANTS[51] = 5.468e-5;

    // Algebraics (gating steady-states, time constants and ionic currents;
    // generated code -- do not reorder, later entries read earlier ones)
    real ALGEBRAIC[76];
    ALGEBRAIC[8] = 1.00000/(1.00000+exp((STATES[0]+20.0000)/7.00000));
    ALGEBRAIC[22] = 1102.50*exp(- pow(STATES[0]+27.0000, 2.00000)/225.000)+200.000/(1.00000+exp((13.0000 - STATES[0])/10.0000))+180.000/(1.00000+exp((STATES[0]+30.0000)/10.0000))+20.0000;
    ALGEBRAIC[9] = 0.670000/(1.00000+exp((STATES[0]+35.0000)/7.00000))+0.330000;
    ALGEBRAIC[23] = 562.000*exp(- pow(STATES[0]+27.0000, 2.00000)/240.000)+31.0000/(1.00000+exp((25.0000 - STATES[0])/10.0000))+80.0000/(1.00000+exp((STATES[0]+30.0000)/10.0000));
    ALGEBRAIC[10] = 0.600000/(1.00000+pow(STATES[11]/0.0500000, 2.00000))+0.400000;
    ALGEBRAIC[24] = 80.0000/(1.00000+pow(STATES[11]/0.0500000, 2.00000))+2.00000;
    ALGEBRAIC[11] = 1.00000/(1.00000+exp((STATES[0]+27.0000)/13.0000));
    ALGEBRAIC[25] = 85.0000*exp(- pow(STATES[0]+25.0000, 2.00000)/320.000)+5.00000/(1.00000+exp((STATES[0] - 40.0000)/5.00000))+42.0000;
    ALGEBRAIC[12] = 1.00000/(1.00000+exp((20.0000 - STATES[0])/13.0000));
    ALGEBRAIC[26] = 10.4500*exp(- pow(STATES[0]+40.0000, 2.00000)/1800.00)+7.30000;
    ALGEBRAIC[0] = 1.00000/(1.00000+exp((STATES[0]+80.6000)/6.80000));
    ALGEBRAIC[14] = 1.00000*exp(- 2.90000 - 0.0400000*STATES[0]);
    ALGEBRAIC[28] = 1.00000*exp(3.60000+ 0.110000*STATES[0]);
    ALGEBRAIC[37] = 4000.00/(ALGEBRAIC[14]+ALGEBRAIC[28]);
    ALGEBRAIC[1] = 1.00000/(1.00000+exp((- 26.0000 - STATES[0])/7.00000));
    ALGEBRAIC[15] = 450.000/(1.00000+exp((- 45.0000 - STATES[0])/10.0000));
    ALGEBRAIC[29] = 6.00000/(1.00000+exp((STATES[0]+30.0000)/11.5000));
    ALGEBRAIC[38] = 1.00000*ALGEBRAIC[15]*ALGEBRAIC[29];
    ALGEBRAIC[2] = 1.00000/(1.00000+exp((STATES[0]+88.0000)/24.0000));
    ALGEBRAIC[16] = 3.00000/(1.00000+exp((- 60.0000 - STATES[0])/20.0000));
    ALGEBRAIC[30] = 1.12000/(1.00000+exp((STATES[0] - 60.0000)/20.0000));
    ALGEBRAIC[39] = 1.00000*ALGEBRAIC[16]*ALGEBRAIC[30];
    ALGEBRAIC[3] = 1.00000/(1.00000+exp((- 5.00000 - STATES[0])/14.0000));
    ALGEBRAIC[17] = 1400.00/ pow((1.00000+exp((5.00000 - STATES[0])/6.00000)), 1.0 / 2);
    ALGEBRAIC[31] = 1.00000/(1.00000+exp((STATES[0] - 35.0000)/15.0000));
    ALGEBRAIC[40] = 1.00000*ALGEBRAIC[17]*ALGEBRAIC[31]+80.0000;
    ALGEBRAIC[4] = 1.00000/pow(1.00000+exp((- 56.8600 - STATES[0])/9.03000), 2.00000);
    ALGEBRAIC[18] = 1.00000/(1.00000+exp((- 60.0000 - STATES[0])/5.00000));
    ALGEBRAIC[32] = 0.100000/(1.00000+exp((STATES[0]+35.0000)/5.00000))+0.100000/(1.00000+exp((STATES[0] - 50.0000)/200.000));
    ALGEBRAIC[41] = 1.00000*ALGEBRAIC[18]*ALGEBRAIC[32];
    ALGEBRAIC[5] = 1.00000/pow(1.00000+exp((STATES[0]+71.5500)/7.43000), 2.00000);
    ALGEBRAIC[19] = (STATES[0]<- 40.0000 ? 0.0570000*exp(- (STATES[0]+80.0000)/6.80000) : 0.00000);
    ALGEBRAIC[33] = (STATES[0]<- 40.0000 ? 2.70000*exp( 0.0790000*STATES[0])+ 310000.*exp( 0.348500*STATES[0]) : 0.770000/( 0.130000*(1.00000+exp((STATES[0]+10.6600)/- 11.1000))));
    ALGEBRAIC[42] = 1.00000/(ALGEBRAIC[19]+ALGEBRAIC[33]);
    ALGEBRAIC[6] = 1.00000/pow(1.00000+exp((STATES[0]+71.5500)/7.43000), 2.00000);
    ALGEBRAIC[20] = (STATES[0]<- 40.0000 ? (( ( - 25428.0*exp( 0.244400*STATES[0]) - 6.94800e-06*exp( - 0.0439100*STATES[0]))*(STATES[0]+37.7800))/1.00000)/(1.00000+exp( 0.311000*(STATES[0]+79.2300))) : 0.00000);
    ALGEBRAIC[34] = (STATES[0]<- 40.0000 ? ( 0.0242400*exp( - 0.0105200*STATES[0]))/(1.00000+exp( - 0.137800*(STATES[0]+40.1400))) : ( 0.600000*exp( 0.0570000*STATES[0]))/(1.00000+exp( - 0.100000*(STATES[0]+32.0000))));
    ALGEBRAIC[43] = 1.00000/(ALGEBRAIC[20]+ALGEBRAIC[34]);
    ALGEBRAIC[7] = 1.00000/(1.00000+exp((- 8.00000 - STATES[0])/7.50000));
    ALGEBRAIC[21] = 1.40000/(1.00000+exp((- 35.0000 - STATES[0])/13.0000))+0.250000;
    ALGEBRAIC[35] = 1.40000/(1.00000+exp((STATES[0]+5.00000)/5.00000));
    ALGEBRAIC[44] = 1.00000/(1.00000+exp((50.0000 - STATES[0])/20.0000));
    ALGEBRAIC[46] = 1.00000*ALGEBRAIC[21]*ALGEBRAIC[35]+ALGEBRAIC[44];
    ALGEBRAIC[61] = (( (( CONSTANTS[20]*CONSTANTS[6])/(CONSTANTS[6]+CONSTANTS[21]))*STATES[2])/(STATES[2]+CONSTANTS[22]))/(1.00000+ 0.124500*exp(( - 0.100000*STATES[0]*CONSTANTS[2])/( CONSTANTS[0]*CONSTANTS[1]))+ 0.0353000*exp(( - STATES[0]*CONSTANTS[2])/( CONSTANTS[0]*CONSTANTS[1])));
    ALGEBRAIC[13] = (( CONSTANTS[0]*CONSTANTS[1])/CONSTANTS[2])*log(CONSTANTS[7]/STATES[2]);
    /* ALGEBRAIC[54] is INa (see trailing note on pacing block) */
    ALGEBRAIC[54] = CONSTANTS[14]*pow(STATES[8], 3.00000)*STATES[9]*STATES[10]*(STATES[0] - ALGEBRAIC[13]);
    ALGEBRAIC[55] = CONSTANTS[15]*(STATES[0] - ALGEBRAIC[13]);
    ALGEBRAIC[62] = ( CONSTANTS[23]*( exp(( CONSTANTS[26]*STATES[0]*CONSTANTS[2])/( CONSTANTS[0]*CONSTANTS[1]))*pow(STATES[2], 3.00000)*CONSTANTS[8] - exp(( (CONSTANTS[26] - 1.00000)*STATES[0]*CONSTANTS[2])/( CONSTANTS[0]*CONSTANTS[1]))*pow(CONSTANTS[7], 3.00000)*STATES[3]*CONSTANTS[25]))/( (pow(CONSTANTS[28], 3.00000)+pow(CONSTANTS[7], 3.00000))*(CONSTANTS[27]+CONSTANTS[8])*(1.00000+ CONSTANTS[24]*exp(( (CONSTANTS[26] - 1.00000)*STATES[0]*CONSTANTS[2])/( CONSTANTS[0]*CONSTANTS[1]))));
    ALGEBRAIC[47] = STATES[4]*CONSTANTS[9]*(STATES[0] - ALGEBRAIC[13]);
    ALGEBRAIC[27] = (( CONSTANTS[0]*CONSTANTS[1])/CONSTANTS[2])*log(CONSTANTS[6]/STATES[1]);
    ALGEBRAIC[50] = 1.00000/(1.00000+exp( 0.100000*(STATES[0]+75.4400)));
    ALGEBRAIC[51] = CONSTANTS[11]*ALGEBRAIC[50]*((STATES[0] - 8.00000) - ALGEBRAIC[27]);
    ALGEBRAIC[58] = CONSTANTS[18]*STATES[17]*STATES[16]*(STATES[0] - ALGEBRAIC[27]);
    ALGEBRAIC[59] = 1.00000/(1.00000+exp((5.00000 - STATES[0])/17.0000));
    ALGEBRAIC[60] = CONSTANTS[19]*ALGEBRAIC[59]*(STATES[0] - ALGEBRAIC[27]);
    ALGEBRAIC[52] = CONSTANTS[12]* pow((CONSTANTS[6]/5.40000), 1.0 / 2)*STATES[5]*STATES[6]*(STATES[0] - ALGEBRAIC[27]);
    ALGEBRAIC[36] = (( CONSTANTS[0]*CONSTANTS[1])/CONSTANTS[2])*log((CONSTANTS[6]+ CONSTANTS[5]*CONSTANTS[7])/(STATES[1]+ CONSTANTS[5]*STATES[2]));
    ALGEBRAIC[53] = CONSTANTS[13]*pow(STATES[7], 2.00000)*(STATES[0] - ALGEBRAIC[36]);
    ALGEBRAIC[56] = ( (( CONSTANTS[16]*STATES[12]*STATES[13]*STATES[14]*STATES[15]*4.00000*(STATES[0] - 15.0000)*pow(CONSTANTS[2], 2.00000))/( CONSTANTS[0]*CONSTANTS[1]))*( 0.250000*STATES[11]*exp(( 2.00000*(STATES[0] - 15.0000)*CONSTANTS[2])/( CONSTANTS[0]*CONSTANTS[1])) - CONSTANTS[8]))/(exp(( 2.00000*(STATES[0] - 15.0000)*CONSTANTS[2])/( CONSTANTS[0]*CONSTANTS[1])) - 1.00000);
    ALGEBRAIC[45] = (( 0.500000*CONSTANTS[0]*CONSTANTS[1])/CONSTANTS[2])*log(CONSTANTS[8]/STATES[3]);
    ALGEBRAIC[57] = CONSTANTS[17]*(STATES[0] - ALGEBRAIC[45]);
    ALGEBRAIC[64] = ( CONSTANTS[31]*(STATES[0] - ALGEBRAIC[27]))/(1.00000+exp((25.0000 - STATES[0])/5.98000));
    ALGEBRAIC[63] = ( CONSTANTS[29]*STATES[3])/(STATES[3]+CONSTANTS[30]);
    ALGEBRAIC[48] = STATES[4]*CONSTANTS[10]*(STATES[0] - ALGEBRAIC[27]);
    ALGEBRAIC[49] = ALGEBRAIC[47]+ALGEBRAIC[48];
    ALGEBRAIC[65] = CONSTANTS[43]/(1.00000+pow(CONSTANTS[41], 2.00000)/pow(STATES[3], 2.00000));
    ALGEBRAIC[66] = CONSTANTS[42]*(STATES[18] - STATES[3]);
    ALGEBRAIC[67] = CONSTANTS[40]*(STATES[11] - STATES[3]);
    ALGEBRAIC[69] = 1.00000/(1.00000+( CONSTANTS[44]*CONSTANTS[45])/pow(STATES[3]+CONSTANTS[45], 2.00000));
    ALGEBRAIC[68] = CONSTANTS[37] - (CONSTANTS[37] - CONSTANTS[38])/(1.00000+pow(CONSTANTS[36]/STATES[18], 2.00000));
    ALGEBRAIC[71] = CONSTANTS[33]*ALGEBRAIC[68];
    ALGEBRAIC[70] = CONSTANTS[32]/ALGEBRAIC[68];
    ALGEBRAIC[72] = ( ALGEBRAIC[70]*pow(STATES[11], 2.00000)*STATES[19])/(CONSTANTS[34]+ ALGEBRAIC[70]*pow(STATES[11], 2.00000));
    ALGEBRAIC[73] = CONSTANTS[39]*ALGEBRAIC[72]*(STATES[18] - STATES[11]);
    ALGEBRAIC[74] = 1.00000/(1.00000+( CONSTANTS[46]*CONSTANTS[47])/pow(STATES[18]+CONSTANTS[47], 2.00000));
    ALGEBRAIC[75] = 1.00000/(1.00000+( CONSTANTS[48]*CONSTANTS[49])/pow(STATES[11]+CONSTANTS[49], 2.00000));

    // Rates
    // ** I manually added the stimulus current
    real RATES[NEQ];
    RATES[0] = (- 1.00000/1.00000)*(ALGEBRAIC[51]+ALGEBRAIC[58]+ALGEBRAIC[60]+ALGEBRAIC[52]+ALGEBRAIC[53]+ALGEBRAIC[56]+ALGEBRAIC[61]+ALGEBRAIC[54]+ALGEBRAIC[55]+ALGEBRAIC[62]+ALGEBRAIC[57]+ALGEBRAIC[64]+ALGEBRAIC[63]+ALGEBRAIC[49]+stim_current);
    RATES[1] = (( - 1.00000*((ALGEBRAIC[51]+ALGEBRAIC[58]+ALGEBRAIC[48]+ALGEBRAIC[60]+ALGEBRAIC[52]+ALGEBRAIC[53]+ALGEBRAIC[64]) - 2.00000*ALGEBRAIC[61]))/( 1.00000*CONSTANTS[4]*CONSTANTS[2]))*CONSTANTS[3];
    RATES[2] = (( - 1.00000*(ALGEBRAIC[54]+ALGEBRAIC[55]+ALGEBRAIC[47]+ 3.00000*ALGEBRAIC[61]+ 3.00000*ALGEBRAIC[62]))/( 1.00000*CONSTANTS[4]*CONSTANTS[2]))*CONSTANTS[3];
    RATES[3] = ALGEBRAIC[69]*((( (ALGEBRAIC[66] - ALGEBRAIC[65])*CONSTANTS[50])/CONSTANTS[4]+ALGEBRAIC[67]) - ( 1.00000*((ALGEBRAIC[57]+ALGEBRAIC[63]) - 2.00000*ALGEBRAIC[62])*CONSTANTS[3])/( 2.00000*1.00000*CONSTANTS[4]*CONSTANTS[2]));
    RATES[4] = (ALGEBRAIC[0] - STATES[4])/ALGEBRAIC[37];
    RATES[5] = (ALGEBRAIC[1] - STATES[5])/ALGEBRAIC[38];
    RATES[6] = (ALGEBRAIC[2] - STATES[6])/ALGEBRAIC[39];
    RATES[7] = (ALGEBRAIC[3] - STATES[7])/ALGEBRAIC[40];
    RATES[8] = (ALGEBRAIC[4] - STATES[8])/ALGEBRAIC[41];
    RATES[9] = (ALGEBRAIC[5] - STATES[9])/ALGEBRAIC[42];
    RATES[10] = (ALGEBRAIC[6] - STATES[10])/ALGEBRAIC[43];
    RATES[11] = ALGEBRAIC[75]*((( - 1.00000*ALGEBRAIC[56]*CONSTANTS[3])/( 2.00000*1.00000*CONSTANTS[51]*CONSTANTS[2])+( ALGEBRAIC[73]*CONSTANTS[50])/CONSTANTS[51]) - ( ALGEBRAIC[67]*CONSTANTS[4])/CONSTANTS[51]);
    RATES[12] = (ALGEBRAIC[7] - STATES[12])/ALGEBRAIC[46];
    RATES[13] = (ALGEBRAIC[8] - STATES[13])/ALGEBRAIC[22];
    RATES[14] = (ALGEBRAIC[9] - STATES[14])/ALGEBRAIC[23];
    RATES[15] = (ALGEBRAIC[10] - STATES[15])/ALGEBRAIC[24];
    RATES[16] = (ALGEBRAIC[11] - STATES[16])/ALGEBRAIC[25];
    RATES[17] = (ALGEBRAIC[12] - STATES[17])/ALGEBRAIC[26];
    RATES[18] = ALGEBRAIC[74]*(ALGEBRAIC[65] - (ALGEBRAIC[73]+ALGEBRAIC[66]));
    RATES[19] = - ALGEBRAIC[71]*STATES[11]*STATES[19]+ CONSTANTS[35]*(1.00000 - STATES[19]);

    for (int i = 0; i < NEQ; i++)
        rDY_[i] = RATES[i];
}

// The automatic pacing from the Purkinje cells can be interrupted by blocking the INa current by 100% (ALGEBRAIC[54] = INa)
yolov2_forward_network_quantized_origin.c
#include "additionally.h"    // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
// softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h

#define GEMMCONV

//#define SSE41
//#undef AVX

// Symmetric saturation limits for the quantized pipeline (1 bit reserved for sign).
#define MAX_VAL_8 (256/2 - 1)          // 7-bit magnitude:  127
#define MAX_VAL_16 (256*256/2 - 1)     // 15-bit magnitude: 32767
// 31-bit magnitude: 2147483647. Written as a literal because the original
// expression (256*256*256*256/2 - 1) overflows a 32-bit signed int inside the
// constant expression (undefined behavior; in practice it evaluated to -1 and
// broke the 32-bit clamp).
#define MAX_VAL_32 (2147483647)

// Clamp src into [-max_val, +max_val] (symmetric saturation).
int max_abs(int src, int max_val)
{
    if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
    return src;
}

// Same symmetric saturation for 16-bit values.
short int max_abs_short(short int src, short int max_val)
{
    if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
    return src;
}

// im2col.c
// Fetch one input pixel for im2col. Coordinates are given in the padded
// frame; anything that falls outside the real image reads as zero.
int8_t im2col_get_pixel_int8(int8_t *im, int height, int width, int channels,
    int row, int col, int channel, int pad)
{
    row -= pad;
    col -= pad;

    if (row < 0 || col < 0 || row >= height || col >= width) return 0;
    return im[col + width*(row + height*channel)];
}

// im2col.c
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_int8(int8_t* data_im, int channels, int height, int width, int ksize, int stride, int pad, int8_t* data_col) { int c, h, w; int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int channels_col = channels * ksize * ksize; for (c = 0; c < channels_col; ++c) { int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = 0; h < height_col; ++h) { for (w = 0; w < width_col; ++w) { int im_row = h_offset + h * stride; int im_col = w_offset + w * stride; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel_int8(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA, int8_t *A, int lda, int8_t *B, int ldb, int16_t *C, int ldc) { int32_t *c_tmp = calloc(N, sizeof(int32_t)); int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register int16_t A_PART = ALPHA*A[i*lda + k]; //#pragma simd parallel for for (j = 0; j < N; ++j) { c_tmp[j] += A_PART*B[k*ldb + j]; } } for (j = 0; j < N; ++j) { C[i*ldc + j] += max_abs(c_tmp[j], MAX_VAL_16); c_tmp[j] = 0; } } free(c_tmp); } void gemm_nn_int8_int32(int M, int N, int K, int8_t ALPHA, int8_t *A, int lda, int8_t *B, int ldb, int32_t *C, int ldc) { int32_t *c_tmp = calloc(N, sizeof(int32_t)); int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register int16_t A_PART = ALPHA*A[i*lda + k]; //#pragma simd parallel for for (j = 0; j < N; ++j) { c_tmp[j] += A_PART*B[k*ldb + j]; } } for (j = 0; j < N; ++j) { C[i*ldc + j] += max_abs(c_tmp[j], MAX_VAL_32); c_tmp[j] = 0; } } free(c_tmp); } void forward_convolutional_layer_q(layer l, network_state state) { int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1 int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width 
for stride=1 and pad=1 int i, j; int const out_size = out_h*out_w; typedef int16_t conv_t; // l.output conv_t *output_q = calloc(l.outputs, sizeof(conv_t)); state.input_int8 = (int8_t *)calloc(l.inputs, sizeof(int)); int z; for (z = 0; z < l.inputs; ++z) { int16_t src = state.input[z] * l.input_quant_multiplier; state.input_int8[z] = max_abs(src, MAX_VAL_8); } // Convolution int m = l.n; int k = l.size*l.size*l.c; int n = out_h*out_w; int8_t *a = l.weights_int8; int8_t *b = (int8_t *)state.workspace; conv_t *c = output_q; // int16_t // Use GEMM (as part of BLAS) im2col_cpu_int8(state.input_int8, l.c, l.h, l.w, l.size, l.stride, l.pad, b); int t; // multi-thread gemm #pragma omp parallel for for (t = 0; t < m; ++t) { gemm_nn_int8_int16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n); } free(state.input_int8); // Bias addition int fil; for (fil = 0; fil < l.n; ++fil) { for (j = 0; j < out_size; ++j) { output_q[fil*out_size + j] = output_q[fil*out_size + j] + l.biases_quant[fil]; } } // Activation if (l.activation == LEAKY) { for (i = 0; i < l.n*out_size; ++i) { output_q[i] = (output_q[i] > 0) ? 
output_q[i] : output_q[i] / 10; } } // De-scaling float ALPHA1 = 1 / (l.input_quant_multiplier * l.weights_quant_multiplier); for (i = 0; i < l.outputs; ++i) { l.output[i] = output_q[i] * ALPHA1; } // saving l.output == next layer input // int abc; // char outfile[30]; // sprintf(outfile, "weights/CONV%d_OUT.txt",l.index); // FILE *fp_out = fopen(outfile, "w"); // for (abc = 0; abc < out_size * l.n; abc++){ // fprintf(fp_out, "%x", l.output[abc]); // } free(output_q); } void yolov2_forward_network_q(network net, network_state state) { state.workspace = net.workspace; int i; for (i = 0; i < net.n; ++i) { state.index = i; layer l = net.layers[i]; if (l.type == CONVOLUTIONAL) { forward_convolutional_layer_q(l, state); } else if (l.type == MAXPOOL) { forward_maxpool_layer_cpu(l, state); } else if (l.type == ROUTE) { forward_route_layer_cpu(l, state); } else if (l.type == REORG) { forward_reorg_layer_cpu(l, state); } else if (l.type == UPSAMPLE) { forward_upsample_layer_cpu(l, state); } else if (l.type == SHORTCUT) { forward_shortcut_layer_cpu(l, state); } else if (l.type == YOLO) { forward_yolo_layer_cpu(l, state); } else if (l.type == REGION) { forward_region_layer_cpu(l, state); } else { printf("\n layer: %d \n", l.type); } state.input = l.output; // Saving output // int out_h = (l.h + 2 * l.pad - l.size) + 1; // int out_w = (l.w + 2 * l.pad - l.size) + 1; // int output_size = out_h * out_w; // int k; // char outfile[30]; // sprintf(outfile, "weights/CONV%d_OUT.txt",i); // FILE *fp_out = fopen(outfile, "w"); // for (k = 0; k < output_size; k++){ // fprintf(fp_out, "%x\n", l.output[k]); // } } } // detect on CPU float *network_predict_quantized(network net, float *input) { network_state state; state.net = net; state.index = 0; state.input = input; state.truth = 0; state.train = 0; state.delta = 0; yolov2_forward_network_q(net, state); // network on CPU //float *out = get_network_output(net); int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; 
return net.layers[i].output; } //mean function for z-norm float _mean(float *weights, int filter_num, int weight_size){ float sum = 0; float mean; for (int i = 0; i < weight_size; i++){ sum += weights[filter_num * weight_size + i]; } mean = sum / weight_size; return mean; } // variance function for z-norm float _variance(float *weights, int filter_num, float mean, int weight_size){ float deviation_square_sum = 0; float variance; for (int i=0; i < weight_size; i++){ deviation_square_sum += pow(weights[filter_num * weight_size + i] - mean, 2); } variance = deviation_square_sum / weight_size; return variance; } // standard deviation function for z-norm float _std_deviation(float variance){ float std_deviation = sqrt(variance); return std_deviation; } // get min-max from weights float get_minmax(layer *l, int weights_size){ float max = 0; float min = 0; for (int i = 0; i < weights_size; i++){ if (max < l->weights[i]) max = l->weights[i]; if (min > l->weights[i]) min = l->weights[i]; } printf("The value of minmax : %f\n", max - min); return max - min; } // get mean float get_mean(layer *l, int weights_size){ float sum = 0; for (int i=0; i<weights_size; i++){ sum += l->weights[i]; } return sum / weights_size; } //get variance float get_variance(layer *l, int weights_size){ float deviation_square_sum = 0; float variance; float mean = get_mean(l, weights_size); for (int i=0; i < weights_size; i++){ deviation_square_sum += pow(l->weights[i] - mean, 2); } variance = deviation_square_sum / weights_size; return variance; } // get std_deviation float get_std_deviation(layer *l, int weights_size){ return sqrt(get_variance(l, weights_size)); } // This function contaminates original weights. // Have to additional work for contamination problem. 
void do_normalization(layer *l, char *method){ // get copy of weights printf("check 2\n"); size_t const weights_size = l->size*l->size*l->c*l->n; size_t const filter_size = l->size*l->size*l->c; float copied_weights[weights_size]; int fil, i; // memcpy(copied_weights, l->weights, sizeof(l->weights)); printf("check 3\n"); for (fil = 0; fil < l->n; ++fil) { for (i = 0; i < filter_size; ++i) { copied_weights[fil*filter_size + i] = l->weights[fil*filter_size + i]; } } // debug handling part // printf("*** This is preview for weights ***\n"); // for (i=0; i<10; i++){ // printf("weight[%d] : [%f]\n", i, copied_weights[i]); // } // if method is minmax // 22.03.04 below minmax working distinguish of filter. if (strcmp(method, "minmax") == 0){ float norm_weight; int j; // per filter for (fil = 0; fil < l->n; ++fil) { float max = 0; float min = 0; // get min, max of weights for (i = 0; i < filter_size; ++i) { if (max < copied_weights[fil*filter_size + i]) max = copied_weights[fil*filter_size + i]; if (min > copied_weights[fil*filter_size + i]) min = copied_weights[fil*filter_size + i]; } printf("min, max : %f %f\n", min, max); // do min-max normalization for (i = 0; i < filter_size; ++i) { norm_weight = (copied_weights[fil*filter_size + i] - min) / (max - min); copied_weights[fil*filter_size + i] = norm_weight; } } // debug handling part // printf("*** This is preview for normalized weights ***\n"); // for (i=0; i<10; i++){ // printf("weight[%d] : [%f]\n", i, copied_weights[i]); // } // re-copy to origin weight // memcpy(l->weights, copied_weights, sizeof(l->weights)); for (fil = 0; fil < l->n; ++fil) { for (i = 0; i < filter_size; ++i) { l->weights[fil*filter_size + i] = copied_weights[fil*filter_size + i]; } } } // if method is znorm // 22.03.05 below code is about znorm if (strcmp(method, "znorm") == 0){ // per filter for (fil = 0; fil < l->n; ++fil) { // get mean, variance, std_deviation float mean = _mean(copied_weights, fil, filter_size); float variance = 
_variance(copied_weights, fil, mean, filter_size); float std_deviation = _std_deviation(variance); printf("[filter num : %d][mean : %f] [variance : %f] [standard deviation : %f]\n", fil, mean, variance, std_deviation); // do z-normalization for (i = 0; i < filter_size; ++i) { float norm_weight = (copied_weights[fil*filter_size + i] - mean) / std_deviation; copied_weights[fil*filter_size + i] = norm_weight; } } // re-copy to origin weight for (i = 0; i < weights_size; i++){ l->weights[i] = copied_weights[i]; } } } /* Quantization-related */ void do_quantization(network net) { int counter = 0; char* method = "znorm"; // minmax, znorm int j; for (j = 0; j < net.n; ++j) { layer *l = &net.layers[j]; /* TODO: implement quantization The implementation given below is a naive version of per-network quantization; implement your own quantization that minimizes the mAP degradation */ printf("\n"); if (l->type == CONVOLUTIONAL) { // Quantize conv layer only size_t const weights_size = l->size*l->size*l->c*l->n; size_t const filter_size = l->size*l->size*l->c; // float layer_std_weight[11] = {}; float layer_minmax_weight[11] = {256.032, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997}; // {1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1} => 64.16% // {1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1} => 64.06% // {1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1} => 64.19% // {1, 2, 4, 1, 1, 4, 1, 1, 1, 1, 1} => 64.22% // {1, 2, 2, 2, 1, 4, 1, 1, 1, 1, 1} => 64.28% // {1, 2, 2, 1, 1, 4, 1, 1, 1, 1, 1} => 64.52% // {1, 2, 2, 1, 1, 4, 4, 1, 1, 1, 1} => 65.28% // {1, 2, 2, 1, 1, 4, 2, 1, 1, 1, 1} => 65.92% // {1, 2, 2, 1, 1, 4, 3, 1, 1, 1, 1} => 66.54% // {1, 2, 2, 1, 1, 4, 3, 1, 1.5, 1, 1} => 66.75% // {1, 2, 2, 1, 1, 4, 3, 1, 1.5, 2, 1} => 67.23% // {2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} => 64% // {2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} => 65% // {3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} => 69.74% // {4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} => 70.35% // {4.0005, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} => 70.63% // 
below codes are for debugging // printf("%d\n", sizeof(l->weights)*weights_size); // printf("%d\n", sizeof(l->weights)); // printf("%d\n", sizeof(l->weights[0])); // printf("%d\n", sizeof(float)); int i, fil; // Input Scaling if (counter >= net.input_calibration_size) { printf(" Warning: CONV%d has no corresponding input_calibration parameter - default value 16 will be used;\n", j); } l->input_quant_multiplier = (counter < net.input_calibration_size) ? net.input_calibration[counter] : 16; // Using 16 as input_calibration as default value // l->input_quant_multiplier = floor(l->input_quant_multiplier*pow(2,12))/pow(2,12); ++counter; printf("check 1\n"); // Weight Quantization // do_normalization(l, method); // for (fil = 0; fil < l->n; ++fil) { // for (i = 0; i < filter_size; ++i) { // float w = (l->weights[fil*filter_size + i] - 0.5) * 127; // Scale // l->weights_int8[fil*filter_size + i] = max_abs(w, MAX_VAL_8); // Clip // } // } // Below annotation is a pure-skeleton code. l->weights_quant_multiplier = 1 / get_minmax(l, weights_size) * layer_minmax_weight[counter - 1]; // l->weights_quant_multiplier = 1 / get_std_deviation(l, weights_size); // Arbitrarily set to 32; you should devise your own method to calculate the weight multiplier for (fil = 0; fil < l->n; ++fil) { for (i = 0; i < filter_size; ++i) { float w = l->weights[fil*filter_size + i] * l->weights_quant_multiplier; // Scale l->weights_int8[fil*filter_size + i] = max_abs(w, MAX_VAL_8); // Clip } } // Bias Quantization float biases_multiplier = (l->weights_quant_multiplier * l->input_quant_multiplier); for (fil = 0; fil < l->n; ++fil) { float b = l->biases[fil] * biases_multiplier; // Scale l->biases_quant[fil] = max_abs(b, MAX_VAL_16); // Clip } printf(" CONV%d multipliers: input %g, weights %g, bias %g \n", j, l->input_quant_multiplier, l->weights_quant_multiplier, biases_multiplier); } else { printf(" No quantization for layer %d (layer type: %d) \n", j, l->type); } } } // Save quantized weights, bias, 
// Save quantized weights, biases, and the input scale of every CONV layer to
// text files under weights/ (one file set per layer). Weights are packed four
// int8 values per line, biases two int16 values per line.
void save_quantized_model(network net) {
    int j;
    for (j = 0; j < net.n; ++j) {
        layer *l = &net.layers[j];
        if (l->type != CONVOLUTIONAL) continue;

        size_t const weights_size = l->size*l->size*l->c*l->n;
        printf(" Saving quantized weights, bias, and scale for CONV%d \n", j);

        char weightfile[30];
        char biasfile[30];
        char scalefile[30];
        char origin_weightfile[30];
        sprintf(weightfile, "weights/CONV%d_W.txt", j);
        sprintf(biasfile, "weights/CONV%d_B.txt", j);
        sprintf(scalefile, "weights/CONV%d_S.txt", j);
        sprintf(origin_weightfile, "weights/CONV%d_ORIGIN.txt", j);

        size_t k;

        // Original (float) weights, one per line. All fopen results are
        // checked: the original fprintf'd into a NULL stream if weights/
        // did not exist.
        FILE *fp_ori = fopen(origin_weightfile, "w");
        if (fp_ori) {
            for (k = 0; k < weights_size; k++) {
                fprintf(fp_ori, "%f\n", l->weights[k]);
            }
            fclose(fp_ori);
        }

        // Quantized weights, four 8-bit values packed per line; the tail of
        // the last line is zero-padded.
        FILE *fp_w = fopen(weightfile, "w");
        if (fp_w) {
            for (k = 0; k < weights_size; k = k + 4) {
                uint8_t first = l->weights_int8[k];
                uint8_t second = k + 1 < weights_size ? l->weights_int8[k + 1] : 0;
                uint8_t third = k + 2 < weights_size ? l->weights_int8[k + 2] : 0;
                uint8_t fourth = k + 3 < weights_size ? l->weights_int8[k + 3] : 0;
                fprintf(fp_w, "%02x%02x%02x%02x\n", first, second, third, fourth);
            }
            fclose(fp_w);
        }

        // Quantized biases, two 16-bit values per line. The original loop
        // stepped by 4 while writing only two values, silently dropping every
        // other pair of biases; stepping by 2 saves them all.
        FILE *fp_b = fopen(biasfile, "w");
        if (fp_b) {
            int kb;
            for (kb = 0; kb < l->n; kb = kb + 2) {
                uint16_t first = l->biases_quant[kb];
                uint16_t second = kb + 1 < l->n ? l->biases_quant[kb + 1] : 0;
                fprintf(fp_b, "%04x%04x\n", first, second);
            }
            fclose(fp_b);
        }

        // Input scale used when quantizing this layer's input.
        FILE *fp_s = fopen(scalefile, "w");
        if (fp_s) {
            fprintf(fp_s, "%f\n", l->input_quant_multiplier);
            fclose(fp_s);
        }
    }
}
ZQ_CNN_MTCNN.h
#ifndef _ZQ_CNN_MTCNN_H_ #define _ZQ_CNN_MTCNN_H_ #pragma once #include "ZQ_CNN_Net.h" #include "ZQ_CNN_BBoxUtils.h" #include <omp.h> namespace ZQ { class ZQ_CNN_MTCNN { public: using string = std::string; ZQ_CNN_MTCNN() { min_size = 60; thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.7; nms_thresh[0] = 0.6; nms_thresh[1] = 0.7; nms_thresh[2] = 0.7; width = 0; height = 0; factor = 0.709; pnet_overlap_thresh_count = 4; pnet_size = 12; pnet_stride = 2; special_handle_very_big_face = false; force_run_pnet_multithread = false; show_debug_info = false; limit_r_num = 0; limit_o_num = 0; limit_l_num = 0; } ~ZQ_CNN_MTCNN() { } private: #if __ARM_NEON const int BATCH_SIZE = 16; #else const int BATCH_SIZE = 64; #endif std::vector<ZQ_CNN_Net> pnet, rnet, onet, lnet; bool has_lnet; int thread_num; float thresh[3], nms_thresh[3]; int min_size; int width, height; float factor; int pnet_overlap_thresh_count; int pnet_size; int pnet_stride; int rnet_size; int onet_size; int lnet_size; bool special_handle_very_big_face; bool do_landmark; float early_accept_thresh; float nms_thresh_per_scale; bool force_run_pnet_multithread; std::vector<float> scales; std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> pnet_images; ZQ_CNN_Tensor4D_NHW_C_Align128bit input, rnet_image, onet_image; bool show_debug_info; int limit_r_num; int limit_o_num; int limit_l_num; public: void TurnOnShowDebugInfo() { show_debug_info = true; } void TurnOffShowDebugInfo() { show_debug_info = false; } void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0) { limit_r_num = limit_r; limit_o_num = limit_o; limit_l_num = limit_l; } bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model, const string& onet_param, const string& onet_model, int thread_num = 1, bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "") { if (thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = 
__max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if (has_lnet) { lnet.resize(thread_num); } bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFrom(pnet_param, pnet_model,true,1e-9, true) && rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true) && onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; if (has_lnet) { lnet[0].GetInputDim(C, H, W); lnet_size = H; } return ret; } bool InitFromBuffer( const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len, const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len, const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len, int thread_num = 1, bool has_lnet = false, const char* lnet_param = 0, __int64 lnet_param_len = 0, const char* lnet_model = 0, __int64 lnet_model_len = 0) { if (thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if(has_lnet) lnet.resize(thread_num); bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len,pnet_model,pnet_model_len, true, 1e-9, true) && 
rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len, true, 1e-9, true) && onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFromBuffer(lnet_param, lnet_param_len, lnet_model, lnet_model_len, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; return ret; } void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7, float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709, int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false, bool do_landmark = true, float early_accept_thresh = 1.00) { min_size = __max(pnet_size, min_face_size); thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh); nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh); scale_factor = __max(0.5, __min(0.97, scale_factor)); this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count); this->pnet_size = pnet_size; this->pnet_stride = pnet_stride; this->special_handle_very_big_face = special_handle_very_big_face; this->do_landmark = do_landmark; this->early_accept_thresh = early_accept_thresh; if (pnet_size == 20 && pnet_stride == 4) nms_thresh_per_scale = 0.45; else nms_thresh_per_scale = 0.495; if (width != w || height != h || factor != 
scale_factor) { scales.clear(); pnet_images.clear(); width = w; height = h; float minside = __min(width, height); int MIN_DET_SIZE = pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales.push_back(m); minside *= factor; m *= factor; } minside = __min(width, height); int count = scales.size(); for (int i = scales.size() - 1; i >= 0; i--) { if (ceil(scales[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales.resize(count); if (count > 0) { float last_size = ceil(scales[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales.push_back((float)tmp_size / minside); count++; } } scales.push_back((float)pnet_size / minside); count++; } else { scales.push_back((float)pnet_size / minside); count++; } pnet_images.resize(count); } } bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, _width, _height); } double t2 = omp_get_wtime(); if (!_Rnet_stage(firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, _width, _height); } if (!has_lnet || !do_landmark) { double t3 = omp_get_wtime(); if (!_Onet_stage(secondBbox, results)) return false; double t4 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n", 1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3)); } } else { double t3 = omp_get_wtime(); if (!_Onet_stage(secondBbox, thirdBbox)) return false; if (limit_l_num > 0) { _select(thirdBbox, 
limit_l_num, _width, _height); } double t4 = omp_get_wtime(); if (!_Lnet_stage(thirdBbox, results)) return false; double t5 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n", 1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4)); } } return true; } bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, _width, _height); } double t2 = omp_get_wtime(); if (!_Rnet_stage(firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, _width, _height); } if (!has_lnet || !do_landmark) { return false; } double t3 = omp_get_wtime(); if (!_Onet_stage(secondBbox, thirdBbox)) return false; if (limit_l_num > 0) { _select(thirdBbox, limit_l_num, _width, _height); } double t4 = omp_get_wtime(); if (!_Lnet106_stage(thirdBbox, results)) return false; double t5 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n", 1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4)); } return true; } private: void _compute_Pnet_single_thread(std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW) { int scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / 
pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } maps.resize(scale_num); for (int i = 0; i < scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } for (int i = 0; i < scale_num; i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; double t10 = omp_get_wtime(); if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } double t11 = omp_get_wtime(); if (scales[i] != 1) pnet[0].Forward(pnet_images[i]); else pnet[0].Forward(input); double t12 = omp_get_wtime(); if (show_debug_info) printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n", i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11)); const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1"); //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if(row < mapH[i] && col < mapW[i]) maps[i][row*mapW[i] + col] = *p; p += scorePixStep; } } } } void _compute_Pnet_multi_thread(std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW) { if (thread_num <= 1) { for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1) for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } } int 
scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } maps.resize(scale_num); for (int i = 0; i < scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } std::vector<int> task_rect_off_x; std::vector<int> task_rect_off_y; std::vector<int> task_rect_width; std::vector<int> task_rect_height; std::vector<float> task_scale; std::vector<int> task_scale_id; int stride = pnet_stride; const int block_size = 64 * stride; int cellsize = pnet_size; int border_size = cellsize - stride; int overlap_border_size = cellsize / stride; int jump_size = block_size - border_size; for (int i = 0; i < scales.size(); i++) { int changeH = (int)ceil(height*scales[i]); int changeW = (int)ceil(width*scales[i]); if (changeH < pnet_size || changeW < pnet_size) continue; int block_H_num = 0; int block_W_num = 0; int start = 0; while (start < changeH) { block_H_num++; if (start + block_size >= changeH) break; start += jump_size; } start = 0; while (start < changeW) { block_W_num++; if (start + block_size >= changeW) break; start += jump_size; } for (int s = 0; s < block_H_num; s++) { for (int t = 0; t < block_W_num; t++) { int rect_off_x = t * jump_size; int rect_off_y = s * jump_size; int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x; int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y; if (rect_width >= cellsize && rect_height >= cellsize) { task_rect_off_x.push_back(rect_off_x); task_rect_off_y.push_back(rect_off_y); task_rect_width.push_back(rect_width); task_rect_height.push_back(rect_height); task_scale.push_back(scales[i]); task_scale_id.push_back(i); } } } } // int task_num = task_scale.size(); std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_pnet_images(thread_num); if 
(thread_num <= 1) { for (int i = 0; i < task_num; i++) { int thread_id = omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } else { #pragma omp parallel for num_threads(thread_num) for (int i = 0; i < task_num; i++) { int thread_id = omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if 
(!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } } bool _Pnet_stage(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& firstBbox) { if (thread_num <= 0) return false; double t1 = omp_get_wtime(); firstBbox.clear(); if (width != _width || height != _height) return false; if (!input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); std::vector<std::vector<float> > maps; std::vector<int> mapH; std::vector<int> mapW; if (thread_num == 1 && !force_run_pnet_multithread) { pnet[0].TurnOffShowDebugInfo(); //pnet[0].TurnOnShowDebugInfo(); _compute_Pnet_single_thread(maps, mapH, mapW); } else { _compute_Pnet_multi_thread(maps, mapH, mapW); } ZQ_CNN_OrderScore order; std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size()); std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size()); const int block_size = 32; int stride = pnet_stride; int cellsize = pnet_size; int border_size = cellsize / stride; for (int i = 0; i < maps.size(); i++) { double t13 = omp_get_wtime(); int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; float cur_scale_x = (float)width / changedW; float cur_scale_y = 
(float)height / changedH; int count = 0; //score p int scoreH = mapH[i]; int scoreW = mapW[i]; const float *p = &maps[i][0]; if (scoreW <= block_size && scoreH < block_size) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bounding_boxes[i].push_back(bbox); bounding_scores[i].push_back(order); count++; } p ++; } } int before_count = bounding_boxes[i].size(); ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } else { int before_count = 0, after_count = 0; int block_H_num = __max(1, scoreH / block_size); int block_W_num = __max(1, scoreW / block_size); int block_num = block_H_num*block_W_num; int width_per_block = scoreW / block_W_num; int height_per_block = scoreH / block_H_num; std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num); std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num); std::vector<int> block_start_w(block_num), block_end_w(block_num); std::vector<int> 
block_start_h(block_num), block_end_h(block_num); for (int bh = 0; bh < block_H_num; bh++) { for (int bw = 0; bw < block_W_num; bw++) { int bb = bh * block_W_num + bw; block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size); block_end_w[bb] = (bw == block_num - 1) ? scoreW : ((bw + 1)*width_per_block); block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size); block_end_h[bb] = (bh == block_num - 1) ? scoreH : ((bh + 1)*height_per_block); } } int chunk_size = 1;// ceil((float)block_num / thread_num); if (thread_num <= 1) { for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } else { #pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num) for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { const float* p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = 
block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } count = 0; for (int bb = 0; bb < block_num; bb++) { std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin(); for (; it != tmp_bounding_boxes[bb].end(); it++) { if ((*it).exist) { bounding_boxes[i].push_back(*it); order.score = (*it).score; order.oriOrder = count; bounding_scores[i].push_back(order); count++; } } } //ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0); after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } } std::vector<ZQ_CNN_OrderScore> firstOrderScore; int count = 0; for (int i = 0; i < scales.size(); i++) { std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin(); for (; it != 
bounding_boxes[i].end(); it++) { if ((*it).exist) { firstBbox.push_back(*it); order.score = (*it).score; order.oriOrder = count; firstOrderScore.push_back(order); count++; } } } //the first stage's nms if (count < 1) return false; double t15 = omp_get_wtime(); ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height,true); double t16 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms\n", 1000 * (t16 - t15)); if (show_debug_info) printf("first stage candidate count: %d\n", count); double t3 = omp_get_wtime(); if (show_debug_info) printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t2)); return true; } bool _Rnet_stage(std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox) { double t3 = omp_get_wtime(); secondBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin(); std::vector<ZQ_CNN_OrderScore> secondScore; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int r_count = 0; for (; it != firstBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); r_count++; secondBbox.push_back(*it); } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)r_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)r_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_rnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > 
task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(r_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_secondBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_secondBbox[i][j] = secondBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[0].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[0].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 
1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[thread_id].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[thread_id].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_secondBbox[i].size(); } secondBbox.resize(count); secondScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_secondBbox[i].size(); j++) { secondBbox[id] = task_secondBbox[i][j]; secondScore[id].score = secondBbox[id].score; secondScore[id].oriOrder = id; id++; } } //ZQ_CNN_BBoxUtils::_nms(secondBbox, 
secondScore, nms_thresh[1], "Union"); ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min"); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true); count = secondBbox.size(); double t4 = omp_get_wtime(); if (show_debug_info) printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count); if (show_debug_info) printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3)); return true; } bool _Onet_stage(std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox) { double t4 = omp_get_wtime(); thirdBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin(); std::vector<ZQ_CNN_OrderScore> thirdScore; std::vector<ZQ_CNN_BBox> early_accept_thirdBbox; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int o_count = 0; for (; it != secondBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { if (!do_landmark && it->score > early_accept_thresh) { early_accept_thirdBbox.push_back(*it); } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); o_count++; thirdBbox.push_back(*it); } } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)o_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)o_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_onet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); 
std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(o_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_thirdBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_thirdBbox[i][j] = thirdBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[0].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[0].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[0].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { 
task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[thread_id].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[thread_id].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[thread_id].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if 
(score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_thirdBbox[i].size(); } thirdBbox.resize(count); thirdScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_thirdBbox[i].size(); j++) { thirdBbox[id] = task_thirdBbox[i][j]; thirdScore[id].score = task_thirdBbox[i][j].score; thirdScore[id].oriOrder = id; id++; } } ZQ_CNN_OrderScore order; for (int i = 0; i < early_accept_thirdBbox.size(); i++) { order.score = early_accept_thirdBbox[i].score; order.oriOrder = count++; thirdScore.push_back(order); thirdBbox.push_back(early_accept_thirdBbox[i]); } ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false); ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min"); double t5 = omp_get_wtime(); if (show_debug_info) printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count); if (show_debug_info) printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool 
_Lnet_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox) { double t4 = omp_get_wtime(); fourthBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); 
task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j] = copy_fourthBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int 
keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } fourthBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet106_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox) { double t4 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> fourthBbox; std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; 
src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1; task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2; task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1; task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2; task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area; task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score; task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist; } } } resultBbox.resize(l_count); for (int i = 0; i < l_count; i++) { resultBbox[i].col1 = fourthBbox[i].col1; resultBbox[i].col2 = fourthBbox[i].col2; resultBbox[i].row1 = fourthBbox[i].row1; resultBbox[i].row2 = fourthBbox[i].row2; 
resultBbox[i].score = fourthBbox[i].score; resultBbox[i].exist = fourthBbox[i].exist; resultBbox[i].area = fourthBbox[i].area; } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } else { #pragma omp parallel for num_threads(thread_num) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; 
num++) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } resultBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height) { int in_num = bbox.size(); if (limit_num >= in_num) return; bbox.resize(limit_num); } }; } #endif
/* paint.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. The fuzz member of % image defines how much tolerance is acceptable to consider two colors as % the same. For example, set fuzz to 10 and the color red at intensities of % 100 and 102 respectively are now interpreted as the same color for the % purposes of the floodfill. 
%
%  The format of the FloodfillPaintImage method is:
%
%      MagickBooleanType FloodfillPaintImage(Image *image,
%        const DrawInfo *draw_info,const PixelInfo *target,
%        const ssize_t x_offset,const ssize_t y_offset,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o target: the RGB value of the target color.
%
%    o x_offset,y_offset: the starting location of the operation.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset,
  const ssize_t y_offset,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define MaxStacksize 524288UL
/*
  Push a horizontal segment [left,right] on row `up' whose neighbor row
  (up+delta) still needs scanning.  Relies on the local `s' stack pointer and
  `segment_stack' of the enclosing function; segments whose neighbor row lies
  outside the image are silently dropped.
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  Image
    *floodplane_image;

  MagickBooleanType
    skip,
    status;

  MemoryInfo
    *segment_info;

  PixelInfo
    fill_color,
    pixel;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x1,
    x2,
    y;

  /*
    Check boundary conditions: the seed point must fall inside the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if ((image->alpha_trait == UndefinedPixelTrait) &&
      (draw_info->fill.alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(image,OpaqueAlpha,exception);
  /*
    Set floodfill state: a grayscale clone marks visited pixels (0 = not yet
    filled, QuantumRange = part of the flooded region).
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  floodplane_image->alpha_trait=UndefinedPixelTrait;
  floodplane_image->colorspace=GRAYColorspace;
  (void) QueryColorCompliance("#000",AllCompliance,
    &floodplane_image->background_color,exception);
  (void) SetImageBackgroundColor(floodplane_image,exception);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack (the seed row and the row below it).
  */
  status=MagickTrue;
  start=0;
  s=segment_stack;
  PushSegmentStack(y_offset,x_offset,x_offset,1);
  PushSegmentStack(y_offset+1,x_offset,x_offset,-1);
  GetPixelInfo(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  while (s > segment_stack)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: first scan left from x1 for matching,
      not-yet-visited pixels.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    p+=x1*GetPixelChannels(image);
    q+=x1*GetPixelChannels(floodplane_image);
    for (x=x1; x >= 0; x--)
    {
      if (GetPixelGray(floodplane_image,q) != 0)
        break;  /* already visited */
      GetPixelInfoPixel(image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
        break;  /* not part of the region */
      SetPixelGray(floodplane_image,QuantumRange,q);
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(floodplane_image);
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    /* skip is set when even the leftmost probe pixel did not match */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /*
            Scan right from x, marking matching pixels and queuing the rows
            above/below the span just found.
          */
          if (x < (ssize_t) image->columns)
            {
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns-
                x,1,exception);
              if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelGray(floodplane_image,q) != 0)
                  break;
                GetPixelInfoPixel(image,p,&pixel);
                if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
                  break;
                SetPixelGray(floodplane_image,QuantumRange,q);
                p+=GetPixelChannels(image);
                q+=GetPixelChannels(floodplane_image);
              }
              status=SyncCacheViewAuthenticPixels(floodplane_view,exception);
              if (status == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      /*
        Advance past any non-matching gap inside [x,x2] to find the next
        fillable span on this row.
      */
      if (x <= x2)
        {
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (GetPixelGray(floodplane_image,q) != 0)
              break;
            GetPixelInfoPixel(image,p,&pixel);
            if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
              break;
            p+=GetPixelChannels(image);
            q+=GetPixelChannels(floodplane_image);
          }
        }
      start=x;
    } while (x <= x2);
  }
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Tile fill color onto floodplane: every marked pixel receives the
      (possibly textured) fill color of draw_info.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelGray(floodplane_image,p) != 0)
        {
          GetFillColor(draw_info,x,y,&fill_color,exception);
          SetPixelViaPixelInfo(image,&fill_color,q);
        }
      p+=GetPixelChannels(floodplane_image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G r a d i e n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GradientImage() applies a continuously smooth color transition along a
%  vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
%
%  The format of the GradientImage method is:
%
%      MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const StopInfo *stops,
%        const size_t number_stops,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o method: the gradient spread method: pad, reflect, or repeat.
%
%    o stops: the gradient color stops.
%
%    o number_stops: the number of color stops.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,const StopInfo *stops,
  const size_t number_stops,ExceptionInfo *exception)
{
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  /*
    Set gradient start-stop end points.  The default vector runs from the
    upper-left corner to the lower-right corner; image artifacts
    (gradient:direction, gradient:angle, gradient:vector, gradient:center,
    gradient:extent, gradient:radii, gradient:bounding-box) override it.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(stops != (const StopInfo *) NULL);
  assert(number_stops > 0);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      /*
        Map a compass direction onto the gradient vector.  The vector points
        from the "start" edge/corner toward the named gravity.
      */
      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /*
            Bug fix: a South gradient is vertical, so the y endpoint must be
            derived from the image height (rows), not the width (columns);
            every other gravity case uses rows for the y components.
          */
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /*
    With no overriding artifacts, a linear gradient defaults to a vertical
    (top-to-bottom) transition.
  */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients:
        center the angled gradient line so it spans the whole image.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) (image->columns-1.0)*cosine)+
        fabs((double) (image->rows-1.0)*sine);
      gradient->gradient_vector.x1=0.5*((image->columns-1.0)-distance*cosine);
      gradient->gradient_vector.y1=0.5*((image->rows-1.0)-distance*sine);
      gradient->gradient_vector.x2=0.5*((image->columns-1.0)+distance*cosine);
      gradient->gradient_vector.y2=0.5*((image->rows-1.0)+distance*sine);
    }
  gradient->radii.x=(double) MagickMax((image->columns-1.0),
    (image->rows-1.0))/2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1.0),
            (image->rows-1.0))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt((double) (image->columns-1.0)*
            (image->columns-1.0)+(image->rows-1.0)*(image->rows-1.0)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) (image->columns-1.0)/2.0;
          gradient->radii.y=(double) (image->rows-1.0)/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1.0),
            (image->rows-1.0))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) (MagickMin((image->columns-1.0),
            (image->rows-1.0)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=number_stops;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    {
      /*
        Bug fix: release draw_info before throwing, otherwise it leaks on
        this out-of-memory path.
      */
      draw_info=DestroyDrawInfo(draw_info);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memcpy(gradient->stops,stops,(size_t) number_stops*sizeof(*stops));
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.  Each pixel is replaced by the most frequent color occurring
%  in a circular region defined by radius.
%
%  The format of the OilPaintImage method is:
%
%      Image *OilPaintImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the circular neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) memset(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **histograms, width; ssize_t center, y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,sigma); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,0,0,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)* (width/2L)+GetPixelChannels(linear_image)*(width/2L); image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register size_t *histogram; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, n, v; /* Assign most frequent color. 
*/ k=0; j=0; count=0; (void) memset(histogram,0,NumberPaintBins* sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+GetPixelChannels(linear_image)*(u+k)))); histogram[n]++; if (histogram[n] > count) { j=k+u; count=histogram[n]; } } k+=(ssize_t) (linear_image->columns+width); } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel = GetPixelChannelChannel(linear_image,i); PixelTrait traits = GetPixelChannelTraits(linear_image,channel); PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel); if ((traits == UndefinedPixelTrait) || (paint_traits == UndefinedPixelTrait)) continue; if ((paint_traits & CopyPixelTrait) != 0) { SetPixelChannel(paint_image,channel,p[center+i],q); continue; } SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+ i],q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(paint_image); } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (linear_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(linear_image,OilPaintImageTag,progress, linear_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill 
argument.
%
%  By default color must match a particular pixel color exactly.  However, in
%  many cases two colors may differ by a small amount.  Fuzz defines how much
%  tolerance is acceptable to consider two colors as the same.  For example,
%  set fuzz to 10 and the color red at intensities of 100 and 102 respectively
%  are now interpreted as the same color.
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
%        const PixelInfo *fill,const MagickBooleanType invert,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  assert(fill != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* adapt fill/target to the image's colorspace and matte state */
  ConformPixelInfo(image,fill,&conform_fill,exception);
  ConformPixelInfo(image,target,&conform_target,exception);
  /*
    Make image color opaque: rows are processed in parallel; status/progress
    are the only cross-thread state.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
        {
          PixelTrait
            traits;

          /* only channels flagged for update receive the fill color */
          traits=GetPixelChannelTraits(image,RedPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelRed(image,(Quantum) conform_fill.red,q);
          traits=GetPixelChannelTraits(image,GreenPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelGreen(image,(Quantum) conform_fill.green,q);
          traits=GetPixelChannelTraits(image,BluePixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelBlue(image,(Quantum) conform_fill.blue,q);
          traits=GetPixelChannelTraits(image,BlackPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelBlack(image,(Quantum) conform_fill.black,q);
          traits=GetPixelChannelTraits(image,AlphaPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelAlpha(image,(Quantum) conform_fill.alpha,q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, in % many cases two colors may differ by a small amount. Fuzz defines how much % tolerance is acceptable to consider two colors as the same. For example, % set fuzz to 10 and the color red at intensities of 100 and 102 respectively % are now interpreted as the same color. % % The format of the TransparentPaintImage method is: % % MagickBooleanType TransparentPaintImage(Image *image, % const PixelInfo *target,const Quantum opacity, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o target: the target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* an alpha channel is required before any alpha value can be written */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent: rows are processed in parallel;
    status/progress are the only cross-thread state.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      /* fuzzy match (or mismatch, when invert) selects the pixel */
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for the all the channels, TransparentPaintImage()
%  is not suitable for the operations like chroma, where the tolerance for
%  similarity of two color component (RGB) can be different.  Thus we define
%  this method to take two target pixels (one low and one high) and all the
%  pixels of an image which are lying between these two pixels are made
%  transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
/* NOTE(review): identical redefinition of the tag in TransparentPaintImage */
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (PixelInfo *) NULL);
  assert(low != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* an alpha channel is required before any alpha value can be written */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent: rows are processed in parallel;
    status/progress are the only cross-thread state.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      /* pixel matches when each RGB component lies within [low,high] */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      if (match != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/* deprecate.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickWand Deprecated Methods % % % % Software Design % % John Cristy % % October 2002 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define PixelViewId "PixelView" #define ThrowWandException(severity,tag,context) \ { \ (void) ThrowMagickException(wand->exception,GetMagickModule(),severity, \ tag,"`%s'",context); \ return(MagickFalse); \ } /* Typedef declarations. 
*/ struct _PixelView { size_t id; char name[MaxTextExtent]; ExceptionInfo *exception; MagickWand *wand; CacheView *view; RectangleInfo region; size_t number_threads; PixelWand ***pixel_wands; MagickBooleanType debug; size_t signature; }; #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickAverageImages() average a set of images. % % The format of the MagickAverageImages method is: % % MagickWand *MagickAverageImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ static MagickWand *CloneMagickWandFromImages(const MagickWand *wand, Image *images) { MagickWand *clone_wand; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand)); if (clone_wand == (MagickWand *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", images->filename); (void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand)); clone_wand->id=AcquireWandId(); (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g", MagickWandId,(double) clone_wand->id); clone_wand->exception=AcquireExceptionInfo(); InheritException(clone_wand->exception,wand->exception); clone_wand->image_info=CloneImageInfo(wand->image_info); clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info); clone_wand->images=images; clone_wand->debug=IsEventLogging(); if (clone_wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name); clone_wand->signature=WandSignature; return(clone_wand); } WandExport MagickWand *MagickAverageImages(MagickWand *wand) { Image *average_image; assert(wand != 
(MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); average_image=EvaluateImages(wand->images,MeanEvaluateOperator, wand->exception); if (average_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,average_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelView() makes a copy of the specified pixel view. % % The format of the ClonePixelView method is: % % PixelView *ClonePixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelView *ClonePixelView(const PixelView *pixel_view) { PixelView *clone_view; register ssize_t i; assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (PixelView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view)); clone_view->id=AcquireWandId(); (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g", PixelViewId,(double) clone_view->id); clone_view->exception=AcquireExceptionInfo(); InheritException(clone_view->exception,pixel_view->exception); clone_view->view=CloneCacheView(pixel_view->view); clone_view->region=pixel_view->region; clone_view->number_threads=pixel_view->number_threads; for (i=0; i < (ssize_t) pixel_view->number_threads; i++) clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **) 
pixel_view->pixel_wands[i],pixel_view->region.width); clone_view->debug=pixel_view->debug; if (clone_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name); clone_view->signature=WandSignature; return(clone_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelView() deallocates memory associated with a pixel view. % % The format of the DestroyPixelView method is: % % PixelView *DestroyPixelView(PixelView *pixel_view, % const size_t number_wands,const size_t number_threads) % % A description of each parameter follows: % % o pixel_view: the pixel view. % % o number_wand: the number of pixel wands. % % o number_threads: number of threads. % */ static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands, const size_t number_wands,const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i=0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands); pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands); return(pixel_wands); } WandExport PixelView *DestroyPixelView(PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands, pixel_view->region.width,pixel_view->number_threads); pixel_view->view=DestroyCacheView(pixel_view->view); pixel_view->exception=DestroyExceptionInfo(pixel_view->exception); pixel_view->signature=(~WandSignature); RelinquishWandId(pixel_view->id); pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view); return(pixel_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e 
r P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferPixelViewIterator() iterates over three pixel views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel region is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. However, the destination pixel view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferPixelViewIterator method is: % % MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source, % PixelView *duplex,PixelView *destination, % DuplexTransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o duplex: the duplex pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. 
% */ WandExport MagickBooleanType DuplexTransferPixelViewIterator( PixelView *source,PixelView *duplex,PixelView *destination, DuplexTransferPixelViewMethod transfer,void *context) { #define DuplexTransferPixelViewTag "PixelView/DuplexTransfer" ExceptionInfo *exception; Image *destination_image, *duplex_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; duplex_image=duplex->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *restrict duplex_indexes, *restrict indexes; register const PixelPacket *restrict duplex_pixels, *restrict pixels; register IndexPacket *restrict destination_indexes; register ssize_t x; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) 
PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y, duplex->region.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view); for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x); if (duplex_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); if (duplex_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetIndex(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (transfer(source,duplex,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum( 
destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator) #endif proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag, progress++,source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewException() returns the severity, reason, and description of any % error that occurs when utilizing a pixel view. % % The format of the GetPixelViewException method is: % % char *GetPixelViewException(const PixelWand *pixel_view, % ExceptionType *severity) % % A description of each parameter follows: % % o pixel_view: the pixel pixel_view. % % o severity: the severity of the error is returned here. 
% */
/*
  GetPixelViewException() returns the severity, reason, and description of
  any error that occurs when utilizing a pixel view.  The returned string is
  allocated with AcquireQuantumMemory(); the caller is responsible for
  releasing it.
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
  ExceptionType *severity)
{
  char
    *message;

  assert(pixel_view != (const PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=pixel_view->exception->severity;
  message=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,sizeof(*message));
  if (message == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  *message='\0';
  /*
    Localized reason first, then the optional "(description)" suffix.
  */
  if (pixel_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(message,GetLocaleExceptionMessage(
      pixel_view->exception->severity,pixel_view->exception->reason),
      MaxTextExtent);
  if (pixel_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(message," (",MaxTextExtent);
      (void) ConcatenateMagickString(message,GetLocaleExceptionMessage(
        pixel_view->exception->severity,pixel_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(message,")",MaxTextExtent);
    }
  return(message);
}

/*
  GetPixelViewHeight() returns the pixel view height.
% */
/*
  GetPixelViewHeight() returns the height, in pixels, of the view's region.
*/
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.height);
}

/*
  GetPixelViewIterator() iterates over the pixel view in parallel and calls
  the get callback for each scanline of the view.  The pixel region is not
  confined to the image canvas; any updates to the pixels in the callback
  are ignored.  Use '#pragma omp critical' in the callback around any code
  that must execute single-threaded.

    MagickBooleanType GetPixelViewIterator(PixelView *source,
      GetPixelViewMethod get,void *context)

  source: the source pixel view; get: the get callback method; context: the
  user defined context.
% */ WandExport MagickBooleanType GetPixelViewIterator(PixelView *source, GetPixelViewMethod get,void *context) { #define GetPixelViewTag "PixelView/Get" Image *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (get == (GetPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *indexes; register const PixelPacket *pixels; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (get(source,context) == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_GetPixelViewIterator) #endif proceed=SetImageProgress(source_image,GetPixelViewTag,progress++, source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % 
% G e t P i x e l V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewPixels() returns the pixel view pixel_wands. % % The format of the GetPixelViewPixels method is: % % PixelWand *GetPixelViewPixels(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view) { const int id = GetOpenMPThreadId(); assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->pixel_wands[id]); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewWand() returns the magick wand associated with the pixel view. % % The format of the GetPixelViewWand method is: % % MagickWand *GetPixelViewWand(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w W i d t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewWidth() returns the pixel view width. % % The format of the GetPixelViewWidth method is: % % size_t GetPixelViewWidth(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport size_t GetPixelViewWidth(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w X % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewX() returns the pixel view x offset. % % The format of the GetPixelViewX method is: % % ssize_t GetPixelViewX(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport ssize_t GetPixelViewX(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.x); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w Y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewY() returns the pixel view y offset. % % The format of the GetPixelViewY method is: % % ssize_t GetPixelViewY(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport ssize_t GetPixelViewY(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.y); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPixelView() returns MagickTrue if the the parameter is verified as a pixel % view container. % % The format of the IsPixelView method is: % % MagickBooleanType IsPixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view) { size_t length; if (pixel_view == (const PixelView *) NULL) return(MagickFalse); if (pixel_view->signature != WandSignature) return(MagickFalse); length=strlen(PixelViewId); if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C l i p P a t h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickClipPathImage() clips along the named paths from the 8BIM profile, if % present. Later operations take effect inside the path. Id may be a number % if preceded with #, to work on a numbered path, e.g., "#1" to use the first % path. % % The format of the MagickClipPathImage method is: % % MagickBooleanType MagickClipPathImage(MagickWand *wand, % const char *pathname,const MagickBooleanType inside) % % A description of each parameter follows: % % o wand: the magick wand. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % */ WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand, const char *pathname,const MagickBooleanType inside) { return(MagickClipImagePath(wand,pathname,inside)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetFillAlpha() returns the alpha used when drawing using the fill % color or fill texture. Fully opaque is 1.0. 
% % The format of the DrawGetFillAlpha method is: % % double DrawGetFillAlpha(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport double DrawGetFillAlpha(const DrawingWand *wand) { return(DrawGetFillOpacity(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t S t r o k e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetStrokeAlpha() returns the alpha of stroked object outlines. % % The format of the DrawGetStrokeAlpha method is: % % double DrawGetStrokeAlpha(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. */ WandExport double DrawGetStrokeAlpha(const DrawingWand *wand) { return(DrawGetStrokeOpacity(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P e e k G r a p h i c W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPeekGraphicWand() returns the current drawing wand. % % The format of the PeekDrawingWand method is: % % DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand) { return(PeekDrawingWand(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P o p G r a p h i c C o n t e x t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPopGraphicContext() destroys the current drawing wand and returns to the % previously pushed drawing wand. Multiple drawing wands may exist. It is an % error to attempt to pop more drawing wands than have been pushed, and it is % proper form to pop all drawing wands which have been pushed. 
% % The format of the DrawPopGraphicContext method is: % % MagickBooleanType DrawPopGraphicContext(DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport void DrawPopGraphicContext(DrawingWand *wand) { (void) PopDrawingWand(wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P u s h G r a p h i c C o n t e x t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPushGraphicContext() clones the current drawing wand to create a new % drawing wand. The original drawing wand(s) may be returned to by % invoking PopDrawingWand(). The drawing wands are stored on a drawing wand % stack. For every Pop there must have already been an equivalent Push. % % The format of the DrawPushGraphicContext method is: % % MagickBooleanType DrawPushGraphicContext(DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport void DrawPushGraphicContext(DrawingWand *wand) { (void) PushDrawingWand(wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w S e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawSetFillAlpha() sets the alpha to use when drawing using the fill % color or fill texture. Fully opaque is 1.0. % % The format of the DrawSetFillAlpha method is: % % void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha) % % A description of each parameter follows: % % o wand: the drawing wand. 
% % o fill_alpha: fill alpha % */ WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha) { DrawSetFillOpacity(wand,fill_alpha); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w S e t S t r o k e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawSetStrokeAlpha() specifies the alpha of stroked object outlines. % % The format of the DrawSetStrokeAlpha method is: % % void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha) % % A description of each parameter follows: % % o wand: the drawing wand. % % o stroke_alpha: stroke alpha. The value 1.0 is opaque. % */ WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha) { DrawSetStrokeOpacity(wand,stroke_alpha); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C o l o r F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickColorFloodfillImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % The format of the MagickColorFloodfillImage method is: % % MagickBooleanType MagickColorFloodfillImage(MagickWand *wand, % const PixelWand *fill,const double fuzz,const PixelWand *bordercolor, % const ssize_t x,const ssize_t y) % % A description of each parameter follows: % % o wand: the magick wand. % % o fill: the floodfill color pixel wand. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. 
For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now interpreted % as the same color for the purposes of the floodfill. % % o bordercolor: the border color pixel wand. % % o x,y: the starting location of the operation. % */ WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand, const PixelWand *fill,const double fuzz,const PixelWand *bordercolor, const ssize_t x,const ssize_t y) { DrawInfo *draw_info; MagickBooleanType status; PixelPacket target; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL); PixelGetQuantumColor(fill,&draw_info->fill); (void) GetOneVirtualPixel(wand->images,x % wand->images->columns, y % wand->images->rows,&target,wand->exception); if (bordercolor != (PixelWand *) NULL) PixelGetQuantumColor(bordercolor,&target); wand->images->fuzz=fuzz; status=ColorFloodfillImage(wand->images,draw_info,target,x,y, bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod); if (status == MagickFalse) InheritException(wand->exception,&wand->images->exception); draw_info=DestroyDrawInfo(draw_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k D e s c r i b e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickDescribeImage() identifies an image by printing its attributes to the % file. Attributes include the image width, height, size, and others. % % The format of the MagickDescribeImage method is: % % const char *MagickDescribeImage(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. 
% */ WandExport char *MagickDescribeImage(MagickWand *wand) { return(MagickIdentifyImage(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k F l a t t e n I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickFlattenImages() merges a sequence of images. This useful for % combining Photoshop layers into a single image. % % The format of the MagickFlattenImages method is: % % MagickWand *MagickFlattenImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickFlattenImages(MagickWand *wand) { Image *flatten_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); flatten_image=FlattenImages(wand->images,wand->exception); if (flatten_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,flatten_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageAttribute() returns a value associated with the specified % property. Use MagickRelinquishMemory() to free the value when you are % finished with it. % % The format of the MagickGetImageAttribute method is: % % char *MagickGetImageAttribute(MagickWand *wand,const char *property) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. 
%
*/
/* Deprecated alias: forwards to MagickGetImageProperty(). */
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  return(MagickGetImageProperty(wand,property));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k G e t I m a g e I n d e x                                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageIndex() returns the index of the current image.
%
%  The format of the MagickGetImageIndex method is:
%
%      ssize_t MagickGetImageIndex(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
/* Deprecated alias: forwards to MagickGetIteratorIndex(). */
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  return(MagickGetIteratorIndex(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k G e t I m a g e C h a n n e l E x t r e m a                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageChannelExtrema() gets the extrema for one or more image
%  channels.
%
%  The format of the MagickGetImageChannelExtrema method is:
%
%      MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
%        const ChannelType channel,size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the image channel(s).
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* minima/maxima are written only on the current image in the list */
  status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k G e t I m a g e E x t r e m a                                 %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageExtrema() gets the extrema for the image.
%
%  The format of the MagickGetImageExtrema method is:
%
%      MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
%        size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageExtrema(wand->images,minima,maxima,wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k G e t I m a g e M a t t e                                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageMatte() returns MagickTrue if the image has a matte channel
%  otherwise MagickFalse.
%
%  The format of the MagickGetImageMatte method is:
%
%      size_t MagickGetImageMatte(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* matte is the current image's MagickBooleanType matte flag */
  return(wand->images->matte);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k G e t I m a g e P i x e l s                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImagePixels() extracts pixel data from an image and returns it to
%  you.  The method returns MagickTrue on success otherwise MagickFalse if an
%  error is encountered.  The data is returned as char, short int, int,
%  ssize_t, float, or double in the order specified by map.
%
%
%  Suppose you want to extract the first scanline of a 640x480 image as
%  character data in red-green-blue order:
%
%      MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickGetImagePixels method is:
%
%      MagickBooleanType MagickGetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x, y, columns, rows:  These values define the perimeter
%      of a region of pixels you want to extract.
%
%    o map:  This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1] otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, DoublePixel, FloatPixel,
%      IntegerPixel, LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contain the pixel components as defined
%      by map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
*/
/* Deprecated alias: forwards to MagickExportImagePixels(). */
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  void *pixels)
{
  return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k G e t I m a g e S i z e                                       %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageSize() returns the image length in bytes.
%
%
%  The format of the MagickGetImageSize method is:
%
%      MagickBooleanType MagickGetImageSize(MagickWand *wand,
%        MagickSizeType *length)
%
%  NOTE(review): the documented signature above does not match the actual
%  declaration below (which returns MagickSizeType directly and takes no
%  length argument) -- the doc appears stale; confirm against the header.
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o length: the image length in bytes.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* size of the current image's blob (encoded) representation */
  return(GetBlobSize(wand->images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M a p I m a g e                                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMapImage() replaces the colors of an image with the closest color
%  from a reference image.
%
%  The format of the MagickMapImage method is:
%
%      MagickBooleanType MagickMapImage(MagickWand *wand,
%        const MagickWand *map_wand,const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o map: the map wand.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* both the target wand and the map wand must contain at least one image */
  if ((wand->images == (Image *) NULL) ||
      (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M a t t e F l o o d f i l l I m a g e                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMatteFloodfillImage() changes the transparency value of any pixel
%  that matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the transparency value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickMatteFloodfillImage method is:
%
%      MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
%        const double alpha,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    NOTE(review): draw_info is cloned and destroyed but never passed to
    MatteFloodfillImage() -- it appears to be dead code carried over from
    MagickColorFloodfillImage(); confirm before removing.
  */
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  /* seed the target color from the pixel at (x,y), wrapped into bounds */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  /* side effect: the fuzz value remains set on the image after this call */
  wand->images->fuzz=fuzz;
  /* alpha (1.0 = opaque) is converted to a quantum opacity (0 = opaque) */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M e d i a n F i l t e r I m a g e                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMedianFilterImage() applies a digital filter that improves the
%  quality of a noisy image.  Each pixel is replaced by the median in a set
%  of neighboring pixels as defined by radius.
%
%  The format of the MagickMedianFilterImage method is:
%
%      MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  Image
    *median_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  median_image=MedianFilterImage(wand->images,radius,wand->exception);
  if (median_image == (Image *) NULL)
    return(MagickFalse);
  /* the filtered image replaces the current image in the wand's list */
  ReplaceImageInList(&wand->images,median_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M i n i m u m I m a g e s                                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMinimumImages() returns the minimum intensity of an image sequence.
%
%  The format of the MagickMinimumImages method is:
%
%      MagickWand *MagickMinimumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  Image
    *minimum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* empty wand: no exception is thrown here, NULL is simply returned */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  minimum_image=EvaluateImages(wand->images,MinEvaluateOperator,
    wand->exception);
  if (minimum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,minimum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M o d e I m a g e                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickModeImage() makes each pixel the 'predominant color' of the
%  neighborhood of the specified radius.
%
%
%  The format of the MagickModeImage method is:
%
%      MagickBooleanType MagickModeImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  Image
    *mode_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  mode_image=ModeImage(wand->images,radius,wand->exception);
  if (mode_image == (Image *) NULL)
    return(MagickFalse);
  /* the filtered image replaces the current image in the wand's list */
  ReplaceImageInList(&wand->images,mode_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M o s a i c I m a g e s                                       %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMosaicImages() inlays an image sequence to form a single coherent
%  picture.  It returns a wand with each image in the sequence composited at
%  the location defined by the page offset of the image.
%
%  The format of the MagickMosaicImages method is:
%
%      MagickWand *MagickMosaicImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *mosaic_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* empty wand: no exception is thrown here, NULL is simply returned */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  mosaic_image=MosaicImages(wand->images,wand->exception);
  if (mosaic_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,mosaic_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k O p a q u e I m a g e                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickOpaqueImage method is:
%
%      MagickBooleanType MagickOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%
*/
/* Deprecated alias: forwards to MagickPaintOpaqueImage(). */
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  return(MagickPaintOpaqueImage(wand,target,fill,fuzz));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k P a i n t F l o o d f i l l I m a g e                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintFloodfillImage() changes the color value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the color value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickPaintFloodfillImage method is:
%
%      MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
%        const ChannelType channel,const PixelWand *fill,const double fuzz,
%        const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  /* MagickFalse: paint matching pixels rather than inverting the match */
  status=MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k P a i n t O p a q u e I m a g e                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintOpaqueImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickPaintOpaqueImage method is:
%
%      MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%      MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
%        const ChannelType channel,const PixelWand *target,
%        const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%
*/
/* Deprecated alias: applies the channel variant to the default channels. */
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  return(MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz));
}

WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  MagickBooleanType
    status;

  /* MagickFalse: paint matching pixels rather than inverting the match */
  status=MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k P a i n t T r a n s p a r e n t I m a g e                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintTransparentImage() changes any pixel that matches color with
%  the color defined by fill.
%
%  The format of the MagickPaintTransparentImage method is:
%
%      MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%
*/
/* Deprecated alias: forwards with invert=MagickFalse. */
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  return(MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k R e c o l o r I m a g e                                       %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRecolorImage() apply color transformation to an image.  The method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be
%  used, typically one uses a 5x5 matrix for an RGBA image and a 6x6 for
%  CMYKA (or RGBA with offsets).  The matrix is similar to those used by
%  Adobe Flash except offsets are in column 6 rather than 5 (in support of
%  CMYKA images) and offsets are normalized (divide Flash offset by 255).
%
%  The format of the MagickRecolorImage method is:
%
%      MagickBooleanType MagickRecolorImage(MagickWand *wand,
%        const size_t order,const double *color_matrix)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o order: the number of columns and rows in the color matrix.
%
%    o color_matrix: An array of doubles representing the color matrix.
%
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *transform_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* a NULL matrix is rejected quietly, before the no-images check */
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  transform_image=RecolorImage(wand->images,order,color_matrix,
    wand->exception);
  if (transform_image == (Image *) NULL)
    return(MagickFalse);
  /* the transformed image replaces the current image in the wand's list */
  ReplaceImageInList(&wand->images,transform_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k R e d u c e N o i s e I m a g e                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickReduceNoiseImage() smooths the contours of an image while still
%  preserving edge information.  The algorithm works by replacing each pixel
%  with its neighbor closest in value.  A neighbor is defined by radius.  Use
%  a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
%  The format of the MagickReduceNoiseImage method is:
%
%      MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
%
*/
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  Image
    *noise_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  noise_image=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (noise_image == (Image *) NULL)
    return(MagickFalse);
  /* the filtered image replaces the current image in the wand's list */
  ReplaceImageInList(&wand->images,noise_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M a x i m u m I m a g e s                                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMaximumImages() returns the maximum intensity of an image sequence.
%
%  The format of the MagickMaximumImages method is:
%
%      MagickWand *MagickMaximumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  Image
    *maximum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* empty wand: no exception is thrown here, NULL is simply returned */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator,
    wand->exception);
  if (maximum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,maximum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k S e t I m a g e A t t r i b u t e                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageAttribute() associates a property with an image.
%
% % The format of the MagickSetImageAttribute method is: % % MagickBooleanType MagickSetImageAttribute(MagickWand *wand, % const char *property,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. % % o value: the value. % */ WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand, const char *property,const char *value) { return(SetImageProperty(wand->images,property,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageIndex() set the current image to the position of the list % specified with the index parameter. % % The format of the MagickSetImageIndex method is: % % MagickBooleanType MagickSetImageIndex(MagickWand *wand, % const ssize_t index) % % A description of each parameter follows: % % o wand: the magick wand. % % o index: the scene number. % */ WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand, const ssize_t index) { return(MagickSetIteratorIndex(wand,index)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k S e t I m a g e O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageOption() associates one or options with a particular image % format (.e.g MagickSetImageOption(wand,"jpeg","perserve","yes"). % % The format of the MagickSetImageOption method is: % % MagickBooleanType MagickSetImageOption(MagickWand *wand, % const char *format,const char *key,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o format: the image format. % % o key: The key. % % o value: The value. 
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    option[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /*
    Build a "format:key=value" option string and record it on the wand's
    image_info; this affects subsequent coder operations, not images already
    in the wand.
  */
  (void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,option));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k T r a n s p a r e n t I m a g e                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickTransparentImage method is:
%
%      MagickBooleanType MagickTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%
*/
/* Deprecated alias: forwards to MagickPaintTransparentImage(). */
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  return(MagickPaintTransparentImage(wand,target,alpha,fuzz));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k R e g i o n O f I n t e r e s t I m a g e                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRegionOfInterestImage() extracts a region of the image and returns
%  it as a new wand.
%
%  The format of the MagickRegionOfInterestImage method is:
%
%      MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
%        const size_t width,const size_t height,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o width: the region width.
%
%    o height: the region height.
%
%    o x: the region x offset.
%
%    o y: the region y offset.
%
*/
/* Deprecated alias: forwards to MagickGetImageRegion(). */
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  return(MagickGetImageRegion(wand,width,height,x,y));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k S e t I m a g e P i x e l s                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel datand stores it in the image at the
%  location you specify.  The method returns MagickFalse on success otherwise
%  MagickTrue if an error is encountered.  The pixel data can be either char,
%  short int, int, ssize_t, float, or double in the order specified by map.
%
%
%  Suppose your want to upload the first scanline of a 640x480 image from
%  character data in red-green-blue order:
%
%      MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickSetImagePixels method is:
%
%      MagickBooleanType MagickSetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        const void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x, y, columns, rows:  These values define the perimeter of a region
%      of pixels you want to define.
%
%    o map:  This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1] otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, ShortPixel, IntegerPixel,
%      LongPixel, FloatPixel, or DoublePixel.
%
%    o pixels: This array of values contain the pixel components as defined
%      by map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
*/
/* Deprecated alias: forwards to MagickImportImagePixels(). */
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  const void *pixels)
{
  return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k W r i t e I m a g e B l o b                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickWriteImageBlob() implements direct to memory image formats.
It
%  returns the image as a blob and its length.  Use MagickSetFormat() to
%  set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
%  Use MagickRelinquishMemory() to free the blob when you are done with it.
%
%  The format of the MagickWriteImageBlob method is:
%
%      unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  /* deprecated alias for MagickGetImageBlob(); caller frees the blob with
     MagickRelinquishMemory() */
  return(MagickGetImageBlob(wand,length));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w P i x e l V i e w                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelView() returns a pixel view required for all other methods in the
%  Pixel View API.
%
%  The format of the NewPixelView method is:
%
%      PixelView *NewPixelView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/*
  AcquirePixelsThreadSet() allocates one array of `number_wands` pixel wands
  per worker thread so each thread of a parallel view iteration owns a private
  row of wands.  Returns NULL on allocation failure; on partial failure the
  rows already allocated are released via DestroyPixelsThreadSet().
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* zero the table so a partial tear-down can distinguish allocated rows
     from unallocated ones */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* wand must be assigned before the cache view is opened on its images */
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  /* a plain NewPixelView() spans the whole canvas (region.x/y stay 0) */
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  /* one wand per column, per thread */
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w P i x e l V i e w R e g i o n                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,width,height:  These values define the perimeter of a region of
%      pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Assign the wand BEFORE acquiring the cache view: the view is opened on
    pixel_view->wand->images, and pixel_view->wand was NULL (zeroed by
    ResetMagickMemory above) until this assignment.  The previous ordering
    dereferenced a NULL wand pointer.  This now matches NewPixelView().
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  /* one pixel wand per column, per worker thread */
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i x e l G e t N e x t R o w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelGetNextRow() returns the next row as an array of pixel wands from the
%  pixel iterator.  The row length is fetched internally but not returned;
%  use PixelGetNextIteratorRow() directly if you need the wand count.
%
%  The format of the PixelGetNextRow method is:
%
%      PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    number_wands;

  /* deprecated alias: the row length written to number_wands is discarded */
  return(PixelGetNextIteratorRow(iterator,&number_wands));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i x e l I t e r a t o r G e t E x c e p t i o n                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelIteratorGetException() returns the severity, reason, and description of
%  any error that occurs when using other methods in this API.
%
%  The format of the PixelIteratorGetException method is:
%
%      char *PixelIteratorGetException(const PixelIterator *iterator,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  /* deprecated alias for PixelGetIteratorException() */
  return(PixelGetIteratorException(iterator,severity));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t P i x e l V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your set method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.
The pixels are initially
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag  "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* the callback writes pixels, so the image must be DirectClass */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  /* NOTE(review): loop bound is region.height, not region.y+region.height;
     for views with a non-zero y offset this visits fewer rows than the
     region describes -- confirm intent against NewPixelViewRegion() use. */
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    if (status == MagickFalse)
      continue;  /* an earlier row failed; skip remaining work */
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* let the user callback fill this thread's row of pixel wands */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* copy the wand colors back into the authentic pixel row */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* the critical section guards the single progress statement below */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  region is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination pixel view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferPixelViewIterator method is:
%
%      MagickBooleanType TransferPixelViewIterator(PixelView *source,
%        PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o destination: the destination pixel view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag  "PixelView/Transfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  /* NOTE(review): `destination` is dereferenced below but never asserted,
     unlike `source` -- confirm callers always pass a valid view. */
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /* read-only (virtual) source row; may lie outside the canvas */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* load the source row into this thread's source pixel wands */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* writable destination row; must stay within the canvas */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* NOTE(review): the destination wands are seeded from the SOURCE row
       (`pixels`/`indexes`), not from `destination_pixels`; presumably the
       transfer callback overwrites them before the write-back below --
       confirm this seeding is intended. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    /* copy the (possibly modified) destination wands back to the image */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* NOTE(review): inherits the SOURCE view's exception after a
           destination sync failure -- looks like a copy/paste slip; verify
           against the sibling iterators which use the destination view. */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U p d a t e P i x e l V i e w I t e r a t o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdatePixelViewIterator() iterates over the pixel view in parallel and calls
%  your update method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdatePixelViewIterator method is:
%
%      MagickBooleanType UpdatePixelViewIterator(PixelView *source,
%        UpdatePixelViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* pixels are modified in place, so the image must be DirectClass */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    if (status == MagickFalse)
      continue;  /* an earlier row failed; skip remaining work */
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* load the row into this thread's pixel wands for the callback */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    /* write the updated wand colors back into the authentic row */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* the critical section guards the single progress statement below */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
#endif
/* ==== cones.c ==== */
#include "cones.h" #include "linalg.h" #include "scs.h" #include "scs_blas.h" /* contains BLAS(X) macros and type info */ #include "util.h" #define CONE_TOL (1e-9) #define CONE_THRESH (1e-8) #define EXP_CONE_MAX_ITERS (100) #define BOX_CONE_MAX_ITERS (25) #define POW_CONE_MAX_ITERS (20) /* In the box cone projection we penalize the `t` term additionally by this * factor. This encourages the `t` term to stay close to the incoming `t` term, * which should provide better convergence since typically the `t` term does * not appear in the linear system other than `t = 1`. Setting to 1 is * the vanilla projection. */ #define BOX_T_SCALE (1.) /* Box cone limits (+ or -) taken to be INF */ #define MAX_BOX_VAL (1e15) #ifdef USE_LAPACK #ifdef __cplusplus extern "C" { #endif void BLAS(syev)(const char *jobz, const char *uplo, blas_int *n, scs_float *a, blas_int *lda, scs_float *w, scs_float *work, blas_int *lwork, blas_int *info); blas_int BLAS(syrk)(const char *uplo, const char *trans, const blas_int *n, const blas_int *k, const scs_float *alpha, const scs_float *a, const blas_int *lda, const scs_float *beta, scs_float *c, const blas_int *ldc); void BLAS(scal)(const blas_int *n, const scs_float *sa, scs_float *sx, const blas_int *incx); #ifdef __cplusplus } #endif #endif /* set the vector of rho y terms, based on scale and cones */ void SCS(set_rho_y_vec)(const ScsCone *k, scs_float scale, scs_float *rho_y_vec, scs_int m) { scs_int i, count = 0; /* f cone */ for (i = 0; i < k->z; ++i) { /* set rho_y small for z, similar to rho_x term, since z corresponds to * dual free cone, this effectively decreases penalty on those entries * and lets them be determined almost entirely by the linear system solve */ rho_y_vec[i] = 1.0 / (1000. * scale); } count += k->z; /* others */ for (i = count; i < m; ++i) { rho_y_vec[i] = 1.0 / scale; } /* Note, if updating this to use different scales for other cones (e.g. 
box) * then you must be careful to also include the effect of the rho_y_vec * in the cone projection operator. */ /* Increase rho_y_vec for the t term in the box cone */ if (k->bsize) { rho_y_vec[k->z + k->l] *= BOX_T_SCALE; } } static inline scs_int get_sd_cone_size(scs_int s) { return (s * (s + 1)) / 2; } /* * boundaries will contain array of indices of rows of A corresponding to * cone boundaries, boundaries[0] is starting index for cones of size strictly * larger than 1, boundaries malloc-ed here so should be freed. */ scs_int SCS(set_cone_boundaries)(const ScsCone *k, scs_int **cone_boundaries) { scs_int i, s_cone_sz, count = 0; scs_int cone_boundaries_len = 1 + k->qsize + k->ssize + k->ed + k->ep + k->psize; scs_int *b = (scs_int *)scs_calloc(cone_boundaries_len, sizeof(scs_int)); /* cones that can be scaled independently */ b[count] = k->z + k->l + k->bsize; count += 1; /* started at 0 now move to first entry */ for (i = 0; i < k->qsize; ++i) { b[count + i] = k->q[i]; } count += k->qsize; for (i = 0; i < k->ssize; ++i) { s_cone_sz = get_sd_cone_size(k->s[i]); b[count + i] = s_cone_sz; } count += k->ssize; /* add ssize here not ssize * (ssize + 1) / 2 */ /* exp cones */ for (i = 0; i < k->ep + k->ed; ++i) { b[count + i] = 3; } count += k->ep + k->ed; /* power cones */ for (i = 0; i < k->psize; ++i) { b[count + i] = 3; } count += k->psize; /* other cones */ *cone_boundaries = b; return cone_boundaries_len; } static scs_int get_full_cone_dims(const ScsCone *k) { scs_int i, c = k->z + k->l + k->bsize; if (k->qsize) { for (i = 0; i < k->qsize; ++i) { c += k->q[i]; } } if (k->ssize) { for (i = 0; i < k->ssize; ++i) { c += get_sd_cone_size(k->s[i]); } } if (k->ed) { c += 3 * k->ed; } if (k->ep) { c += 3 * k->ep; } if (k->psize) { c += 3 * k->psize; } return c; } scs_int SCS(validate_cones)(const ScsData *d, const ScsCone *k) { scs_int i; if (get_full_cone_dims(k) != d->m) { scs_printf("cone dimensions %li not equal to num rows in A = m = %li\n", 
(long)get_full_cone_dims(k), (long)d->m); return -1; } if (k->z && k->z < 0) { scs_printf("free cone dimension error\n"); return -1; } if (k->l && k->l < 0) { scs_printf("lp cone dimension error\n"); return -1; } if (k->bsize) { if (k->bsize < 0) { scs_printf("box cone dimension error\n"); return -1; } for (i = 0; i < k->bsize - 1; ++i) { if (k->bl[i] > k->bu[i]) { scs_printf("infeasible: box lower bound larger than upper bound\n"); return -1; } } } if (k->qsize && k->q) { if (k->qsize < 0) { scs_printf("soc cone dimension error\n"); return -1; } for (i = 0; i < k->qsize; ++i) { if (k->q[i] < 0) { scs_printf("soc cone dimension error\n"); return -1; } } } if (k->ssize && k->s) { if (k->ssize < 0) { scs_printf("sd cone dimension error\n"); return -1; } for (i = 0; i < k->ssize; ++i) { if (k->s[i] < 0) { scs_printf("sd cone dimension error\n"); return -1; } } } if (k->ed && k->ed < 0) { scs_printf("ep cone dimension error\n"); return -1; } if (k->ep && k->ep < 0) { scs_printf("ed cone dimension error\n"); return -1; } if (k->psize && k->p) { if (k->psize < 0) { scs_printf("power cone dimension error\n"); return -1; } for (i = 0; i < k->psize; ++i) { if (k->p[i] < -1 || k->p[i] > 1) { scs_printf("power cone error, values must be in [-1,1]\n"); return -1; } } } return 0; } void SCS(finish_cone)(ScsConeWork *c) { #ifdef USE_LAPACK if (c->Xs) { scs_free(c->Xs); } if (c->Z) { scs_free(c->Z); } if (c->e) { scs_free(c->e); } if (c->work) { scs_free(c->work); } #endif if (c->s) { scs_free(c->s); } if (c->bu) { scs_free(c->bu); } if (c->bl) { scs_free(c->bl); } if (c) { scs_free(c); } } char *SCS(get_cone_header)(const ScsCone *k) { char *tmp = (char *)scs_malloc(sizeof(char) * 512); scs_int i, soc_vars, sd_vars; sprintf(tmp, "cones: "); if (k->z) { sprintf(tmp + strlen(tmp), "\t z: primal zero / dual free vars: %li\n", (long)k->z); } if (k->l) { sprintf(tmp + strlen(tmp), "\t l: linear vars: %li\n", (long)k->l); } if (k->bsize) { sprintf(tmp + strlen(tmp), "\t b: box cone 
vars: %li\n", (long)(k->bsize)); } soc_vars = 0; if (k->qsize && k->q) { for (i = 0; i < k->qsize; i++) { soc_vars += k->q[i]; } sprintf(tmp + strlen(tmp), "\t q: soc vars: %li, qsize: %li\n", (long)soc_vars, (long)k->qsize); } sd_vars = 0; if (k->ssize && k->s) { for (i = 0; i < k->ssize; i++) { sd_vars += get_sd_cone_size(k->s[i]); } sprintf(tmp + strlen(tmp), "\t s: psd vars: %li, ssize: %li\n", (long)sd_vars, (long)k->ssize); } if (k->ep || k->ed) { sprintf(tmp + strlen(tmp), "\t e: exp vars: %li, dual exp vars: %li\n", (long)(3 * k->ep), (long)(3 * k->ed)); } if (k->psize && k->p) { sprintf(tmp + strlen(tmp), "\t p: primal + dual power vars: %li\n", (long)(3 * k->psize)); } return tmp; } static scs_float exp_newton_one_d(scs_float rho, scs_float y_hat, scs_float z_hat, scs_float w) { scs_float t_prev, t = MAX(w - z_hat, MAX(-z_hat, 1e-9)); scs_float f = 1., fp = 1.; scs_int i; for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) { t_prev = t; f = t * (t + z_hat) / rho / rho - y_hat / rho + log(t / rho) + 1; fp = (2 * t + z_hat) / rho / rho + 1 / t; t = t - f / fp; if (t <= -z_hat) { t = -z_hat; break; } else if (t <= 0) { t = 0; break; } else if (ABS(t - t_prev) < CONE_TOL) { break; } else if (SQRTF(f * f / fp) < CONE_TOL) { break; } } if (i == EXP_CONE_MAX_ITERS) { scs_printf("warning: exp cone newton step hit maximum %i iters\n", (int)i); scs_printf("rho=%1.5e; y_hat=%1.5e; z_hat=%1.5e; w=%1.5e; f=%1.5e, " "fp=%1.5e, t=%1.5e, t_prev= %1.5e\n", rho, y_hat, z_hat, w, f, fp, t, t_prev); } return t + z_hat; } static void exp_solve_for_x_with_rho(const scs_float *v, scs_float *x, scs_float rho, scs_float w) { x[2] = exp_newton_one_d(rho, v[1], v[2], w); x[1] = (x[2] - v[2]) * x[2] / rho; x[0] = v[0] - rho; } static scs_float exp_calc_grad(const scs_float *v, scs_float *x, scs_float rho, scs_float w) { exp_solve_for_x_with_rho(v, x, rho, w); if (x[1] <= 1e-12) { return x[0]; } return x[0] + x[1] * log(x[1] / x[2]); } static void exp_get_rho_ub(const scs_float *v, scs_float 
*x, scs_float *ub, scs_float *lb) { *lb = 0; *ub = 0.125; while (exp_calc_grad(v, x, *ub, v[1]) > 0) { *lb = *ub; (*ub) *= 2; } } /* project onto the exponential cone, v has dimension *exactly* 3 */ static scs_int proj_exp_cone(scs_float *v) { scs_int i; scs_float ub, lb, rho, g, x[3]; scs_float r = v[0], s = v[1], t = v[2]; /* v in cl(Kexp) */ if ((s * exp(r / s) - t <= CONE_THRESH && s > 0) || (r <= 0 && s == 0 && t >= 0)) { return 0; } /* -v in Kexp^* */ if ((r > 0 && r * exp(s / r) + exp(1) * t <= CONE_THRESH) || (r == 0 && s <= 0 && t <= 0)) { memset(v, 0, 3 * sizeof(scs_float)); return 0; } /* special case with analytical solution */ if (r < 0 && s < 0) { v[1] = 0.0; v[2] = MAX(v[2], 0); return 0; } /* iterative procedure to find projection, bisects on dual variable: */ exp_get_rho_ub(v, x, &ub, &lb); /* get starting upper and lower bounds */ for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) { rho = (ub + lb) / 2; /* halfway between upper and lower bounds */ g = exp_calc_grad(v, x, rho, x[1]); /* calculates gradient wrt dual var */ if (g > 0) { lb = rho; } else { ub = rho; } if (ub - lb < CONE_TOL) { break; } } #if VERBOSITY > 10 scs_printf("exponential cone proj iters %i\n", (int)i); #endif if (i == EXP_CONE_MAX_ITERS) { scs_printf("warning: exp cone outer step hit maximum %i iters\n", (int)i); scs_printf("r=%1.5e; s=%1.5e; t=%1.5e\n", r, s, t); } v[0] = x[0]; v[1] = x[1]; v[2] = x[2]; return 0; } static scs_int set_up_sd_cone_work_space(ScsConeWork *c, const ScsCone *k) { scs_int i; #ifdef USE_LAPACK blas_int n_max = 0; blas_int neg_one = -1; blas_int info = 0; scs_float wkopt = 0.0; #if VERBOSITY > 0 #define _STR_EXPAND(tok) #tok #define _STR(tok) _STR_EXPAND(tok) scs_printf("BLAS(func) = '%s'\n", _STR(BLAS(func))); #endif /* eigenvector decomp workspace */ for (i = 0; i < k->ssize; ++i) { if (k->s[i] > n_max) { n_max = (blas_int)k->s[i]; } } c->Xs = (scs_float *)scs_calloc(n_max * n_max, sizeof(scs_float)); c->Z = (scs_float *)scs_calloc(n_max * n_max, 
sizeof(scs_float)); c->e = (scs_float *)scs_calloc(n_max, sizeof(scs_float)); /* workspace query */ BLAS(syev) ("Vectors", "Lower", &n_max, c->Xs, &n_max, SCS_NULL, &wkopt, &neg_one, &info); if (info != 0) { scs_printf("FATAL: syev failure, info = %li\n", (long)info); return -1; } c->lwork = (blas_int)(wkopt + 1); /* +1 for int casting safety */ c->work = (scs_float *)scs_calloc(c->lwork, sizeof(scs_float)); if (!c->Xs || !c->Z || !c->e || !c->work) { return -1; } return 0; #else for (i = 0; i < k->ssize; i++) { if (k->s[i] > 1) { scs_printf( "FATAL: Cannot solve SDPs without linked blas+lapack libraries\n"); scs_printf( "Install blas+lapack and re-compile SCS with blas+lapack library " "locations\n"); return -1; } } return 0; #endif } /* size of X is get_sd_cone_size(n) */ static scs_int proj_semi_definite_cone(scs_float *X, const scs_int n, ScsConeWork *c) { /* project onto the positive semi-definite cone */ #ifdef USE_LAPACK scs_int i, first_idx; blas_int nb = (blas_int)n; blas_int ncols_z; blas_int nb_plus_one = (blas_int)(n + 1); blas_int one_int = 1; scs_float zero = 0., one = 1.; scs_float sqrt2 = SQRTF(2.0); scs_float sqrt2_inv = 1.0 / sqrt2; scs_float *Xs = c->Xs; scs_float *Z = c->Z; scs_float *e = c->e; scs_float *work = c->work; blas_int lwork = c->lwork; blas_int info = 0; scs_float sq_eig_pos; #endif if (n == 0) { return 0; } if (n == 1) { X[0] = MAX(X[0], 0.); return 0; } #ifdef USE_LAPACK /* copy lower triangular matrix into full matrix */ for (i = 0; i < n; ++i) { memcpy(&(Xs[i * (n + 1)]), &(X[i * n - ((i - 1) * i) / 2]), (n - i) * sizeof(scs_float)); } /* rescale so projection works, and matrix norm preserved see http://www.seas.ucla.edu/~vandenbe/publications/mlbook.pdf pg 3 */ /* scale diags by sqrt(2) */ BLAS(scal)(&nb, &sqrt2, Xs, &nb_plus_one); /* not n_squared */ /* Solve eigenproblem, reuse workspaces */ BLAS(syev)("Vectors", "Lower", &nb, Xs, &nb, e, work, &lwork, &info); if (info != 0) { scs_printf("WARN: LAPACK syev error, info = %i\n", 
/* NOTE(review): this chunk opens inside proj_semi_definite_cone(); the
 * function head — presumably the LAPACK eigendecomposition that fills `e`
 * (eigenvalues, ascending), `Xs` (eigenvectors) and `info` — is above this
 * view; confirm against the full file. */
(int)info);
    if (info < 0) {
      return info;
    }
  }

  first_idx = -1;
  /* e is eigvals in ascending order, find first entry > 0 */
  for (i = 0; i < n; ++i) {
    if (e[i] > 0) {
      first_idx = i;
      break;
    }
  }
  if (first_idx == -1) {
    /* there are no positive eigenvalues, set X to 0 and return */
    memset(X, 0, sizeof(scs_float) * get_sd_cone_size(n));
    return 0;
  }
  /* Z is matrix of eigenvectors with positive eigenvalues */
  memcpy(Z, &Xs[first_idx * n], sizeof(scs_float) * n * (n - first_idx));
  /* scale Z by sqrt(eig) */
  for (i = first_idx; i < n; ++i) {
    sq_eig_pos = SQRTF(e[i]);
    BLAS(scal)(&nb, &sq_eig_pos, &Z[(i - first_idx) * n], &one_int);
  }
  /* Xs = Z Z' = V E V' */
  ncols_z = (blas_int)(n - first_idx);
  BLAS(syrk)("Lower", "NoTrans", &nb, &ncols_z, &one, Z, &nb, &zero, Xs, &nb);
  /* undo rescaling: scale diags by 1/sqrt(2) */
  BLAS(scal)(&nb, &sqrt2_inv, Xs, &nb_plus_one); /* not n_squared */
  /* extract just lower triangular matrix (packed lower-tri layout of X) */
  for (i = 0; i < n; ++i) {
    memcpy(&(X[i * n - ((i - 1) * i) / 2]), &(Xs[i * (n + 1)]),
           (n - i) * sizeof(scs_float));
  }
  return 0;
#else
  scs_printf("FAILURE: solving SDP but no blas/lapack libraries were found!\n");
  scs_printf("SCS will return nonsense!\n");
  SCS(scale_array)(X, NAN, n);
  return -1;
#endif
}

/* x(r) = 0.5*(xh + sqrt(xh^2 + 4*a*(rh - r)*r)), clipped away from zero;
 * sub-step helper for the power-cone Newton iteration below. */
static scs_float pow_calc_x(scs_float r, scs_float xh, scs_float rh,
                            scs_float a) {
  scs_float x = 0.5 * (xh + SQRTF(xh * xh + 4 * a * (rh - r) * r));
  return MAX(x, 1e-12);
}

/* derivative dx/dr used by the power-cone Newton iteration */
static scs_float pow_calcdxdr(scs_float x, scs_float xh, scs_float rh,
                              scs_float r, scs_float a) {
  return a * (rh - 2 * r) / (2 * x - xh);
}

/* residual f = x^a * y^(1-a) - r of the power-cone boundary equation */
static scs_float pow_calc_f(scs_float x, scs_float y, scs_float r,
                            scs_float a) {
  return POWF(x, a) * POWF(y, (1 - a)) - r;
}

/* df/dr of the residual above (chain rule through x(r), y(r)) */
static scs_float pow_calc_fp(scs_float x, scs_float y, scs_float dxdr,
                             scs_float dydr, scs_float a) {
  return POWF(x, a) * POWF(y, (1 - a)) * (a * dxdr / x + (1 - a) * dydr / y) -
         1;
}

/*
 * Routine to scale the limits of the box cone by the scaling diagonal mat D > 0
 *
 * want (t, s) \in K <==> (t', s') \in K'
 *
 * (t', s') = (d0 * t, D s) (overloading D to mean D[1:])
 *   (up to scalar scaling factor which we can ignore due to conic property)
 *
 * K = { (t, s) | t * l <= s <= t * u, t >= 0 } =>
 * { (t, s) | d0 * t * D l / d0 <= D s <= d0 * t D u / d0, t >= 0 } =>
 * { (t', s') | t' * l' <= s' <= t' u', t >= 0 } = K'
 * where l' = D l / d0, u' = D u / d0.
 */
static void normalize_box_cone(ScsConeWork *c, scs_float *D, scs_int bsize) {
  scs_int j;
  for (j = 0; j < bsize - 1; j++) {
    /* saturate huge limits to +/- infinity; otherwise rescale by D[j+1]/D[0]
     * (D == NULL means no scaling, limits pass through unchanged) */
    if (c->bu[j] >= MAX_BOX_VAL) {
      c->bu[j] = INFINITY;
    } else {
      c->bu[j] = D ? D[j + 1] * c->bu[j] / D[0] : c->bu[j];
    }
    if (c->bl[j] <= -MAX_BOX_VAL) {
      c->bl[j] = -INFINITY;
    } else {
      c->bl[j] = D ? D[j + 1] * c->bl[j] / D[0] : c->bl[j];
    }
  }
}

/* project onto { (t, s) | t * l <= s <= t * u, t >= 0 }, Newton's method on t
   tx = [t; s], total length = bsize
   uses Moreau since \Pi_K*(tx) = \Pi_K(-tx) + tx
   returns the converged t so the caller can warm-start the next call */
static scs_float proj_box_cone(scs_float *tx, const scs_float *bl,
                               const scs_float *bu, scs_int bsize,
                               scs_float t_warm_start) {
  scs_float *x, gt, ht, t_prev, t = t_warm_start;
  scs_int iter, j;
  if (bsize == 1) { /* special case */
    tx[0] = MAX(tx[0], 0.0);
    return tx[0];
  }
  x = &(tx[1]);
  /* should only require about 5 or so iterations, 1 or 2 if warm-started */
  for (iter = 0; iter < BOX_CONE_MAX_ITERS; iter++) {
    t_prev = t;
    /* incorporate the additional BOX_T_SCALE factor into the projection */
    gt = BOX_T_SCALE * (t - tx[0]); /* gradient */
    ht = BOX_T_SCALE;               /* hessian */
    for (j = 0; j < bsize - 1; j++) {
      /* only entries clipped by the box contribute to the 1-d objective */
      if (x[j] > t * bu[j]) {
        gt += (t * bu[j] - x[j]) * bu[j]; /* gradient */
        ht += bu[j] * bu[j];              /* hessian */
      } else if (x[j] < t * bl[j]) {
        gt += (t * bl[j] - x[j]) * bl[j]; /* gradient */
        ht += bl[j] * bl[j];              /* hessian */
      }
    }
    t = MAX(t - gt / MAX(ht, 1e-8), 0.); /* newton step */
#if VERBOSITY > 3
    scs_printf("iter %i, t_new %1.3e, t_prev %1.3e, gt %1.3e, ht %1.3e\n",
               iter, t, t_prev, gt, ht);
    scs_printf("ABS(gt / (ht + 1e-6)) %.4e, ABS(t - t_prev) %.4e\n",
               ABS(gt / (ht + 1e-6)), ABS(t - t_prev));
#endif
    /* TODO: sometimes this check can fail (ie, declare convergence before it
     * should) if ht is very large, which can happen with some pathological
     * problems.
     */
    if (ABS(gt / MAX(ht, 1e-6)) < 1e-12 * MAX(t, 1.) ||
        ABS(t - t_prev) < 1e-11 * MAX(t, 1.)) {
      break;
    }
  }
  if (iter == BOX_CONE_MAX_ITERS) {
    scs_printf("warning: box cone proj hit maximum %i iters\n", (int)iter);
  }
  /* with t fixed, clip s entrywise into [t*l, t*u] */
  for (j = 0; j < bsize - 1; j++) {
    if (x[j] > t * bu[j]) {
      x[j] = t * bu[j];
    } else if (x[j] < t * bl[j]) {
      x[j] = t * bl[j];
    }
    /* x[j] unchanged otherwise */
  }
  tx[0] = t;
#if VERBOSITY > 3
  scs_printf("box cone iters %i\n", (int)iter + 1);
#endif
  return t;
}

/* project onto SOC of size q */
static void proj_soc(scs_float *x, scs_int q) {
  if (q == 0) {
    return;
  }
  if (q == 1) {
    x[0] = MAX(x[0], 0.);
    return;
  }
  scs_float v1 = x[0];
  scs_float s = SCS(norm_2)(&(x[1]), q - 1);
  scs_float alpha = (s + v1) / 2.0;
  if (s <= v1) { /* already inside the cone */
    return;
  } else if (s <= -v1) { /* inside the polar cone: projection is 0 */
    memset(&(x[0]), 0, q * sizeof(scs_float));
  } else { /* boundary case: shrink tail to norm alpha, set head to alpha */
    x[0] = alpha;
    SCS(scale_array)(&(x[1]), alpha / s, q - 1);
  }
}

/* project v = (x, y, r) onto the 3-d power cone with parameter a,
 * via Newton's method on the scalar r */
static void proj_power_cone(scs_float *v, scs_float a) {
  scs_float xh = v[0], yh = v[1], rh = ABS(v[2]);
  scs_float x = 0.0, y = 0.0, r;
  scs_int i;
  /* v in K_a */
  if (xh >= 0 && yh >= 0 &&
      CONE_THRESH + POWF(xh, a) * POWF(yh, (1 - a)) >= rh) {
    return;
  }
  /* -v in K_a^* */
  if (xh <= 0 && yh <= 0 &&
      CONE_THRESH + POWF(-xh, a) * POWF(-yh, 1 - a) >=
          rh * POWF(a, a) * POWF(1 - a, 1 - a)) {
    v[0] = v[1] = v[2] = 0;
    return;
  }
  r = rh / 2;
  for (i = 0; i < POW_CONE_MAX_ITERS; ++i) {
    scs_float f, fp, dxdr, dydr;
    x = pow_calc_x(r, xh, rh, a);
    y = pow_calc_x(r, yh, rh, 1 - a);
    f = pow_calc_f(x, y, r, a);
    if (ABS(f) < CONE_TOL) {
      break;
    }
    dxdr = pow_calcdxdr(x, xh, rh, r, a);
    dydr = pow_calcdxdr(y, yh, rh, r, (1 - a));
    fp = pow_calc_fp(x, y, dxdr, dydr, a);
    /* damped Newton step, keeping r in [0, rh] */
    r = MAX(r - f / fp, 0);
    r = MIN(r, rh);
  }
  v[0] = x;
  v[1] = y;
  v[2] = (v[2] < 0) ? -(r) : (r);
}

/* project onto the primal K cone in the paper; walks x section by section
 * in the fixed order zero, nonneg, box, SOC, PSD, exp, dual exp, power */
static scs_int proj_cone(scs_float *x, const ScsCone *k, ScsConeWork *c,
                         scs_int normalize) {
  scs_int i, status;
  scs_int count = 0;
  if (k->z) { /* project onto primal zero / dual free cone */
    memset(x, 0, k->z * sizeof(scs_float));
    count += k->z;
  }
  if (k->l) { /* project onto positive orthant */
    for (i = count; i < count + k->l; ++i) {
      x[i] = MAX(x[i], 0.0);
    }
    count += k->l;
  }
  if (k->bsize) { /* project onto box cone */
    /* normalized runs use the equilibrated limits cached in c */
    if (normalize) {
      c->box_t_warm_start = proj_box_cone(&(x[count]), c->bl, c->bu, k->bsize,
                                          c->box_t_warm_start);
    } else {
      c->box_t_warm_start = proj_box_cone(&(x[count]), k->bl, k->bu, k->bsize,
                                          c->box_t_warm_start);
    }
    count += k->bsize; /* since b = (t,s), len(s) = bsize - 1 */
  }
  if (k->qsize && k->q) { /* project onto second-order cones */
    for (i = 0; i < k->qsize; ++i) {
      proj_soc(&(x[count]), k->q[i]);
      count += k->q[i];
    }
  }
  if (k->ssize && k->s) { /* project onto PSD cones */
    for (i = 0; i < k->ssize; ++i) {
      status = proj_semi_definite_cone(&(x[count]), k->s[i], c);
      if (status < 0) {
        return status;
      }
      count += get_sd_cone_size(k->s[i]);
    }
  }
  if (k->ep) {
    /*
     * exponential cone is not self dual, if s \in K
     * then y \in K^* and so if K is the primal cone
     * here we project onto K^*, via Moreau
     * \Pi_C^*(y) = y + \Pi_C(-y)
     */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (i = 0; i < k->ep; ++i) {
      proj_exp_cone(&(x[count + 3 * i]));
    }
    count += 3 * k->ep;
  }
  if (k->ed) { /* dual exponential cone */
    /*
     * exponential cone is not self dual, if s \in K
     * then y \in K^* and so if K is the primal cone
     * here we project onto K^*, via Moreau
     * \Pi_C^*(y) = y + \Pi_C(-y)
     */
    scs_int idx;
    scs_float r, s, t;
    SCS(scale_array)(&(x[count]), -1, 3 * k->ed); /* x = -x; */
#ifdef _OPENMP
#pragma omp parallel for private(r, s, t, idx)
#endif
    for (i = 0; i < k->ed; ++i) {
      idx = count + 3 * i;
      r = x[idx];
      s = x[idx + 1];
      t = x[idx + 2];
      proj_exp_cone(&(x[idx]));
      /* Moreau: subtract the (negated) input to get the dual projection */
      x[idx] -= r;
      x[idx + 1] -= s;
      x[idx + 2] -= t;
    }
    count += 3 * k->ed;
  }
  if (k->psize && k->p) { /* project onto power cones */
    scs_float v[3];
    scs_int idx;
    /* don't use openmp for power cone
    ifdef _OPENMP
    pragma omp parallel for private(v, idx)
    endif
    */
    for (i = 0; i < k->psize; ++i) {
      idx = count + 3 * i;
      if (k->p[i] >= 0) { /* primal power cone */
        proj_power_cone(&(x[idx]), k->p[i]);
      } else { /* dual power cone, using Moreau */
        v[0] = -x[idx];
        v[1] = -x[idx + 1];
        v[2] = -x[idx + 2];
        proj_power_cone(v, -k->p[i]);
        x[idx] += v[0];
        x[idx + 1] += v[1];
        x[idx + 2] += v[2];
      }
    }
    count += 3 * k->psize;
  }
  /* project onto OTHER cones */
  return 0;
}

/* Allocate and populate the per-solve cone workspace: the scratch copy
 * buffer c->s, equilibrated box-cone limits (when scal is given) and the
 * PSD workspace. Returns SCS_NULL on PSD-workspace allocation failure. */
ScsConeWork *SCS(init_cone)(const ScsCone *k, const ScsScaling *scal,
                            scs_int cone_len) {
  ScsConeWork *c = (ScsConeWork *)scs_calloc(1, sizeof(ScsConeWork));
  c->cone_len = cone_len;
  c->s = (scs_float *)scs_calloc(cone_len, sizeof(scs_float));
  if (k->bsize && k->bu && k->bl) {
    c->box_t_warm_start = 1.;
    if (scal) {
      /* keep private copies of the box limits, then scale them by D */
      c->bu = (scs_float *)scs_calloc(k->bsize - 1, sizeof(scs_float));
      c->bl = (scs_float *)scs_calloc(k->bsize - 1, sizeof(scs_float));
      memcpy(c->bu, k->bu, (k->bsize - 1) * sizeof(scs_float));
      memcpy(c->bl, k->bl, (k->bsize - 1) * sizeof(scs_float));
      /* also does some sanitizing */
      normalize_box_cone(c, scal ? &(scal->D[k->z + k->l]) : SCS_NULL,
                         k->bsize);
    }
  }
  if (k->ssize && k->s) {
    if (set_up_sd_cone_work_space(c, k) < 0) {
      SCS(finish_cone)(c);
      return SCS_NULL;
    }
  }
  return c;
}

/* outward facing cone projection routine
   performs projection in-place
   if normalize > 0 then will use normalized (equilibrated) cones if applicable.
*/
scs_int SCS(proj_dual_cone)(scs_float *x, const ScsCone *k, ScsConeWork *c,
                            scs_int normalize) {
  scs_int status;
  /* copy x, s = x */
  memcpy(c->s, x, c->cone_len * sizeof(scs_float));
  /* negate x -> -x */
  SCS(scale_array)(x, -1., c->cone_len);
  /* project -x onto cone, x -> Pi_K(-x) */
  status = proj_cone(x, k, c, normalize);
  /* return Pi_K*(x) = s + Pi_K(-x) */
  SCS(add_scaled_array)(x, c->s, c->cone_len, 1.);
  return status;
}
utils.h
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/libxsmm/libxsmm/                    *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Dhiraj Kalamkar (Intel Corp.)
******************************************************************************/

#ifndef _UTILS_H_
#define _UTILS_H_

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>    /* uint16_t: was missing, needed by the Half typedef */
#include <time.h>
#include <unistd.h>
#include <immintrin.h> /* _mm_malloc/_mm_free: was missing */

#ifdef _OPENMP
#include <omp.h>
#else
/* serial fallbacks so callers may use the OpenMP query API unconditionally */
#define omp_get_num_threads() (1)
#define omp_get_thread_num() (0)
#define omp_get_max_threads() (1)
#endif

/* default byte alignment for my_malloc() callers (cache-line sized) */
const int alignment = 64;

typedef long ITyp;     /* integer/index element type */
typedef float FTyp;    /* floating-point element type */
typedef uint16_t Half; /* 16-bit storage, presumably for half-precision values
                          -- confirm against users of Half */

/* per-thread reentrant drand48 state; defined in one .cpp and seeded via
 * set_random_seed() */
extern thread_local struct drand48_data rand_buf;

/* Wall-clock milliseconds elapsed since the first call to get_time().
 * NOTE(review): the first-call baseline capture is not thread-safe; fine as
 * long as the first call happens before any threads are spawned. */
static double get_time() {
  static bool init_done = false;
  static struct timespec stp = {0, 0};
  struct timespec tp;
  clock_gettime(CLOCK_REALTIME, &tp);
  /*clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp);*/
  if (!init_done) {
    init_done = true;
    stp = tp;
  }
  double ret = (tp.tv_sec - stp.tv_sec) * 1e3 + (tp.tv_nsec - stp.tv_nsec) * 1e-6;
  return ret;
}

void set_random_seed(int seed);

/* Zero-fill buf[0..sz), parallelized when compiled with OpenMP. */
template<typename T>
void init_zero(size_t sz, T *buf)
{
#pragma omp parallel for
  for (size_t i = 0; i < sz; i++)
    buf[i] = (T)0;
}

/* Fill buf[0..sz) with uniform random values in [low, high). */
template<typename T>
void init_random(size_t sz, T *buf, T low, T high)
{
  T range = high - low;
#pragma omp parallel for schedule(static)
  for (size_t i = 0; i < sz; i++) {
    double randval;
    drand48_r(&rand_buf, &randval); /* randval in [0, 1) */
    /* BUG FIX: was `randval * range - low`, which yields values in
     * [-low, high - 2*low) rather than the requested [low, high). */
    buf[i] = randval * range + low;
  }
}

/* aligned allocation helpers; pair my_malloc() with my_free() */
inline void *my_malloc(size_t sz, size_t align)
{
  return _mm_malloc(sz, align);
}

inline void my_free(void *p)
{
  _mm_free(p);
}

#endif /*_UTILS_H_*/
HelloOpenMP_fix3.c
#include <stdio.h>
#include <omp.h>

/* Spawn an OpenMP team; exactly one thread of the team reports the team
 * size together with its own thread id. */
int main(int argc, char *argv[])
{
#pragma omp parallel
  {
    int team_size = omp_get_num_threads();
    int my_id = omp_get_thread_num();
#pragma omp single
    {
      printf("Goodbye slow serial world and Hello OpenMP!\n");
      printf("  I have %d thread(s) and my thread id is %d\n", team_size, my_id);
    }
  }
  return 0;
}
comm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/**
 * Copyright (c) 2015 by Contributors
 */
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
#include "./kvstore_utils.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication
 */
class Comm {
 public:
  Comm() {
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape and storage shape
   */
  virtual void Init(int key, const NDArrayStorageType stype,
                    const TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief broadcast src to dst[i] with target row_ids for every i
   * \param key the identifier key for the stored ndarray
   * \param src the source row_sparse ndarray to broadcast
   * \param dst a list of destination row_sparse NDArray and its target row_ids
   *            to broadcast, where the row_ids are expected to be unique and
   *            sorted in row_id.data()
   * \param priority the priority of the operation
   */
  virtual void BroadcastRowSparse(int key, const NDArray& src,
                                  const std::vector<std::pair<NDArray*, NDArray>>& dst,
                                  const int priority) = 0;
  /**
   * \brief return a pinned context
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }
  /**
   * \brief Sets gradient compression parameters to be able to
   * perform reduce with compressed gradients
   */
  void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
    gc_ = gc;
  }

 protected:
  // pinned host context shared by all keys; set once in the ctor
  Context pinned_ctx_;
  // optional gradient compressor; null or kNone disables compression
  std::shared_ptr<GradientCompression> gc_;
};

/**
 * \brief an implementation of Comm that first copies data to CPU memory, and
 * then reduces there
 */
class CommCPU : public Comm {
 public:
  CommCPU() {
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
    // TODO(junwu) delete the following data member, now for benchmark only
    is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
  }
  virtual ~CommCPU() { }

  /* allocate the pinned-host merge buffer for this key */
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int type = mshadow::kFloat32) override {
    if (stype == kDefaultStorage) {
      merge_buf_[key].merged = NDArray(shape, pinned_ctx_, false, type);
    } else {
      merge_buf_[key].merged = NDArray(stype, shape, pinned_ctx_, true, type);
    }
  }

  /* copy all inputs to pinned host buffers and push an async sum into the
   * merge buffer; returns the merged array (or src[0] when trivial) */
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    auto& buf = merge_buf_[key];
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      if (src[0].storage_type() == kDefaultStorage) {
        return src[0];
      } else {
        // if sparse and only one GPU, always update weight on CPU
        CopyFromTo(src[0], &buf.merged, priority);
        return buf.merged;
      }
    }

    if (buf.merged.storage_type() == kDefaultStorage) {
      std::vector<Engine::VarHandle> const_vars(src.size() - 1);
      std::vector<NDArray> reduce(src.size());
      CopyFromTo(src[0], &buf.merged, priority);
      reduce[0] = buf.merged;

      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size()-1);
        for (size_t j = 0; j < src.size() - 1; ++j) {
          // allocate NDArray based on storage type
          buf.copy_buf[j] = NDArray(
            src[0].shape(), pinned_ctx_, false, src[0].dtype());
        }
      }
      for (size_t i = 1; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
        reduce[i] = buf.copy_buf[i-1];
        const_vars[i-1] = reduce[i].var();
      }

      // sum in-place into reduce[0] (the merge buffer) on the engine
      Engine::Get()->PushAsync(
        [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          ReduceSumCPU(reduce);
          on_complete();
        }, Context::CPU(), const_vars, {reduce[0].var()},
        FnProperty::kCPUPrioritized, priority, "KVStoreReduce");

    } else {
      // buf.merged is a sparse ndarray.
      std::vector<Engine::VarHandle> const_vars(src.size());
      std::vector<NDArray> reduce(src.size());

      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(
            src[0].storage_type(), src[0].shape(), pinned_ctx_,
            true, src[0].dtype());
        }
      }
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
        const_vars[i] = reduce[i].var();
      }
      NDArray result = buf.merged;
      Resource rsc = ResourceManager::Get()->Request(result.ctx(),
          ResourceRequest(ResourceRequest::kTempSpace));
      Engine::Get()->PushAsync(
        [reduce, result, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          NDArray out = result;
          // serial path kept for benchmarking (see is_serial_push_)
          is_serial_push_?
            ReduceSumCPUExSerial(reduce, &out)
            : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
          on_complete();
        }, Context::CPU(), const_vars, {result.var(), rsc.var},
        FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
    }

    return buf.merged;
  }

  /* fan src out to every dst; stages through the pinned merge buffer when
   * src is not already on CPU */
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    int mask = src.ctx().dev_mask();
    if (mask == Context::kCPU) {
      for (auto d : dst) CopyFromTo(src, d, priority);
    } else {
      // first copy data to cpu, then broadcast
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) CopyFromTo(buf.merged, d, priority);
    }
  }

  /* row-sparse broadcast: for each dst, retain only the requested rows of
   * src on CPU, then copy the retained result to the destination */
  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const int priority) override {
    using namespace mshadow;
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "BroadcastRowSparse expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
      << "BroadcastRowSparse with src on gpu context not supported";
    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      CHECK_EQ(out->storage_type(), kRowSparseStorage)
        << "BroadcastRowSparse expects row_sparse dst NDArray";
      CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
        << "BroadcastRowSparse with row_indices on gpu context not supported";
      // retain according to unique indices
      const bool is_to_gpu = out->ctx().dev_mask() == Context::kGPU;
      NDArray retained_cpu = is_to_gpu ? NDArray(kRowSparseStorage, src.shape(),
          src.ctx(), true, src.dtype(), src.aux_types()) : *out;
      Engine::Get()->PushAsync(
        [=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          const TBlob& indices = row_id.data();
          NDArray temp = retained_cpu;  // get rid of the const qualifier
          op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
                                                src, indices, kWriteTo, &temp);
          on_complete();
        }, Context::CPU(), {src.var(), row_id.var()},
        {retained_cpu.var()},
        FnProperty::kNormal, priority, "KVStoreSparseRetain");
      // if retained_cpu == out, CopyFromTo will ignore the copy operation
      CopyFromTo(retained_cpu, out, priority);
    }
  }

 private:
  // reduce sum into val[0]
  inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
    MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
      std::vector<DType*> dptr(in_data.size());
      for (size_t i = 0; i < in_data.size(); ++i) {
        TBlob data = in_data[i].data();
        CHECK(data.CheckContiguous());
        dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
      }
      size_t total = in_data[0].shape().Size();
      ReduceSumCPUImpl(dptr, total);
    });
  }

  // serial implementation of reduce sum for row sparse NDArray.
  // merges the row index sets of all inputs, then accumulates matching rows
  inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) {
    using namespace rowsparse;
    using namespace mshadow;
    auto stype = out->storage_type();
    CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
    size_t total_num_rows = 0;
    size_t num_in = in.size();
    // skip the ones with empty indices and values
    std::vector<bool> skip(num_in, false);
    // the values tensor of the inputs
    MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
      MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
        std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
        std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
        // offset to the values tensor of all inputs
        std::vector<size_t> offsets(num_in, 0);
        std::vector<size_t> num_rows(num_in, 0);
        for (size_t i = 0; i < num_in; i++) {
          if (!in[i].storage_initialized()) {
            skip[i] = true;
            continue;
          }
          auto size = in[i].aux_shape(kIdx).Size();
          num_rows[i] = size;
          total_num_rows += size;
          in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
          in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
        }
        std::vector<IType> indices;
        indices.reserve(total_num_rows);
        // gather indices from all inputs
        for (size_t i = 0; i < num_in; i++) {
          for (size_t j = 0; j < num_rows[i]; j++) {
            indices.emplace_back(in_indices[i][j]);
          }
        }
        CHECK_EQ(indices.size(), total_num_rows);
        // dedup indices
        std::sort(indices.begin(), indices.end());
        indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
        // the ones left are unique non-zero rows
        size_t nnr = indices.size();
        // allocate memory for output
        out->CheckAndAlloc({Shape1(nnr)});
        auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
        auto val_data = out->data().FlatTo2D<cpu, DType>();

        // merge-walk every input against the deduped index list; each input
        // keeps its own cursor in offsets[j] (indices are sorted per input)
        for (size_t i = 0; i < nnr; i++) {
          // copy indices back
          idx_data[i] = indices[i];
          bool zeros = true;
          for (size_t j = 0; j < num_in; j++) {
            if (skip[j]) continue;
            size_t offset = offsets[j];
            if (offset < num_rows[j]) {
              if (indices[i] == in_indices[j][offset]) {
                if (zeros) {
                  // first contributor: copy rather than add to avoid having
                  // to zero-init the output row
                  Copy(val_data[i], in_vals[j][offset], nullptr);
                  zeros = false;
                } else {
                  val_data[i] += in_vals[j][offset];
                }
                offsets[j] += 1;
              }
            }
          }
        }
      });
    });
  }

  // dense accumulate of dptr[1..] into dptr[0] over [offset, offset+size);
  // unrolled four inputs at a time
  template<typename DType>
  inline static void ReduceSumCPU(const std::vector<DType*> &dptr, size_t offset,
                                  index_t size) {
    using namespace mshadow;  // NOLINT(*)
    Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
    for (size_t i = 1; i < dptr.size(); i+=4) {
      switch (dptr.size() - i) {
        case 1: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          in_0 += in_1;
          break;
        }
        case 2: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          in_0 += in_1 + in_2;
          break;
        }
        case 3: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3;
          break;
        }
        default: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3 + in_4;
          break;
        }
      }
    }
  }

  // chunk the flat range into `step`-sized tasks; run them serially for
  // small arrays, otherwise in parallel with nthread_reduction_ threads
  template<typename DType>
  inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
    const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
    long ntask = (total + step - 1) / step;  // NOLINT(*)
    if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
      ReduceSumCPU(dptr, 0, total);
    } else {
      #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
      for (long j = 0; j < ntask; ++j) {  // NOLINT(*)
        size_t k = static_cast<size_t>(j);
        size_t begin = std::min(k * step, total);
        size_t end = std::min((k + 1) * step, total);
        if (j == ntask - 1) CHECK_EQ(end, total);
        ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
      }
    }
  }

  /// \brief temporary space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the cpu buffer for gpu data
    std::vector<NDArray> copy_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  size_t bigarray_bound_;
  int nthread_reduction_;
  bool is_serial_push_;
};

/**
 * \brief an implementation of Comm that performs reduction on device
 * directly.
 *
 * It is faster if the total device-to-device bandwidths is larger than
 * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device
 * memory.
 */
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;
  }

  virtual ~CommDevice() { }

  /* record key attributes only; actual buffers are allocated lazily in
   * InitMergeBuffer() on the first Reduce */
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int dtype = mshadow::kFloat32) override {
    sorted_key_attrs_.emplace_back(key, shape, dtype, stype);
  }

  /* lazy one-time setup: allocate merge buffers across the source devices
   * and (optionally) enable GPU peer-to-peer access */
  void InitBuffersAndComm(const std::vector<NDArray>& src) {
    if (!inited_) {
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }
  }

  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // when this reduce is called from kvstore_dist, gc is not set
    // we don't do compression twice in dist_sync_device
    if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) {
      return ReduceCompressed(key, src, priority);
    }

    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }

    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());

    const NDArrayStorageType stype = buf.merged.storage_type();
    if (stype == kDefaultStorage) {
      CopyFromTo(src[0], &(buf.merged), priority);
      reduce[0] = buf.merged;

      if (buf.copy_buf.empty()) {
        // TODO(mli) this results in large device memory usage for huge ndarray,
        // such as the largest fullc in VGG. consider to do segment reduce with
        // NDArray.Slice or gpu direct memory access. for the latter, we need to
        // remove some ctx check, and also it reduces 20% perf
        buf.copy_buf.resize(src.size()-1);
        for (size_t i = 0; i < src.size()-1; ++i) {
          buf.copy_buf[i] = NDArray(
            buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype());
        }
      }
      for (size_t i = 0; i < src.size()-1; ++i) {
        CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
        reduce[i+1] = buf.copy_buf[i];
      }
    } else {
      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(
            buf.merged.storage_type(), buf.merged.shape(), buf.merged.ctx(),
            true, buf.merged.dtype());
        }
      }
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
      }
    }

    ElementwiseSum(reduce, &buf.merged, priority);
    return buf.merged;
  }

  /* quantize -> transfer -> dequantize each input, then sum; used when a
   * gradient compressor is configured */
  const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src,
                                  int priority) {
    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    if (buf.copy_buf.empty()) {
      // one buf for each context
      buf.copy_buf.resize(src.size());
      buf.compressed_recv_buf.resize(src.size());
      buf.compressed_send_buf.resize(src.size());
      buf.residual.resize(src.size());

      for (size_t i = 0; i < src.size(); ++i) {
        buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = 0;
        int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size());
        buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(),
                                        false, buf.merged.dtype());
        buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(),
                                        false, buf.merged.dtype());
      }
    }

    for (size_t i = 0; i < src.size(); ++i) {
      // compress before copy
      // this is done even if the data is on same context as copy_buf because
      // we don't want the training to be biased towards data on this GPU
      gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority);
      if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) {
        CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority);
      } else {
        // avoid memory copy when they are on same context
        buf.compressed_recv_buf[i] = buf.compressed_send_buf[i];
      }
      gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority);
      reduce[i] = buf.copy_buf[i];
    }
    // NOTE(review): unlike Reduce(), `priority` is not forwarded to this
    // ElementwiseSum call -- confirm whether that is intentional
    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // copy to a random device first
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf.merged, d, priority);
      }
    }
  }

  /* row-sparse broadcast on device: retain the requested rows of src on
   * src's context, then copy to each destination */
  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const int priority) override {
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "BroadcastRowSparse expects row-sparse src NDArray";

    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      CHECK_EQ(out->storage_type(), kRowSparseStorage)
        << "BroadcastRowSparse expects row_sparse dst NDArray";
      CHECK_EQ(row_id.ctx(), src.ctx())
        << "row_id and src are expected to be on the same context";
      // retain according to indices
      const bool is_diff_ctx = out->ctx() != src.ctx();
      NDArray out_gpu = is_diff_ctx? NDArray(kRowSparseStorage, out->shape(),
          src.ctx(), true, out->dtype(), out->aux_types()) : *out;
      Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          const TBlob& indices = row_id.data();
          using namespace mxnet::common;
          NDArray temp = out_gpu;
          switch (temp.ctx().dev_mask()) {
            case cpu::kDevMask: {
              SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(),
                  src, indices, kWriteTo, &temp);
              break;
            }
#if MXNET_USE_CUDA
            case gpu::kDevMask: {
              SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(),
                  src, indices, kWriteTo, &temp);
              // wait for GPU operations to complete
              rctx.get_stream<gpu>()->Wait();
              break;
            }
#endif
            default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
          }
          on_complete();
        }, out_gpu.ctx(), {src.var(), row_id.var()}, {out_gpu.var()},
      FnProperty::kNormal, priority, "KVStoreSparseRetain");
      // if out_gpu == *out, CopyFromTo is a no-op
      CopyFromTo(out_gpu, out, priority);
    }
  }

 private:
  /* best-effort enabling of CUDA peer-to-peer access between every pair of
   * GPUs in devs; logs a warning matrix when not fully enabled */
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled << " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  using KeyAttrs = std::tuple<int, TShape, int, NDArrayStorageType>;
  // try to allocate buff on device evenly
  void InitMergeBuffer(const std::vector<Context>& devs) {
    // place the largest keys first so the greedy assignment below balances
    // total bytes per device
    std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
              const KeyAttrs& a, const KeyAttrs& b) {
      return std::get<1>(a).Size() > std::get<1>(b).Size();
    });

    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }
    for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
      const int key = std::get<0>(sorted_key_attrs_[i]);
      const TShape& shape = std::get<1>(sorted_key_attrs_[i]);
      const int type = std::get<2>(sorted_key_attrs_[i]);
      const NDArrayStorageType stype = std::get<3>(sorted_key_attrs_[i]);
      auto& buf = merge_buf_[key];
      // pick the device with the least bytes assigned so far
      Context ctx;
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      if (stype == kDefaultStorage) {
        buf.merged = NDArray(shape, ctx, false, type);
      } else {
        buf.merged = NDArray(stype, shape, ctx, true, type);
      }
      ctx_info[ctx.dev_id].second += shape.Size();
    }
    inited_ = true;
  }

  std::vector<KeyAttrs> sorted_key_attrs_;
  /// \brief temporary space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the gpu buffer
    std::vector<NDArray> copy_buf;
    /// \brief the residual buffer for gradient compression
    std::vector<NDArray> residual;
    /// \brief the small buffer for compressed data in sender
    std::vector<NDArray> compressed_send_buf;
    /// \brief the small buffer for compressed data in receiver
    std::vector<NDArray> compressed_recv_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  bool inited_;
};

}  // namespace kvstore
}  // namespace mxnet
#endif  // MXNET_KVSTORE_COMM_H_
GB_unop__floor_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__floor_fc64_fc64
// op(A') function:  GB_unop_tran__floor_fc64_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_cfloor (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_cfloor (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_cfloor (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FLOOR || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the complex floor operator element-wise: Cx [p] = GB_cfloor (Ax [p])
// for p in [0, anz).  Cx and Ax may alias (in-place apply is safe because the
// operation is purely element-wise).  Returns GrB_NO_VALUE when the operator
// is compile-time disabled via GB_DISABLE.
GrB_Info GB_unop_apply__floor_fc64_fc64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // iterations are independent, so a static schedule is used
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        GxB_FC64_t z = aij ;
        Cx [p] = GB_cfloor (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is supplied by the included template
// GB_unop_transpose.c, specialized through the GB_* macros defined above.
GrB_Info GB_unop_tran__floor_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== file: blockchain.c ==== */
/*********************************************************************
  Homework 5
  CS 110: Computer Architecture, Spring 2021
  ShanghaiTech University

  * Last Modified: 03/28/2021
*********************************************************************/

#include "blockchain.h"
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <stdio.h>

#define MAX_UINT_64 0xffffffffffffffff

/* Initialize a block header in place.
 * Copies the 32-byte previous-block hash and up to 256 bytes of payload
 * (shorter payloads are zero-padded because the data field is cleared
 * first).  The nonce is set to the all-ones sentinel (-1 wraps in the
 * unsigned field) meaning "not yet mined".  Silently returns on any
 * NULL argument. */
void blockchain_node_init(blk_t *node, uint32_t index, uint32_t timestamp,
                          unsigned char prev_hash[32], unsigned char *data,
                          size_t data_size) {
  if (!node || !data || !prev_hash)
    return;
  node->header.index = index;
  node->header.timestamp = timestamp;
  node->header.nonce = -1; /* sentinel: block not mined yet */
  /* zero-fill so short payloads leave deterministic padding */
  memset(node->header.data, 0, sizeof(unsigned char) * 256);
  memcpy(node->header.prev_hash, prev_hash, HASH_BLOCK_SIZE);
  memcpy(node->header.data, data,
         sizeof(unsigned char) * ((data_size < 256) ? data_size : 256));
}

/* Hash the block HEADER only (sizeof(blkh_t) bytes starting at the node)
 * into hash_buf using the caller-supplied hash function.  No-op when
 * node is NULL. */
void blockchain_node_hash(blk_t *node, unsigned char hash_buf[HASH_BLOCK_SIZE],
                          hash_func func) {
  if (node)
    func((unsigned char *)node, sizeof(blkh_t), (unsigned char *)hash_buf);
}

/* Verify a block against its predecessor:
 *   1. node->hash must equal the recomputed hash of node's header;
 *   2. node->header.prev_hash must equal the recomputed hash of prev_node.
 * Returns True only if both checks pass. */
BOOL blockchain_node_verify(blk_t *node, blk_t *prev_node, hash_func func) {
  unsigned char hash_buf[HASH_BLOCK_SIZE];
  if (!node || !prev_node)
    return False;
  blockchain_node_hash(node, hash_buf, func);
  if (memcmp(node->hash, hash_buf, sizeof(unsigned char) * HASH_BLOCK_SIZE))
    return False;
  blockchain_node_hash(prev_node, hash_buf, func);
  if (memcmp(node->header.prev_hash, hash_buf,
             sizeof(unsigned char) * HASH_BLOCK_SIZE))
    return False;
  return True;
}

/* The sequential implementation of mining implemented for you.
*/
/* Mine the block: search for a nonce whose header hash is numerically no
 * greater than the difficulty target (diff leading zero BITS), using OpenMP
 * worker threads.  On success the winning nonce and its hash are stored in
 * *node, and the hash is also copied to hash_buf.
 *
 * Fixes relative to the original:
 *  - removed the bare `#pragma omp` (not a valid OpenMP directive); the
 *    thread-count selection it wrapped is kept as plain code,
 *  - nonce is now assigned BEFORE hashing, so the tested hash always
 *    corresponds to the nonce being tried (the original hashed the previous
 *    iteration's nonce),
 *  - the candidate nonce is `i`, not `base + i` — `i` already starts at
 *    `base`, so the original double-offset skipped whole nonce ranges,
 *  - the winner is recorded inside `#pragma omp critical`, eliminating the
 *    race in which two threads could both write node->hash,
 *  - dropped the redundant `base != MAX_UINT_64` loop-condition term. */
void blockchain_node_mine(blk_t *node, unsigned char hash_buf[HASH_BLOCK_SIZE],
                          size_t diff, hash_func func) {
  unsigned char one_diff[HASH_BLOCK_SIZE];
  size_t diff_q, diff_m;
  diff_q = diff / 8;
  diff_m = diff % 8;
  /* Build the target: diff leading zero bits, then all ones. */
  memset(one_diff, 0xFF, sizeof(unsigned char) * HASH_BLOCK_SIZE);
  memset(one_diff, 0, sizeof(unsigned char) * diff_q);
  one_diff[diff_q] = ((uint8_t)0xFF) >> diff_m;

  BOOL nonce_flag = False; /* set once a winning nonce has been recorded */
  unsigned long base = 0;

  /* Thread-count selection preserved from the original (note: outside a
   * parallel region omp_get_num_threads() returns 1, so this always picks
   * 4 threads in practice). */
  if (omp_get_num_threads() == 20) {
    omp_set_num_threads(20);
  } else {
    omp_set_num_threads(4);
  }

  /* Scan the nonce space in batches of 0x2000 so threads can stop soon
   * after a winner is found. */
  for (base = 0; base < MAX_UINT_64 && !nonce_flag; base += 0x2000) {
#pragma omp parallel shared(nonce_flag)
    {
      /* Per-thread working copies of the block and hash buffer. */
      blk_t private_node;
      unsigned char private_hash_buf[HASH_BLOCK_SIZE];
      memcpy(&private_node, node, sizeof(blk_t));

#pragma omp for schedule(dynamic)
      for (unsigned long i = base; i < base + 0x2000; i++) {
        if (nonce_flag)
          continue; /* cheap early-out once someone has won */
        /* Set the candidate nonce BEFORE hashing. */
        private_node.header.nonce = i;
        blockchain_node_hash(&private_node, private_hash_buf, func);
        /* Accept iff the first diff_q bytes are zero and the remainder
         * compares <= the target tail. */
        if ((!memcmp(private_hash_buf, one_diff,
                     sizeof(unsigned char) * diff_q)) &&
            memcmp(&private_hash_buf[diff_q], &one_diff[diff_q],
                   sizeof(unsigned char) * (HASH_BLOCK_SIZE - diff_q)) <= 0) {
#pragma omp critical
          {
            if (!nonce_flag) {
              /* First winner records its result; later winners are ignored. */
              nonce_flag = True;
              node->header.nonce = private_node.header.nonce;
              memcpy(node->hash, private_hash_buf,
                     sizeof(unsigned char) * HASH_BLOCK_SIZE);
            }
          }
        }
      }
    }
  }

  /* Store calculated hash into hash_buf */
  memcpy(hash_buf, node->hash, sizeof(unsigned char) * HASH_BLOCK_SIZE);
}
/* ==== file: convolution_3x3_packn_fp16s.h ==== */
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch) { const int packn = csrr_vlenb() / 2; // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + 
tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = pb-pa-inch/pa-64-outch/pb kernel_tm_packn.create(inch / packn, 64, outch / packn, (size_t)2u * packn * packn, packn * packn); for (int q = 0; q + (packn - 1) < outch; q += packn) { Mat g0 = kernel_tm_packn.channel(q / packn); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + (packn - 1) < inch; p += packn) { for (int i = 0; i < packn; i++) { for (int j = 0; j < packn; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (__fp16)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd64_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, 
-2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); // NOTE c99 variable length array __fp16 tmp[8][8][packn]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * packn; for (int m = 0; m < 8; m++) { vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl); vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _r05 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _r06 = vle16_v_f16m1(r0 + packn * 6, vl); vfloat16m1_t _r07 = vle16_v_f16m1(r0 + packn * 7, vl); vfloat16m1_t _tmp0m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r00, _r06, vl), 5.25f, vfsub_vv_f16m1(_r04, _r02, vl), vl); vfloat16m1_t _tmp7m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r07, _r01, vl), 5.25f, vfsub_vv_f16m1(_r03, _r05, vl), vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[7][m], _tmp7m, vl); vfloat16m1_t _tmp12a = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r02, _r06, vl), -4.25f, _r04, vl); vfloat16m1_t _tmp12b = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r01, _r05, vl), 
-4.25f, _r03, vl); vfloat16m1_t _tmp1m = vfadd_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _tmp2m = vfsub_vv_f16m1(_tmp12a, _tmp12b, vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vfloat16m1_t _tmp34a = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r06, 0.25f, _r02, vl), -1.25f, _r04, vl); vfloat16m1_t _tmp34b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_r01, 0.5f, vl), -2.5f, _r03, vl), 2.f, _r05, vl); vfloat16m1_t _tmp3m = vfadd_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _tmp4m = vfsub_vv_f16m1(_tmp34a, _tmp34b, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); vse16_v_f16m1(tmp[4][m], _tmp4m, vl); vfloat16m1_t _tmp56a = vfmacc_vf_f16m1(_r06, 4.f, vfmacc_vf_f16m1(_r02, -1.25f, _r04, vl), vl); vfloat16m1_t _tmp56b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_r01, 2.f, vl), -2.5f, _r03, vl), 0.5f, _r05, vl); vfloat16m1_t _tmp5m = vfadd_vv_f16m1(_tmp56a, _tmp56b, vl); vfloat16m1_t _tmp6m = vfsub_vv_f16m1(_tmp56a, _tmp56b, vl); vse16_v_f16m1(tmp[5][m], _tmp5m, vl); vse16_v_f16m1(tmp[6][m], _tmp6m, vl); r0 += w * packn; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * packn; __fp16* r0_tm_1 = r0_tm_0 + tiles * packn; __fp16* r0_tm_2 = r0_tm_0 + tiles * packn * 2; __fp16* r0_tm_3 = r0_tm_0 + tiles * packn * 3; __fp16* r0_tm_4 = r0_tm_0 + tiles * packn * 4; __fp16* r0_tm_5 = r0_tm_0 + tiles * packn * 5; __fp16* r0_tm_6 = r0_tm_0 + tiles * packn * 6; __fp16* r0_tm_7 = r0_tm_0 + tiles * packn * 7; for (int m = 0; m < 8; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _tmp06 = vle16_v_f16m1(tmp[m][6], vl); vfloat16m1_t _tmp07 = vle16_v_f16m1(tmp[m][7], vl); vfloat16m1_t _r0tm0 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp00, _tmp06, vl), 5.25f, 
vfsub_vv_f16m1(_tmp04, _tmp02, vl), vl); vfloat16m1_t _r0tm7 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp07, _tmp01, vl), 5.25f, vfsub_vv_f16m1(_tmp03, _tmp05, vl), vl); vfloat16m1_t _tmp12a = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp02, _tmp06, vl), -4.25f, _tmp04, vl); vfloat16m1_t _tmp12b = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp01, _tmp05, vl), -4.25f, _tmp03, vl); vfloat16m1_t _r0tm1 = vfadd_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _r0tm2 = vfsub_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _tmp34a = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp06, 0.25f, _tmp02, vl), -1.25f, _tmp04, vl); vfloat16m1_t _tmp34b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_tmp01, 0.5f, vl), -2.5f, _tmp03, vl), 2.f, _tmp05, vl); vfloat16m1_t _r0tm3 = vfadd_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _r0tm4 = vfsub_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _tmp56a = vfmacc_vf_f16m1(_tmp06, 4.f, vfmacc_vf_f16m1(_tmp02, -1.25f, _tmp04, vl), vl); vfloat16m1_t _tmp56b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_tmp01, 2.f, vl), -2.5f, _tmp03, vl), 0.5f, _tmp05, vl); vfloat16m1_t _r0tm5 = vfadd_vv_f16m1(_tmp56a, _tmp56b, vl); vfloat16m1_t _r0tm6 = vfsub_vv_f16m1(_tmp56a, _tmp56b, vl); vse16_v_f16m1(r0_tm_0, _r0tm0, vl); vse16_v_f16m1(r0_tm_1, _r0tm1, vl); vse16_v_f16m1(r0_tm_2, _r0tm2, vl); vse16_v_f16m1(r0_tm_3, _r0tm3, vl); vse16_v_f16m1(r0_tm_4, _r0tm4, vl); vse16_v_f16m1(r0_tm_5, _r0tm5, vl); vse16_v_f16m1(r0_tm_6, _r0tm6, vl); vse16_v_f16m1(r0_tm_7, _r0tm7, vl); r0_tm_0 += tiles * packn * 8; r0_tm_1 += tiles * packn * 8; r0_tm_2 += tiles * packn * 8; r0_tm_3 += tiles * packn * 8; r0_tm_4 += tiles * packn * 8; r0_tm_5 += tiles * packn * 8; r0_tm_6 += tiles * packn * 8; r0_tm_7 += tiles * packn * 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat 
bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tmpptr = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr[4] = r0[l + packn * 4]; tmpptr[5] = r0[l + packn * 5]; tmpptr[6] = r0[l + packn * 6]; tmpptr[7] = r0[l + packn * 7]; tmpptr += 8; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl); vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl); vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 8; #endif } } for (; i + 3 < tiles; i += 4) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for 
(int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr += 4; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 4; #endif } } for (; i + 1 < tiles; i += 2) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr += 2; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 2; #endif } } for (; i < tiles; i++) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { vfloat16m1_t _val = vle16_v_f16m1(r0, vl); vse16_v_f16m1(tmpptr, _val, vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const 
__fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; __fp16 val2 = *r0++; __fp16 val3 = *r0++; __fp16 val4 = *r0++; __fp16 val5 = *r0++; __fp16 val6 = *r0++; __fp16 val7 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl); _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl); _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl); _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl); vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl); vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl); vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl); output0_tm += packn * 8; } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; __fp16 val2 = *r0++; __fp16 val3 = *r0++; vfloat16m1_t 
_w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); output0_tm += packn * 4; } for (; i + 1 < tiles; i += 2) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); output0_tm += packn * 2; } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum, vl); output0_tm += packn; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, 
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; vfloat16m1_t _bias0 = bias ? vle16_v_f16m1((const __fp16*)bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl); // NOTE c99 variable length array __fp16 tmp[6][8][packn]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * packn; const __fp16* output0_tm_1 = output0_tm_0 + tiles * packn; const __fp16* output0_tm_2 = output0_tm_0 + tiles * packn * 2; const __fp16* output0_tm_3 = output0_tm_0 + tiles * packn * 3; const __fp16* output0_tm_4 = output0_tm_0 + tiles * packn * 4; const __fp16* output0_tm_5 = output0_tm_0 + tiles * packn * 5; const __fp16* output0_tm_6 = output0_tm_0 + tiles * packn * 6; const __fp16* output0_tm_7 = output0_tm_0 + tiles * packn * 7; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * packn; // TODO rvv optimize for (int m = 0; m < 8; m++) { vfloat16m1_t _out0tm0 = vle16_v_f16m1(output0_tm_0, vl); vfloat16m1_t _out0tm1 = vle16_v_f16m1(output0_tm_1, vl); vfloat16m1_t _out0tm2 = vle16_v_f16m1(output0_tm_2, vl); vfloat16m1_t _out0tm3 = vle16_v_f16m1(output0_tm_3, vl); vfloat16m1_t _out0tm4 = vle16_v_f16m1(output0_tm_4, 
vl); vfloat16m1_t _out0tm5 = vle16_v_f16m1(output0_tm_5, vl); vfloat16m1_t _out0tm6 = vle16_v_f16m1(output0_tm_6, vl); vfloat16m1_t _out0tm7 = vle16_v_f16m1(output0_tm_7, vl); vfloat16m1_t _tmp024a = vfadd_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp135a = vfsub_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp024b = vfadd_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp135b = vfsub_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp024c = vfadd_vv_f16m1(_out0tm5, _out0tm6, vl); vfloat16m1_t _tmp135c = vfsub_vv_f16m1(_out0tm5, _out0tm6, vl); vfloat16m1_t _tmp0m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm0, _tmp024a, vl), vfmacc_vf_f16m1(_tmp024b, 32.f, _tmp024c, vl), vl); vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl); vfloat16m1_t _tmp4m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vse16_v_f16m1(tmp[4][m], _tmp4m, vl); vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl); vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl); vfloat16m1_t _tmp5m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm7, _tmp135a, vl), vfmacc_vf_f16m1(_tmp135c, 32.f, _tmp135b, vl), vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); vse16_v_f16m1(tmp[5][m], _tmp5m, vl); output0_tm_0 += tiles * packn * 8; output0_tm_1 += tiles * packn * 8; output0_tm_2 += tiles * packn * 8; output0_tm_3 += tiles * packn * 8; output0_tm_4 += tiles * packn * 8; output0_tm_5 += tiles * packn * 8; output0_tm_6 += tiles * packn * 8; output0_tm_7 += tiles * packn * 8; } for (int m = 0; m < 6; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t 
_tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _tmp06 = vle16_v_f16m1(tmp[m][6], vl); vfloat16m1_t _tmp07 = vle16_v_f16m1(tmp[m][7], vl); vfloat16m1_t _tmp024a = vfadd_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp135a = vfsub_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp024b = vfadd_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _tmp135b = vfsub_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _tmp024c = vfadd_vv_f16m1(_tmp05, _tmp06, vl); vfloat16m1_t _tmp135c = vfsub_vv_f16m1(_tmp05, _tmp06, vl); vfloat16m1_t _out00 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp00, _tmp024a, vl), vfmacc_vf_f16m1(_tmp024b, 32.f, _tmp024c, vl), vl), vl); vfloat16m1_t _out02 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl), vl); vfloat16m1_t _out04 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl), vl); vse16_v_f16m1(output0, _out00, vl); vse16_v_f16m1(output0 + packn * 2, _out02, vl); vse16_v_f16m1(output0 + packn * 4, _out04, vl); vfloat16m1_t _out01 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl), vl); vfloat16m1_t _out03 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl), vl); vfloat16m1_t _out05 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp07, _tmp135a, vl), vfmacc_vf_f16m1(_tmp135c, 32.f, _tmp135b, vl), vl), vl); vse16_v_f16m1(output0 + packn, _out01, vl); vse16_v_f16m1(output0 + packn * 3, _out03, vl); vse16_v_f16m1(output0 + packn * 5, _out05, vl); output0 += outw * packn; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd42_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch) 
{
    const int packn = csrr_vlenb() / 2;

    // winograd42 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    // G matrix of the F(4x4, 3x3) Winograd transform: kernel_tm = G * g * G^T.
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : first multiply each kernel row by G (tmp = G * g)
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : then multiply by G^T to get the 6x6 transformed tile
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    kernel_tm_packn.create(inch / packn, 36, outch / packn, (size_t)2u * packn * packn, packn * packn);

    // NOTE(review): the loops below assume inch and outch are multiples of
    // packn (leftover channels are silently dropped) — presumably guaranteed
    // by the caller's padding; confirm at the call site.
    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);

        for (int k = 0; k < 36; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        // cast the fp32 transformed weight down to fp16 storage
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[0] = (__fp16)k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(4,3) forward convolution (fp16 storage/arithmetic, packn lanes).
static void conv3x3s1_winograd42_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack =
bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); // NOTE c99 variable length array __fp16 tmp[6][6][packn]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * packn; for (int m = 0; m < 6; m++) { vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl); vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _r05 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _tmp0m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r04, 4.f, _r00, vl), -5.f, _r02, vl); vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r04, _r03, vl), -4.f, 
vfadd_vv_f16m1(_r01, _r02, vl), vl); vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r03, vl), 4.f, vfsub_vv_f16m1(_r01, _r02, vl), vl); vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r02, vl), -2.f, vfsub_vv_f16m1(_r01, _r03, vl), vl); vfloat16m1_t _tmp4m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r02, vl), 2.f, vfsub_vv_f16m1(_r01, _r03, vl), vl); vfloat16m1_t _tmp5m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r05, 4.f, _r01, vl), -5.f, _r03, vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); vse16_v_f16m1(tmp[4][m], _tmp4m, vl); vse16_v_f16m1(tmp[5][m], _tmp5m, vl); r0 += w * packn; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 6 + j) * packn; __fp16* r0_tm_1 = r0_tm_0 + tiles * packn; __fp16* r0_tm_2 = r0_tm_0 + tiles * packn * 2; __fp16* r0_tm_3 = r0_tm_0 + tiles * packn * 3; __fp16* r0_tm_4 = r0_tm_0 + tiles * packn * 4; __fp16* r0_tm_5 = r0_tm_0 + tiles * packn * 5; for (int m = 0; m < 6; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _r0tm0 = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp04, 4.f, _tmp00, vl), -5.f, _tmp02, vl); vfloat16m1_t _r0tm1 = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp04, _tmp03, vl), -4.f, vfadd_vv_f16m1(_tmp01, _tmp02, vl), vl); vfloat16m1_t _r0tm2 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp03, vl), 4.f, vfsub_vv_f16m1(_tmp01, _tmp02, vl), vl); vfloat16m1_t _r0tm3 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp02, vl), -2.f, vfsub_vv_f16m1(_tmp01, _tmp03, vl), vl); vfloat16m1_t _r0tm4 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp02, vl), 2.f, vfsub_vv_f16m1(_tmp01, _tmp03, vl), vl); vfloat16m1_t _r0tm5 = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp05, 4.f, 
_tmp01, vl), -5.f, _tmp03, vl); vse16_v_f16m1(r0_tm_0, _r0tm0, vl); vse16_v_f16m1(r0_tm_1, _r0tm1, vl); vse16_v_f16m1(r0_tm_2, _r0tm2, vl); vse16_v_f16m1(r0_tm_3, _r0tm3, vl); vse16_v_f16m1(r0_tm_4, _r0tm4, vl); vse16_v_f16m1(r0_tm_5, _r0tm5, vl); r0_tm_0 += tiles * packn * 6; r0_tm_1 += tiles * packn * 6; r0_tm_2 += tiles * packn * 6; r0_tm_3 += tiles * packn * 6; r0_tm_4 += tiles * packn * 6; r0_tm_5 += tiles * packn * 6; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tmpptr = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr[4] = r0[l + packn * 4]; tmpptr[5] = r0[l + packn * 5]; tmpptr[6] = r0[l + packn * 6]; tmpptr[7] = r0[l + packn * 7]; tmpptr += 8; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); 
vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl); vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl); vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 8; #endif } } for (; i + 3 < tiles; i += 4) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr += 4; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 4; #endif } } for (; i + 1 < tiles; i += 2) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr += 2; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 2; #endif } } for (; i < tiles; i++) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 
2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { vfloat16m1_t _val = vle16_v_f16m1(r0, vl); vse16_v_f16m1(tmpptr, _val, vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; __fp16 val2 = *r0++; __fp16 val3 = *r0++; __fp16 val4 = *r0++; __fp16 val5 = *r0++; __fp16 val6 = *r0++; __fp16 val7 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl); _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl); _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl); _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); 
vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl); vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl); vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl); vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl); output0_tm += packn * 8; } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; __fp16 val2 = *r0++; __fp16 val3 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); output0_tm += packn * 4; } for (; i + 1 < tiles; i += 2) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); output0_tm += packn * 2; } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch 
always > 0 vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum, vl); output0_tm += packn; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; vfloat16m1_t _bias0 = bias ? 
vle16_v_f16m1((const __fp16*)bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl); // NOTE variable length array __fp16 tmp[4][6][packn]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 6 + j) * packn; const __fp16* output0_tm_1 = output0_tm_0 + tiles * packn; const __fp16* output0_tm_2 = output0_tm_0 + tiles * packn * 2; const __fp16* output0_tm_3 = output0_tm_0 + tiles * packn * 3; const __fp16* output0_tm_4 = output0_tm_0 + tiles * packn * 4; const __fp16* output0_tm_5 = output0_tm_0 + tiles * packn * 5; __fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * packn; // TODO rvv optimize for (int m = 0; m < 6; m++) { vfloat16m1_t _out0tm0 = vle16_v_f16m1(output0_tm_0, vl); vfloat16m1_t _out0tm1 = vle16_v_f16m1(output0_tm_1, vl); vfloat16m1_t _out0tm2 = vle16_v_f16m1(output0_tm_2, vl); vfloat16m1_t _out0tm3 = vle16_v_f16m1(output0_tm_3, vl); vfloat16m1_t _out0tm4 = vle16_v_f16m1(output0_tm_4, vl); vfloat16m1_t _out0tm5 = vle16_v_f16m1(output0_tm_5, vl); vfloat16m1_t _tmp02a = vfadd_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp13a = vfsub_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp02b = vfadd_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp13b = vfsub_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp0m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm0, _tmp02a, vl), _tmp02b, vl); vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(_tmp13a, 2.f, _tmp13b, vl); vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(_tmp02a, 4.f, _tmp02b, vl); vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfadd_vv_f16m1(_out0tm5, _tmp13a, vl), 8.f, _tmp13b, vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); output0_tm_0 += tiles * packn * 6; output0_tm_1 += tiles * packn * 6; output0_tm_2 += tiles * packn * 6; output0_tm_3 += tiles * packn * 6; output0_tm_4 
+= tiles * packn * 6; output0_tm_5 += tiles * packn * 6; } for (int m = 0; m < 4; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _tmp02a = vfadd_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp13a = vfsub_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp02b = vfadd_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _tmp13b = vfsub_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _out00 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp00, _tmp02a, vl), _tmp02b, vl), vl); vfloat16m1_t _out01 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(_tmp13a, 2.f, _tmp13b, vl), vl); vfloat16m1_t _out02 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(_tmp02a, 4.f, _tmp02b, vl), vl); vfloat16m1_t _out03 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp05, _tmp13a, vl), 8.f, _tmp13b, vl), vl); vse16_v_f16m1(output0, _out00, vl); vse16_v_f16m1(output0 + packn, _out01, vl); vse16_v_f16m1(output0 + packn * 2, _out02, vl); vse16_v_f16m1(output0 + packn * 3, _out03, vl); output0 += outw * packn; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
5115.c
/* * Compile using the command: * `cc 27Stencil.c -o oa -fopenmp -lm` */ #include <math.h> #include <omp.h> #include <stdint.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #ifdef _OPENACC #include <openacc.h> #endif #define DEFAULT_DATASIZE 1048576 /* Default datasize. */ #define DEFAULT_REPS 10 /* Default repetitions. */ #define CONF95 1.96 #define ITERATIONS 10 #define FAC (1./26) #define TOLERANCE 1.0e-15 extern int reps; /* Repetitions. */ extern double *times; /* Array to store results in. */ extern int flag; /* Flag to set CPU or GPU invocation. */ extern unsigned int datasize; /* Datasize passed to benchmark functions. */ unsigned int datasize = -1; /* Datasize for tests in bytes. */ int reps = -1; /* Repetitions. */ double *times; /* Array of doubles storing the benchmark times in microseconds. */ double testtime; /* The average test time in microseconds for reps runs. */ double testsd; /* The standard deviation in the test time in microseconds for reps runs. */ int flag = 0; /* 0 indicates CPU. */ /* * Function prototypes for common functions. */ void init(int argc, char **argv); void finalisetest(char *); void finalise(void); void benchmark(char *, double (*test)(void)); void print_results(char *, double, double); /* Forward Declarations of utility functions*/ double max_diff(double *, double *, int); void wul(); void usage(char *argv[]) { printf("Usage: %s \n" "\t--reps <repetitions> (default %d)\n" "\t--datasize <datasize> (default %d bytes)\n", argv[0], DEFAULT_REPS, DEFAULT_DATASIZE); } /* * This function parses the parameters from the command line. 
*/ void parse_args(int argc, char *argv[]) { int arg; for (arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "--reps") == 0) { reps = atoi(argv[++arg]); if (reps == 0) { printf("Invalid integer:--reps: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--datasize") == 0) { datasize = atoi(argv[++arg]); if (datasize == 0) { printf("Invalid integer:--datasize: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "-h") == 0) { usage(argv); exit(EXIT_SUCCESS); } else { printf("Invalid parameters: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } } void stats(double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd; int i, good_reps; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; good_reps = 0; for (i = 0; i < reps; i++) { /* Skip entries where times is 0, this indicates an error occured */ if (times[i] != 0){ mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime += times[i]; good_reps++; } } meantime = totaltime / good_reps; sumsq = 0; for (i = 0; i < reps; i++) { if (times[i] != 0){ sumsq += (times[i] - meantime) * (times[i] - meantime); } } sd = sqrt(sumsq / good_reps); *mtp = meantime; *sdp = sd; } /* * This function prints the results of the tests. * If you use a compiler which sets a different preprocessor flag * you may wish to add it here. */ void print_results(char *name, double testtime, double testsd) { char compiler[20]; /* Set default compiler idetifier. */ sprintf(compiler, "COMPILER"); /* Set compiler identifier based on known preprocessor flags. */ #ifdef __PGI sprintf(compiler, "PGI"); #endif #ifdef __HMPP sprintf(compiler, "CAPS"); #endif //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6); printf("%f\n", testtime*1e6); } /* * This function initialises the storage for the test results and set the defaults. 
*/ void init(int argc, char **argv) { parse_args(argc, argv); if (reps == -1) { reps = DEFAULT_REPS; } if (datasize == (unsigned int)-1) { datasize = DEFAULT_DATASIZE; } times = (double *)malloc((reps) * sizeof(double)); /* #ifdef __PGI acc_init(acc_device_nvidia); // printf("PGI INIT\n"); #endif #ifdef __HMPP int a[5] = {1,2,3,4,5}; #pragma acc data copyin(a[0:5]) {} #endif #ifdef _CRAYC int a[5] = {1,2,3,4,5}; #pragma acc data copyin(a[0:5]) {} #endif */ } void finalise(void) { free(times); } /* * This function runs the benchmark specified. */ void benchmark(char *name, double (*test)(void)) { int i = 0; double tmp = 0; for (i=0; i<reps; i++) { tmp = test(); if (tmp == -10000){ printf("Memory allocation failure in %s\n", name); times[i] = 0; } else if (tmp == -11000){ printf("CPU/GPU mismatch in %s\n", name); times[i] = 0; } else{ times[i] = tmp; } } stats(&testtime, &testsd); //printf("in benchmark\n"); print_results(name, testtime, testsd); //printf("printed result\n"); } double stencil() { extern unsigned int datasize; int sz = cbrt((datasize/sizeof(double))/2); int i, j, k, iter; int n = sz-2; double fac = FAC; double t1, t2; double md; //printf("size = %d\n", sz); /* Work buffers, with halos */ double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz); double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz); double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz); double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz); double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz); if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ return(-10000); } /* initialize input array a0 */ /* zero all of array (including halos) */ //printf("size = %d\n", sz); for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0[i*sz*sz+j*sz+k] = 0.0; //printf("%d\t", (i*sz*sz+j*sz+k)); } } } //printf("\n"); //int size_of_a0 = sizeof(a0) / 
sizeof(*a0); //printf("size of a0 = %d\n", size_of_a0); /* use random numbers to fill interior */ for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX); } } } /* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */ /* save initial input array for later GPU run */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k]; } } } //printf("Host computation\n"); /* run main computation on host */ for (iter = 0; iter < ITERATIONS; iter++) { for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a1[i*sz*sz+j*sz+k] = ( a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] + a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] + a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] + a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] + a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] + a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] + a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] + a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] + a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)] ) * fac; } } } for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k]; } } } } /* end iteration loop */ /* save result */ /* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k]; // printf("%lf\t", a0[i*sz*sz+j*sz+k]); } } } //int size = sizeof(host_result)/sizeof(host_result[0]); //for(i = 0; i < size; i++) { // 
printf("%lf\t", host_result[i]); //} //printf("\n"); /* copy initial array back to a0 */ /* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k]; } } } //printf("Starting acc pragma code\n"); t1 = omp_get_wtime(); #pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n) { for (iter = 0; iter < ITERATIONS; iter++) { #pragma omp target teams distribute for (i = 1; i < n+1; i++) { #LOOP2 for (j = 1; j < n+1; j++) { #LOOP3 for (k = 1; k < n+1; k++) { a1[i*sz*sz+j*sz+k] = ( a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] + a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] + a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] + a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] + a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] + a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] + a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] + a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] + a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)] ) * fac; } } } #pragma acc parallel loop for (i = 1; i < n+1; i++) { #pragma acc loop for (j = 1; j < n+1; j++) { #pragma acc loop for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k]; } } } } /* end iteration loop */ } /* end data region */ #pragma acc wait t2 = omp_get_wtime(); memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz); md = max_diff(&host_result[0],&device_result[0], sz); /* Free malloc'd memory to prevent leaks */ free(a0); free(a0_init); free(a1); free(host_result); free(device_result); //printf("md: %lf \t tolerance: %lf", md, TOLERANCE); if (md < 
TOLERANCE ){ //printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE); return(t2 - t1); } else{ // printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md); return(-11000); } } /* Utility Functions */ double max_diff(double *array1,double *array2, int sz) { double tmpdiff, diff; int i,j,k; int n = sz-2; diff=0.0; for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]); //printf("diff: %lf", tmpdiff); if (tmpdiff > diff) diff = tmpdiff; } } } return diff; } /* * This function ensures the device is awake. * It is more portable than acc_init(). */ void wul(){ int data = 8192; double *arr_a = (double *)malloc(sizeof(double) * data); double *arr_b = (double *)malloc(sizeof(double) * data); int i = 0; if (arr_a==NULL||arr_b==NULL) { printf("Unable to allocate memory in wul.\n"); } for (i=0;i<data;i++){ arr_a[i] = (double) (rand()/(1.0+RAND_MAX)); } #pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data]) { #pragma acc parallel loop for (i=0;i<data;i++){ arr_b[i] = arr_a[i] * 2; } } if (arr_a[0] < 0){ printf("Error in WUL\n"); /* * This should never be called as rands should be in the range (0,1]. * This stops clever optimizers. */ } free(arr_a); free(arr_b); } int main(int argc, char **argv) { char testName[32]; //printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n"); /* Initialise storage for test results & parse input arguements. */ init(argc, argv); /* Ensure device is awake. */ wul(); sprintf(testName, "27S"); benchmark(testName, &stencil); /* Print results & free results storage */ finalise(); return EXIT_SUCCESS; }
mandel-omp-task.c
/*
 * Sequential Mandelbrot program
 *
 * This program computes and displays all or part of the Mandelbrot
 * set. By default, it examines all points in the complex plane
 * that have both real and imaginary parts between -2 and 2.
 * Command-line parameters allow zooming in on a specific part of
 * this range.
 *
 * Usage:
 *   mandel [-i maxiter -c x0 y0 -s size -w windowsize]
 * where
 *   maxiter denotes the maximum number of iterations at each point -- by default 1000
 *   x0, y0, and size specify the range to examine (a square
 *     centered at (x0 + iy0) of size 2*size by 2*size -- by default,
 *     a square of size 4 by 4 centered at the origin)
 *   windowsize denotes the size of the image (diplay window) to compute
 *
 * Input: none, except the optional command-line arguments
 * Output: a graphical display as described in Wilkinson & Allen,
 *   displayed using the X Window system, plus text output to
 *   standard output showing the above parameters, plus execution
 *   time in seconds.
 *
 * Code based on the original code from Web site for Wilkinson and Allen's
 * text on parallel programming:
 * http://www.cs.uncc.edu/~abw/parallel/par_prog/
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>

/* Wall-clock time in microseconds. */
double getusec_()
{
    struct timeval time;
    gettimeofday(&time, NULL);
    return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                            stamp = stamp/1e6;\
                            printf ("%s: %0.6fs\n",(_m), stamp);

/* Default values for things. */
#define N 2         /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */

int row, col; // variables used to traverse the problem space

/* Structure definition for complex numbers */
typedef struct {
    double real, imag;
} complex;

#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif

/*
 * Compute the escape-iteration count of each pixel, one OpenMP task per
 * pixel; results are drawn to the X display (_DISPLAY_) or stored in
 * output[row][col].
 */
void mandelbrot(int height, int width, double real_min, double imag_min,
                double scale_real, double scale_imag, int maxiter,
#if _DISPLAY_
                int setup_return,
                Display *display, Window win, GC gc, double scale_color, double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display */
    #pragma omp parallel
    #pragma omp single
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            #pragma omp task firstprivate(row, col)
            {
                complex z, c;

                z.real = z.imag = 0;

                /* Scale display coordinates to actual region */
                c.real = real_min + ((double)col * scale_real);
                c.imag = imag_min + ((double)(height - 1 - row) * scale_imag);
                /* height-1-row so y axis displays
                 * with larger values at top */

                /* Calculate z0, z1, .... until divergence or maximum iterations */
                int k = 0;
                double lengthsq, temp;
                do {
                    temp = z.real * z.real - z.imag * z.imag + c.real;
                    z.imag = 2 * z.real * z.imag + c.imag;
                    z.real = temp;
                    lengthsq = z.real * z.real + z.imag * z.imag;
                    ++k;
                } while (lengthsq < (N * N) && k < maxiter);

#if _DISPLAY_
                /* Scale color and display point */
                long color = (long)((k - 1) * scale_color) + min_color;
                if (setup_return == EXIT_SUCCESS) {
                    /* Xlib is not thread-safe here; serialise drawing calls. */
                    #pragma omp critical
                    {
                        XSetForeground(display, gc, color);
                        XDrawPoint(display, win, gc, col, row);
                    }
                }
#else
                output[row][col] = k;
#endif
            }
        }
        #pragma omp taskwait
    }
}

int main(int argc, char *argv[])
{
    int maxiter = 1000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS; /* dimensions of display window */
    int height = NPIXELS;
    double size = N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;

    /* Process command-line arguments */
    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-i") == 0) {
            maxiter = atoi(argv[++i]);
        }
        else if (strcmp(argv[i], "-w") == 0) {
            width = atoi(argv[++i]);
            height = width;
        }
        else if (strcmp(argv[i], "-s") == 0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o") == 0) {
            if ((fp = fopen("parallel.out", "wb")) == NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c") == 0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        }
        else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, "       -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, "       -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, "       -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, "       -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, "       -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, "       -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }

    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min) / 2, (imag_max + imag_min) / 2,
            (real_max - real_min) / 2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return = setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    output = malloc(height * sizeof(int *));
    for (int row = 0; row < height; ++row)
        output[row] = malloc(width * sizeof(int));
#endif

    /* Compute factors to scale computational region to window */
    scale_real = (double)(real_max - real_min) / (double)width;
    scale_imag = (double)(imag_max - imag_min) / (double)height;

#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double)(max_color - min_color) / (double)(maxiter - 1);
#endif

    /* Start timing */
    double stamp;
    START_COUNT_TIME;

#if _DISPLAY_
    mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag, maxiter,
               setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag, maxiter, output);
#endif

    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush(display);
    }
#else
    if (fp != NULL) {
        for (int row = 0; row < height; ++row)
            if (fwrite(output[row], sizeof(int), width, fp) != (size_t)width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
        /* FIX: the output file was never closed, so buffered data could be
         * lost on exit. */
        fclose(fp);
    }
    /* FIX: the image buffer was leaked. */
    for (int row = 0; row < height; ++row)
        free(output[row]);
    free(output);
#endif

#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height, real_min, real_max, imag_min, imag_max);
    }
#endif
    /* FIX: the non-display build fell off the end of main without a return
     * statement; return explicitly for both build variants. */
    return EXIT_SUCCESS;
}
convolution_3x3_pack1to4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 3x3 stride-1 convolution, "pack1to4" layout: each input channel holds one
// float per pixel, each output channel holds 4 interleaved floats per pixel.
// The kernel blob is expected pre-packed as 9 taps x 4 output lanes per
// (outch, inch) pair (9 * 4 floats, see the k0/k1 loads below).
// NOTE(review): assumes bottom_blob has been border-padded so that
// outw/outh valid 3x3 windows exist — confirm against the caller.
static void conv3x3s1_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    int nn_outch = 0;
    int remain_outch_start = 0;

#if __ARM_NEON && __aarch64__
    // On aarch64, process output channels in pairs; the tail (odd channel)
    // falls through to the generic loop below via remain_outch_start.
    nn_outch = outch >> 1;
    remain_outch_start = nn_outch << 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p + 1);

        // Seed each output channel with its 4-lane bias (or zero), then
        // accumulate per-input-channel contributions in place.
        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
        float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f);
        out0.fill(_bias0);
        out1.fill(_bias1);

        const float* k0 = kernel.channel(p);
        const float* k1 = kernel.channel(p + 1);

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;

            const Mat img0 = bottom_blob.channel(q);

            // Three consecutive input rows covered by the 3x3 window.
            const float* r0 = img0.row(0);
            const float* r1 = img0.row(1);
            const float* r2 = img0.row(2);

            // 3x3 kernel taps for output channel p (each tap = 4 output lanes).
            float32x4_t _k00_0 = vld1q_f32(k0);
            float32x4_t _k01_0 = vld1q_f32(k0 + 4);
            float32x4_t _k02_0 = vld1q_f32(k0 + 8);
            float32x4_t _k10_0 = vld1q_f32(k0 + 12);
            float32x4_t _k11_0 = vld1q_f32(k0 + 16);
            float32x4_t _k12_0 = vld1q_f32(k0 + 20);
            float32x4_t _k20_0 = vld1q_f32(k0 + 24);
            float32x4_t _k21_0 = vld1q_f32(k0 + 28);
            float32x4_t _k22_0 = vld1q_f32(k0 + 32);

            // 3x3 kernel taps for output channel p + 1.
            float32x4_t _k00_1 = vld1q_f32(k1);
            float32x4_t _k01_1 = vld1q_f32(k1 + 4);
            float32x4_t _k02_1 = vld1q_f32(k1 + 8);
            float32x4_t _k10_1 = vld1q_f32(k1 + 12);
            float32x4_t _k11_1 = vld1q_f32(k1 + 16);
            float32x4_t _k12_1 = vld1q_f32(k1 + 20);
            float32x4_t _k20_1 = vld1q_f32(k1 + 24);
            float32x4_t _k21_1 = vld1q_f32(k1 + 28);
            float32x4_t _k22_1 = vld1q_f32(k1 + 32);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // 4 output pixels x 2 output channels per iteration.
                for (; j + 3 < outw; j += 4)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n"
                        "prfm pldl1keep, [%1, #512] \n"
                        "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%1] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v0.4s}, [%2], #16 \n"
                        "ld1 {v1.2s}, [%2] \n"
                        "fmla v24.4s, %10.4s, v0.s[0] \n"
                        "fmla v25.4s, %10.4s, v0.s[1] \n"
                        "fmla v26.4s, %10.4s, v0.s[2] \n"
                        "fmla v27.4s, %10.4s, v0.s[3] \n"
                        "fmla v28.4s, %19.4s, v0.s[0] \n"
                        "fmla v29.4s, %19.4s, v0.s[1] \n"
                        "fmla v30.4s, %19.4s, v0.s[2] \n"
                        "fmla v31.4s, %19.4s, v0.s[3] \n"
                        "fmla v24.4s, %11.4s, v0.s[1] \n"
                        "fmla v25.4s, %11.4s, v0.s[2] \n"
                        "fmla v26.4s, %11.4s, v0.s[3] \n"
                        "fmla v27.4s, %11.4s, v1.s[0] \n"
                        "fmla v28.4s, %20.4s, v0.s[1] \n"
                        "fmla v29.4s, %20.4s, v0.s[2] \n"
                        "fmla v30.4s, %20.4s, v0.s[3] \n"
                        "fmla v31.4s, %20.4s, v1.s[0] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v2.4s}, [%3], #16 \n"
                        "ld1 {v3.2s}, [%3] \n"
                        "fmla v24.4s, %12.4s, v0.s[2] \n"
                        "fmla v25.4s, %12.4s, v0.s[3] \n"
                        "fmla v26.4s, %12.4s, v1.s[0] \n"
                        "fmla v27.4s, %12.4s, v1.s[1] \n"
                        "fmla v28.4s, %21.4s, v0.s[2] \n"
                        "fmla v29.4s, %21.4s, v0.s[3] \n"
                        "fmla v30.4s, %21.4s, v1.s[0] \n"
                        "fmla v31.4s, %21.4s, v1.s[1] \n"
                        "fmla v24.4s, %13.4s, v2.s[0] \n"
                        "fmla v25.4s, %13.4s, v2.s[1] \n"
                        "fmla v26.4s, %13.4s, v2.s[2] \n"
                        "fmla v27.4s, %13.4s, v2.s[3] \n"
                        "fmla v28.4s, %22.4s, v2.s[0] \n"
                        "fmla v29.4s, %22.4s, v2.s[1] \n"
                        "fmla v30.4s, %22.4s, v2.s[2] \n"
                        "fmla v31.4s, %22.4s, v2.s[3] \n"
                        "fmla v24.4s, %14.4s, v2.s[1] \n"
                        "fmla v25.4s, %14.4s, v2.s[2] \n"
                        "fmla v26.4s, %14.4s, v2.s[3] \n"
                        "fmla v27.4s, %14.4s, v3.s[0] \n"
                        "fmla v28.4s, %23.4s, v2.s[1] \n"
                        "fmla v29.4s, %23.4s, v2.s[2] \n"
                        "fmla v30.4s, %23.4s, v2.s[3] \n"
                        "fmla v31.4s, %23.4s, v3.s[0] \n"
                        "prfm pldl1keep, [%4, #128] \n"
                        "ld1 {v0.4s}, [%4], #16 \n"
                        "ld1 {v1.2s}, [%4] \n"
                        "fmla v24.4s, %15.4s, v2.s[2] \n"
                        "fmla v25.4s, %15.4s, v2.s[3] \n"
                        "fmla v26.4s, %15.4s, v3.s[0] \n"
                        "fmla v27.4s, %15.4s, v3.s[1] \n"
                        "fmla v28.4s, %24.4s, v2.s[2] \n"
                        "fmla v29.4s, %24.4s, v2.s[3] \n"
                        "fmla v30.4s, %24.4s, v3.s[0] \n"
                        "fmla v31.4s, %24.4s, v3.s[1] \n"
                        "fmla v24.4s, %16.4s, v0.s[0] \n"
                        "fmla v25.4s, %16.4s, v0.s[1] \n"
                        "fmla v26.4s, %16.4s, v0.s[2] \n"
                        "fmla v27.4s, %16.4s, v0.s[3] \n"
                        "fmla v28.4s, %25.4s, v0.s[0] \n"
                        "fmla v29.4s, %25.4s, v0.s[1] \n"
                        "fmla v30.4s, %25.4s, v0.s[2] \n"
                        "fmla v31.4s, %25.4s, v0.s[3] \n"
                        "fmla v24.4s, %17.4s, v0.s[1] \n"
                        "fmla v25.4s, %17.4s, v0.s[2] \n"
                        "fmla v26.4s, %17.4s, v0.s[3] \n"
                        "fmla v27.4s, %17.4s, v1.s[0] \n"
                        "fmla v28.4s, %26.4s, v0.s[1] \n"
                        "fmla v29.4s, %26.4s, v0.s[2] \n"
                        "fmla v30.4s, %26.4s, v0.s[3] \n"
                        "fmla v31.4s, %26.4s, v1.s[0] \n"
                        "fmla v24.4s, %18.4s, v0.s[2] \n"
                        "fmla v25.4s, %18.4s, v0.s[3] \n"
                        "fmla v26.4s, %18.4s, v1.s[0] \n"
                        "fmla v27.4s, %18.4s, v1.s[1] \n"
                        "fmla v28.4s, %27.4s, v0.s[2] \n"
                        "fmla v29.4s, %27.4s, v0.s[3] \n"
                        "fmla v30.4s, %27.4s, v1.s[0] \n"
                        "fmla v31.4s, %27.4s, v1.s[1] \n"
                        "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
                        "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%1], #64 \n"
                        : "=r"(outptr0), // %0
                        "=r"(outptr1),   // %1
                        "=r"(r0),        // %2
                        "=r"(r1),        // %3
                        "=r"(r2)         // %4
                        : "0"(outptr0),
                        "1"(outptr1),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "w"(_k00_0), // %10
                        "w"(_k01_0), // %11
                        "w"(_k02_0), // %12
                        "w"(_k10_0), // %13
                        "w"(_k11_0), // %14
                        "w"(_k12_0), // %15
                        "w"(_k20_0), // %16
                        "w"(_k21_0), // %17
                        "w"(_k22_0), // %18
                        "w"(_k00_1), // %19
                        "w"(_k01_1), // %20
                        "w"(_k02_1), // %21
                        "w"(_k10_1), // %22
                        "w"(_k11_1), // %23
                        "w"(_k12_1), // %24
                        "w"(_k20_1), // %25
                        "w"(_k21_1), // %26
                        "w"(_k22_1)  // %27
                        : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                // 2 output pixels x 2 output channels per iteration.
                for (; j + 1 < outw; j += 2)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v24.4s, v25.4s}, [%0] \n"
                        "prfm pldl1keep, [%1, #256] \n"
                        "ld1 {v26.4s, v27.4s}, [%1] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v0.4s}, [%2] \n"
                        "add %2, %2, #8 \n"
                        "fmla v24.4s, %10.4s, v0.s[0] \n"
                        "fmla v25.4s, %10.4s, v0.s[1] \n"
                        "fmla v26.4s, %19.4s, v0.s[0] \n"
                        "fmla v27.4s, %19.4s, v0.s[1] \n"
                        "fmla v24.4s, %11.4s, v0.s[1] \n"
                        "fmla v25.4s, %11.4s, v0.s[2] \n"
                        "fmla v26.4s, %20.4s, v0.s[1] \n"
                        "fmla v27.4s, %20.4s, v0.s[2] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v1.4s}, [%3] \n"
                        "fmla v24.4s, %12.4s, v0.s[2] \n"
                        "fmla v25.4s, %12.4s, v0.s[3] \n"
                        "fmla v26.4s, %21.4s, v0.s[2] \n"
                        "fmla v27.4s, %21.4s, v0.s[3] \n"
                        "add %3, %3, #8 \n"
                        "fmla v24.4s, %13.4s, v1.s[0] \n"
                        "fmla v25.4s, %13.4s, v1.s[1] \n"
                        "fmla v26.4s, %22.4s, v1.s[0] \n"
                        "fmla v27.4s, %22.4s, v1.s[1] \n"
                        "fmla v24.4s, %14.4s, v1.s[1] \n"
                        "fmla v25.4s, %14.4s, v1.s[2] \n"
                        "fmla v26.4s, %23.4s, v1.s[1] \n"
                        "fmla v27.4s, %23.4s, v1.s[2] \n"
                        "prfm pldl1keep, [%4, #128] \n"
                        "ld1 {v0.4s}, [%4] \n"
                        "fmla v24.4s, %15.4s, v1.s[2] \n"
                        "fmla v25.4s, %15.4s, v1.s[3] \n"
                        "fmla v26.4s, %24.4s, v1.s[2] \n"
                        "fmla v27.4s, %24.4s, v1.s[3] \n"
                        "add %4, %4, #8 \n"
                        "fmla v24.4s, %16.4s, v0.s[0] \n"
                        "fmla v25.4s, %16.4s, v0.s[1] \n"
                        "fmla v26.4s, %25.4s, v0.s[0] \n"
                        "fmla v27.4s, %25.4s, v0.s[1] \n"
                        "fmla v24.4s, %17.4s, v0.s[1] \n"
                        "fmla v25.4s, %17.4s, v0.s[2] \n"
                        "fmla v26.4s, %26.4s, v0.s[1] \n"
                        "fmla v27.4s, %26.4s, v0.s[2] \n"
                        "fmla v24.4s, %18.4s, v0.s[2] \n"
                        "fmla v25.4s, %18.4s, v0.s[3] \n"
                        "fmla v26.4s, %27.4s, v0.s[2] \n"
                        "fmla v27.4s, %27.4s, v0.s[3] \n"
                        "st1 {v24.4s, v25.4s}, [%0], #32 \n"
                        "st1 {v26.4s, v27.4s}, [%1], #32 \n"
                        : "=r"(outptr0), // %0
                        "=r"(outptr1),   // %1
                        "=r"(r0),        // %2
                        "=r"(r1),        // %3
                        "=r"(r2)         // %4
                        : "0"(outptr0),
                        "1"(outptr1),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "w"(_k00_0), // %10
                        "w"(_k01_0), // %11
                        "w"(_k02_0), // %12
                        "w"(_k10_0), // %13
                        "w"(_k11_0), // %14
                        "w"(_k12_0), // %15
                        "w"(_k20_0), // %16
                        "w"(_k21_0), // %17
                        "w"(_k22_0), // %18
                        "w"(_k00_1), // %19
                        "w"(_k01_1), // %20
                        "w"(_k02_1), // %21
                        "w"(_k10_1), // %22
                        "w"(_k11_1), // %23
                        "w"(_k12_1), // %24
                        "w"(_k20_1), // %25
                        "w"(_k21_1), // %26
                        "w"(_k22_1)  // %27
                        : "memory", "v0", "v1", "v24", "v25", "v26", "v27");
                }
                // Scalar (1 pixel x 2 channels) tail using intrinsics.
                for (; j < outw; j++)
                {
                    float32x4_t _sum00 = vld1q_f32(outptr0);
                    float32x4_t _sum10 = vld1q_f32(outptr1);

                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _r1 = vld1q_f32(r1);
                    float32x4_t _r2 = vld1q_f32(r2);

                    _sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0);
                    _sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1);
                    _sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2);
                    _sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0);
                    _sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1);
                    _sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2);
                    _sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0);
                    _sum00 = vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1);
                    _sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2);

                    _sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0);
                    _sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1);
                    _sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2);
                    _sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0);
                    _sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1);
                    _sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2);
                    _sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0);
                    _sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1);
                    _sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2);

                    vst1q_f32(outptr0, _sum00);
                    vst1q_f32(outptr1, _sum10);

                    r0 += 1;
                    r1 += 1;
                    r2 += 1;
                    outptr0 += 4;
                    outptr1 += 4;
                }

                // Stride-1: skip the 2 rightmost padded columns of each row.
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            // Advance to the next input channel's 3x3x4 packed kernel taps.
            k0 += 9 * 4;
            k1 += 9 * 4;
        }
    }
#endif // __ARM_NEON && __aarch64__

    // Remaining (or, on armv7, all) output channels, one at a time.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
        out0.fill(_bias0);

        const float* k0 = kernel.channel(p);

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0.row(0);

            const Mat img0 = bottom_blob.channel(q);

            const float* r0 = img0.row(0);
            const float* r1 = img0.row(1);
            const float* r2 = img0.row(2);

            float32x4_t _k00 = vld1q_f32(k0);
            float32x4_t _k01 = vld1q_f32(k0 + 4);
            float32x4_t _k02 = vld1q_f32(k0 + 8);
            float32x4_t _k10 = vld1q_f32(k0 + 12);
            float32x4_t _k11 = vld1q_f32(k0 + 16);
            float32x4_t _k12 = vld1q_f32(k0 + 20);
            float32x4_t _k20 = vld1q_f32(k0 + 24);
            float32x4_t _k21 = vld1q_f32(k0 + 28);
            float32x4_t _k22 = vld1q_f32(k0 + 32);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
#if __aarch64__
                // 8 output pixels per iteration (aarch64 only).
                for (; j + 7 < outw; j += 8)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
                        "prfm pldl1keep, [%1, #256] \n"
                        "ld1 {v0.4s, v1.4s}, [%1], #32 \n"
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n"
                        "fmla v24.4s, %8.4s, v0.s[0] \n"
                        "fmla v25.4s, %8.4s, v0.s[1] \n"
                        "fmla v26.4s, %8.4s, v0.s[2] \n"
                        "fmla v27.4s, %8.4s, v0.s[3] \n"
                        "fmla v28.4s, %8.4s, v1.s[0] \n"
                        "fmla v29.4s, %8.4s, v1.s[1] \n"
                        "fmla v30.4s, %8.4s, v1.s[2] \n"
                        "fmla v31.4s, %8.4s, v1.s[3] \n"
                        "ld1 {v2.2s}, [%1] \n"
                        "fmla v24.4s, %9.4s, v0.s[1] \n"
                        "fmla v25.4s, %9.4s, v0.s[2] \n"
                        "fmla v26.4s, %9.4s, v0.s[3] \n"
                        "fmla v27.4s, %9.4s, v1.s[0] \n"
                        "fmla v28.4s, %9.4s, v1.s[1] \n"
                        "fmla v29.4s, %9.4s, v1.s[2] \n"
                        "fmla v30.4s, %9.4s, v1.s[3] \n"
                        "fmla v31.4s, %9.4s, v2.s[0] \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld1 {v4.4s, v5.4s}, [%2], #32 \n"
                        "fmla v24.4s, %10.4s, v0.s[2] \n"
                        "fmla v25.4s, %10.4s, v0.s[3] \n"
                        "fmla v26.4s, %10.4s, v1.s[0] \n"
                        "fmla v27.4s, %10.4s, v1.s[1] \n"
                        "fmla v28.4s, %10.4s, v1.s[2] \n"
                        "fmla v29.4s, %10.4s, v1.s[3] \n"
                        "fmla v30.4s, %10.4s, v2.s[0] \n"
                        "fmla v31.4s, %10.4s, v2.s[1] \n"
                        "ld1 {v2.2s}, [%2] \n"
                        "fmla v24.4s, %11.4s, v4.s[0] \n"
                        "fmla v25.4s, %11.4s, v4.s[1] \n"
                        "fmla v26.4s, %11.4s, v4.s[2] \n"
                        "fmla v27.4s, %11.4s, v4.s[3] \n"
                        "fmla v28.4s, %11.4s, v5.s[0] \n"
                        "fmla v29.4s, %11.4s, v5.s[1] \n"
                        "fmla v30.4s, %11.4s, v5.s[2] \n"
                        "fmla v31.4s, %11.4s, v5.s[3] \n"
                        "fmla v24.4s, %12.4s, v4.s[1] \n"
                        "fmla v25.4s, %12.4s, v4.s[2] \n"
                        "fmla v26.4s, %12.4s, v4.s[3] \n"
                        "fmla v27.4s, %12.4s, v5.s[0] \n"
                        "fmla v28.4s, %12.4s, v5.s[1] \n"
                        "fmla v29.4s, %12.4s, v5.s[2] \n"
                        "fmla v30.4s, %12.4s, v5.s[3] \n"
                        "fmla v31.4s, %12.4s, v2.s[0] \n"
                        "prfm pldl1keep, [%3, #256] \n"
                        "ld1 {v0.4s, v1.4s}, [%3], #32 \n"
                        "fmla v24.4s, %13.4s, v4.s[2] \n"
                        "fmla v25.4s, %13.4s, v4.s[3] \n"
                        "fmla v26.4s, %13.4s, v5.s[0] \n"
                        "fmla v27.4s, %13.4s, v5.s[1] \n"
                        "fmla v28.4s, %13.4s, v5.s[2] \n"
                        "fmla v29.4s, %13.4s, v5.s[3] \n"
                        "fmla v30.4s, %13.4s, v2.s[0] \n"
                        "fmla v31.4s, %13.4s, v2.s[1] \n"
                        "ld1 {v2.2s}, [%3] \n"
                        "fmla v24.4s, %14.4s, v0.s[0] \n"
                        "fmla v25.4s, %14.4s, v0.s[1] \n"
                        "fmla v26.4s, %14.4s, v0.s[2] \n"
                        "fmla v27.4s, %14.4s, v0.s[3] \n"
                        "fmla v28.4s, %14.4s, v1.s[0] \n"
                        "fmla v29.4s, %14.4s, v1.s[1] \n"
                        "fmla v30.4s, %14.4s, v1.s[2] \n"
                        "fmla v31.4s, %14.4s, v1.s[3] \n"
                        "fmla v24.4s, %15.4s, v0.s[1] \n"
                        "fmla v25.4s, %15.4s, v0.s[2] \n"
                        "fmla v26.4s, %15.4s, v0.s[3] \n"
                        "fmla v27.4s, %15.4s, v1.s[0] \n"
                        "fmla v28.4s, %15.4s, v1.s[1] \n"
                        "fmla v29.4s, %15.4s, v1.s[2] \n"
                        "fmla v30.4s, %15.4s, v1.s[3] \n"
                        "fmla v31.4s, %15.4s, v2.s[0] \n"
                        "sub %0, %0, #64 \n"
                        "fmla v24.4s, %16.4s, v0.s[2] \n"
                        "fmla v25.4s, %16.4s, v0.s[3] \n"
                        "fmla v26.4s, %16.4s, v1.s[0] \n"
                        "fmla v27.4s, %16.4s, v1.s[1] \n"
                        "fmla v28.4s, %16.4s, v1.s[2] \n"
                        "fmla v29.4s, %16.4s, v1.s[3] \n"
                        "fmla v30.4s, %16.4s, v2.s[0] \n"
                        "fmla v31.4s, %16.4s, v2.s[1] \n"
                        "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
                        "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
#endif // __aarch64__
                // 4 output pixels per iteration.
                for (; j + 3 < outw; j += 4)
                {
#if __aarch64__
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.4s}, [%1], #16 \n"
                        "fmla v24.4s, %8.4s, v0.s[0] \n"
                        "fmla v25.4s, %8.4s, v0.s[1] \n"
                        "fmla v26.4s, %8.4s, v0.s[2] \n"
                        "fmla v27.4s, %8.4s, v0.s[3] \n"
                        "ld1 {v1.2s}, [%1] \n"
                        "fmla v24.4s, %9.4s, v0.s[1] \n"
                        "fmla v25.4s, %9.4s, v0.s[2] \n"
                        "fmla v26.4s, %9.4s, v0.s[3] \n"
                        "fmla v27.4s, %9.4s, v1.s[0] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v2.4s}, [%2], #16 \n"
                        "fmla v24.4s, %10.4s, v0.s[2] \n"
                        "fmla v25.4s, %10.4s, v0.s[3] \n"
                        "fmla v26.4s, %10.4s, v1.s[0] \n"
                        "fmla v27.4s, %10.4s, v1.s[1] \n"
                        "ld1 {v3.2s}, [%2] \n"
                        "fmla v24.4s, %11.4s, v2.s[0] \n"
                        "fmla v25.4s, %11.4s, v2.s[1] \n"
                        "fmla v26.4s, %11.4s, v2.s[2] \n"
                        "fmla v27.4s, %11.4s, v2.s[3] \n"
                        "fmla v24.4s, %12.4s, v2.s[1] \n"
                        "fmla v25.4s, %12.4s, v2.s[2] \n"
                        "fmla v26.4s, %12.4s, v2.s[3] \n"
                        "fmla v27.4s, %12.4s, v3.s[0] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v0.4s}, [%3], #16 \n"
                        "fmla v24.4s, %13.4s, v2.s[2] \n"
                        "fmla v25.4s, %13.4s, v2.s[3] \n"
                        "fmla v26.4s, %13.4s, v3.s[0] \n"
                        "fmla v27.4s, %13.4s, v3.s[1] \n"
                        "ld1 {v1.2s}, [%3] \n"
                        "fmla v24.4s, %14.4s, v0.s[0] \n"
                        "fmla v25.4s, %14.4s, v0.s[1] \n"
                        "fmla v26.4s, %14.4s, v0.s[2] \n"
                        "fmla v27.4s, %14.4s, v0.s[3] \n"
                        "fmla v24.4s, %15.4s, v0.s[1] \n"
                        "fmla v25.4s, %15.4s, v0.s[2] \n"
                        "fmla v26.4s, %15.4s, v0.s[3] \n"
                        "fmla v27.4s, %15.4s, v1.s[0] \n"
                        "fmla v24.4s, %16.4s, v0.s[2] \n"
                        "fmla v25.4s, %16.4s, v0.s[3] \n"
                        "fmla v26.4s, %16.4s, v1.s[0] \n"
                        "fmla v27.4s, %16.4s, v1.s[1] \n"
                        "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27");
#else  // __aarch64__
                    asm volatile(
                        "pld [%0, #512] \n"
                        "vldm %0, {d24-d31} \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d0-d1}, [%1]! \n"
                        "vmla.f32 q12, %q8, d0[0] \n"
                        "vmla.f32 q13, %q8, d0[1] \n"
                        "vmla.f32 q14, %q8, d1[0] \n"
                        "vmla.f32 q15, %q8, d1[1] \n"
                        "vld1.f32 {d2}, [%1] \n"
                        "vmla.f32 q12, %q9, d0[1] \n"
                        "vmla.f32 q13, %q9, d1[0] \n"
                        "vmla.f32 q14, %q9, d1[1] \n"
                        "vmla.f32 q15, %q9, d2[0] \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d4-d5}, [%2]! \n"
                        "vmla.f32 q12, %q10, d1[0] \n"
                        "vmla.f32 q13, %q10, d1[1] \n"
                        "vmla.f32 q14, %q10, d2[0] \n"
                        "vmla.f32 q15, %q10, d2[1] \n"
                        "vmla.f32 q12, %q11, d4[0] \n"
                        "vmla.f32 q13, %q11, d4[1] \n"
                        "vmla.f32 q14, %q11, d5[0] \n"
                        "vmla.f32 q15, %q11, d5[1] \n"
                        "vld1.f32 {d3}, [%2] \n"
                        "vmla.f32 q12, %q12, d4[1] \n"
                        "vmla.f32 q13, %q12, d5[0] \n"
                        "vmla.f32 q14, %q12, d5[1] \n"
                        "vmla.f32 q15, %q12, d3[0] \n"
                        "pld [%3, #128] \n"
                        "vld1.f32 {d0-d1}, [%3]! \n"
                        "vmla.f32 q12, %q13, d5[0] \n"
                        "vmla.f32 q13, %q13, d5[1] \n"
                        "vmla.f32 q14, %q13, d3[0] \n"
                        "vmla.f32 q15, %q13, d3[1] \n"
                        "vmla.f32 q12, %q14, d0[0] \n"
                        "vmla.f32 q13, %q14, d0[1] \n"
                        "vmla.f32 q14, %q14, d1[0] \n"
                        "vmla.f32 q15, %q14, d1[1] \n"
                        "vld1.f32 {d2}, [%3] \n"
                        "vmla.f32 q12, %q15, d0[1] \n"
                        "vmla.f32 q13, %q15, d1[0] \n"
                        "vmla.f32 q14, %q15, d1[1] \n"
                        "vmla.f32 q15, %q15, d2[0] \n"
                        "vmla.f32 q12, %q16, d1[0] \n"
                        "vmla.f32 q13, %q16, d1[1] \n"
                        "vmla.f32 q14, %q16, d2[0] \n"
                        "vmla.f32 q15, %q16, d2[1] \n"
                        "vstm %0!, {d24-d31} \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15");
#endif // __aarch64__
                }
                // 2 output pixels per iteration.
                for (; j + 1 < outw; j += 2)
                {
#if __aarch64__
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v24.4s, v25.4s}, [%0] \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.4s}, [%1] \n"
                        "fmul v26.4s, %8.4s, v0.s[0] \n"
                        "fmul v27.4s, %8.4s, v0.s[1] \n"
                        "fmla v24.4s, %9.4s, v0.s[1] \n"
                        "fmla v25.4s, %9.4s, v0.s[2] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v1.4s}, [%2] \n"
                        "fmla v26.4s, %10.4s, v0.s[2] \n"
                        "fmla v27.4s, %10.4s, v0.s[3] \n"
                        "fmla v24.4s, %11.4s, v1.s[0] \n"
                        "fmla v25.4s, %11.4s, v1.s[1] \n"
                        "add %1, %1, #8 \n"
                        "fmla v26.4s, %12.4s, v1.s[1] \n"
                        "fmla v27.4s, %12.4s, v1.s[2] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v0.4s}, [%3] \n"
                        "fmla v24.4s, %13.4s, v1.s[2] \n"
                        "fmla v25.4s, %13.4s, v1.s[3] \n"
                        "fmla v26.4s, %14.4s, v0.s[0] \n"
                        "fmla v27.4s, %14.4s, v0.s[1] \n"
                        "add %2, %2, #8 \n"
                        "fmla v24.4s, %15.4s, v0.s[1] \n"
                        "fmla v25.4s, %15.4s, v0.s[2] \n"
                        "fmla v26.4s, %16.4s, v0.s[2] \n"
                        "fmla v27.4s, %16.4s, v0.s[3] \n"
                        "add %3, %3, #8 \n"
                        "fadd v24.4s, v24.4s, v26.4s \n"
                        "fadd v25.4s, v25.4s, v27.4s \n"
                        "st1 {v24.4s, v25.4s}, [%0], #32 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "memory", "v0", "v1", "v24", "v25", "v26", "v27");
#else  // __aarch64__
                    asm volatile(
                        "pld [%0, #256] \n"
                        "vld1.f32 {d24-d27}, [%0 :128] \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d0-d1}, [%1] \n"
                        "vmul.f32 q14, %q8, d0[0] \n"
                        "vmul.f32 q15, %q8, d0[1] \n"
                        "vmla.f32 q12, %q9, d0[1] \n"
                        "vmla.f32 q13, %q9, d1[0] \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d2-d3}, [%2] \n"
                        "vmla.f32 q14, %q10, d1[0] \n"
                        "vmla.f32 q15, %q10, d1[1] \n"
                        "vmla.f32 q12, %q11, d2[0] \n"
                        "vmla.f32 q13, %q11, d2[1] \n"
                        "add %1, %1, #8 \n"
                        "vmla.f32 q14, %q12, d2[1] \n"
                        "vmla.f32 q15, %q12, d3[0] \n"
                        "pld [%3, #128] \n"
                        "vld1.f32 {d0-d1}, [%3] \n"
                        "vmla.f32 q12, %q13, d3[0] \n"
                        "vmla.f32 q13, %q13, d3[1] \n"
                        "vmla.f32 q14, %q14, d0[0] \n"
                        "vmla.f32 q15, %q14, d0[1] \n"
                        "add %2, %2, #8 \n"
                        "vmla.f32 q12, %q15, d0[1] \n"
                        "vmla.f32 q13, %q15, d1[0] \n"
                        "vmla.f32 q14, %q16, d1[0] \n"
                        "vmla.f32 q15, %q16, d1[1] \n"
                        "add %3, %3, #8 \n"
                        "vadd.f32 q12, q12, q14 \n"
                        "vadd.f32 q13, q13, q15 \n"
                        "vst1.f32 {d24-d27}, [%0 :128]! \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "memory", "q0", "q1", "q12", "q13", "q14", "q15");
#endif // __aarch64__
                }
                // Scalar tail, one output pixel at a time.
                for (; j < outw; j++)
                {
                    float32x4_t _sum0 = vld1q_f32(outptr0);

                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _r1 = vld1q_f32(r1);
                    float32x4_t _r2 = vld1q_f32(r2);

#if __aarch64__
                    _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
                    _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
                    _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
                    _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
                    _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
                    _sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
                    _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
                    _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
                    _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
                    _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif

                    vst1q_f32(outptr0, _sum0);

                    r0 += 1;
                    r1 += 1;
                    r2 += 1;
                    outptr0 += 4;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            k0 += 9 * 4;
        }
    }
}

// 3x3 stride-2 convolution, pack1to4 layout (see conv3x3s1_pack1to4_neon for
// the packing convention). Same structure as the stride-1 kernel, but the
// input pointers advance 2 floats per output pixel, and tailstep skips the
// remainder of the current row plus one full row at each row boundary.
static void conv3x3s2_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Bytes... floats consumed per output row: 2*outw, so this hops to the
    // start of the next window row pair.
    const int tailstep = w - 2 * outw + w;

    const float* bias = _bias;

    int nn_outch = 0;
    int remain_outch_start = 0;

#if __ARM_NEON && __aarch64__
    // Pair up output channels on aarch64, as in the stride-1 version.
    nn_outch = outch >> 1;
    remain_outch_start = nn_outch << 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p + 1);

        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
        float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f);
        out0.fill(_bias0);
        out1.fill(_bias1);

        const float* k0 = kernel.channel(p);
        const float* k1 = kernel.channel(p + 1);

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;

            const Mat img0 = bottom_blob.channel(q);

            const float* r0 = img0.row(0);
            const float* r1 = img0.row(1);
            const float* r2 = img0.row(2);

            float32x4_t _k00_0 = vld1q_f32(k0);
            float32x4_t _k01_0 = vld1q_f32(k0 + 4);
            float32x4_t _k02_0 = vld1q_f32(k0 + 8);
            float32x4_t _k10_0 = vld1q_f32(k0 + 12);
            float32x4_t _k11_0 = vld1q_f32(k0 + 16);
            float32x4_t _k12_0 = vld1q_f32(k0 + 20);
            float32x4_t _k20_0 = vld1q_f32(k0 + 24);
            float32x4_t _k21_0 = vld1q_f32(k0 + 28);
            float32x4_t _k22_0 = vld1q_f32(k0 + 32);

            float32x4_t _k00_1 = vld1q_f32(k1);
            float32x4_t _k01_1 = vld1q_f32(k1 + 4);
            float32x4_t _k02_1 = vld1q_f32(k1 + 8);
            float32x4_t _k10_1 = vld1q_f32(k1 + 12);
            float32x4_t _k11_1 = vld1q_f32(k1 + 16);
            float32x4_t _k12_1 = vld1q_f32(k1 + 20);
            float32x4_t _k20_1 = vld1q_f32(k1 + 24);
            float32x4_t _k21_1 = vld1q_f32(k1 + 28);
            float32x4_t _k22_1 = vld1q_f32(k1 + 32);

            int i = 0;
            for (; i < outh; i++)
            {
                // 4 output pixels per asm-loop iteration, remainder handled
                // with intrinsics.
                int nn = outw >> 2;
                int remain = outw & 3;

                if (nn > 0)
                {
                    asm volatile(
                        "0: \n"
                        "prfm pldl1keep, [%1, #512] \n"
                        "ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1] \n" // sum0

                        // r0
                        "prfm pldl1keep, [%3, #256] \n"
                        "ld1 {v0.4s, v1.4s}, [%3], #32 \n"
                        "ld1r {v4.4s}, [%3] \n"

                        "fmla v6.4s, %12.4s, v0.s[0] \n"
                        "fmla v7.4s, %12.4s, v0.s[2] \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2] \n" // sum1
                        "fmla v8.4s, %12.4s, v1.s[0] \n"
                        "fmla v9.4s, %12.4s, v1.s[2] \n"
                        "fmla v10.4s, %21.4s, v0.s[0] \n"
                        "fmla v11.4s, %21.4s, v0.s[2] \n"
                        "fmla v12.4s, %21.4s, v1.s[0] \n"
                        "fmla v13.4s, %21.4s, v1.s[2] \n"
                        "fmla v6.4s, %13.4s, v0.s[1] \n"
                        "fmla v7.4s, %13.4s, v0.s[3] \n"
                        "fmla v8.4s, %13.4s, v1.s[1] \n"
                        "fmla v9.4s, %13.4s, v1.s[3] \n"
                        "fmla v10.4s, %22.4s, v0.s[1] \n"
                        "fmla v11.4s, %22.4s, v0.s[3] \n"
                        "fmla v12.4s, %22.4s, v1.s[1] \n"
                        "fmla v13.4s, %22.4s, v1.s[3] \n"

                        // r1
                        "prfm pldl1keep, [%4, #256] \n"
                        "ld1 {v2.4s, v3.4s}, [%4], #32 \n"
                        "ld1r {v5.4s}, [%4] \n"

                        "fmla v6.4s, %14.4s, v0.s[2] \n"
                        "fmla v7.4s, %14.4s, v1.s[0] \n"
                        "fmla v8.4s, %14.4s, v1.s[2] \n"
                        "fmla v9.4s, %14.4s, v4.s[0] \n"
                        "fmla v10.4s, %23.4s, v0.s[2] \n"
                        "fmla v11.4s, %23.4s, v1.s[0] \n"
                        "fmla v12.4s, %23.4s, v1.s[2] \n"
                        "fmla v13.4s, %23.4s, v4.s[0] \n"
                        "fmla v6.4s, %15.4s, v2.s[0] \n"
                        "fmla v7.4s, %15.4s, v2.s[2] \n"
                        "fmla v8.4s, %15.4s, v3.s[0] \n"
                        "fmla v9.4s, %15.4s, v3.s[2] \n"
                        "fmla v10.4s, %24.4s, v2.s[0] \n"
                        "fmla v11.4s, %24.4s, v2.s[2] \n"
                        "fmla v12.4s, %24.4s, v3.s[0] \n"
                        "fmla v13.4s, %24.4s, v3.s[2] \n"
                        "fmla v6.4s, %16.4s, v2.s[1] \n"
                        "fmla v7.4s, %16.4s, v2.s[3] \n"
                        "fmla v8.4s, %16.4s, v3.s[1] \n"
                        "fmla v9.4s, %16.4s, v3.s[3] \n"
                        "fmla v10.4s, %25.4s, v2.s[1] \n"
                        "fmla v11.4s, %25.4s, v2.s[3] \n"
                        "fmla v12.4s, %25.4s, v3.s[1] \n"
                        "fmla v13.4s, %25.4s, v3.s[3] \n"

                        // r2
                        "prfm pldl1keep, [%5, #256] \n"
                        "ld1 {v0.4s, v1.4s}, [%5], #32 \n"
                        "ld1r {v4.4s}, [%5] \n"

                        "fmla v6.4s, %17.4s, v2.s[2] \n"
                        "fmla v7.4s, %17.4s, v3.s[0] \n"
                        "fmla v8.4s, %17.4s, v3.s[2] \n"
                        "fmla v9.4s, %17.4s, v5.s[0] \n"
                        "fmla v10.4s, %26.4s, v2.s[2] \n"
                        "fmla v11.4s, %26.4s, v3.s[0] \n"
                        "fmla v12.4s, %26.4s, v3.s[2] \n"
                        "fmla v13.4s, %26.4s, v5.s[0] \n"
                        "fmla v6.4s, %18.4s, v0.s[0] \n"
                        "fmla v7.4s, %18.4s, v0.s[2] \n"
                        "fmla v8.4s, %18.4s, v1.s[0] \n"
                        "fmla v9.4s, %18.4s, v1.s[2] \n"
                        "fmla v10.4s, %27.4s, v0.s[0] \n"
                        "fmla v11.4s, %27.4s, v0.s[2] \n"
                        "fmla v12.4s, %27.4s, v1.s[0] \n"
                        "fmla v13.4s, %27.4s, v1.s[2] \n"
                        "fmla v6.4s, %19.4s, v0.s[1] \n"
                        "fmla v7.4s, %19.4s, v0.s[3] \n"
                        "fmla v8.4s, %19.4s, v1.s[1] \n"
                        "fmla v9.4s, %19.4s, v1.s[3] \n"
                        "fmla v10.4s, %28.4s, v0.s[1] \n"
                        "fmla v11.4s, %28.4s, v0.s[3] \n"
                        "fmla v12.4s, %28.4s, v1.s[1] \n"
                        "fmla v13.4s, %28.4s, v1.s[3] \n"
                        "fmla v6.4s, %20.4s, v0.s[2] \n"
                        "fmla v7.4s, %20.4s, v1.s[0] \n"
                        "fmla v8.4s, %20.4s, v1.s[2] \n"
                        "fmla v9.4s, %20.4s, v4.s[0] \n"
                        "fmla v10.4s, %29.4s, v0.s[2] \n"
                        "fmla v11.4s, %29.4s, v1.s[0] \n"
                        "fmla v12.4s, %29.4s, v1.s[2] \n"
                        "fmla v13.4s, %29.4s, v4.s[0] \n"

                        "subs %w0, %w0, #1 \n"
                        "st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n"
                        "st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n"
                        "bne 0b \n"
                        : "=r"(nn),      // %0
                        "=r"(outptr0),   // %1
                        "=r"(outptr1),   // %2
                        "=r"(r0),        // %3
                        "=r"(r1),        // %4
                        "=r"(r2)         // %5
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr1),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "w"(_k00_0), // %12
                        "w"(_k01_0), // %13
                        "w"(_k02_0), // %14
                        "w"(_k10_0), // %15
                        "w"(_k11_0), // %16
                        "w"(_k12_0), // %17
                        "w"(_k20_0), // %18
                        "w"(_k21_0), // %19
                        "w"(_k22_0), // %20
                        "w"(_k00_1), // %21
                        "w"(_k01_1), // %22
                        "w"(_k02_1), // %23
                        "w"(_k10_1), // %24
                        "w"(_k11_1), // %25
                        "w"(_k12_1), // %26
                        "w"(_k20_1), // %27
                        "w"(_k21_1), // %28
                        "w"(_k22_1)  // %29
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
                }
                // Scalar remainder: stride-2, so r pointers advance by 2.
                for (; remain > 0; remain--)
                {
                    float32x4_t _sum0 = vld1q_f32(outptr0);
                    float32x4_t _sum1 = vld1q_f32(outptr1);

                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _r1 = vld1q_f32(r1);
                    float32x4_t _r2 = vld1q_f32(r2);

                    _sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2);

                    _sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2);
                    _sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2);
                    _sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2);

                    vst1q_f32(outptr0, _sum0);
                    vst1q_f32(outptr1, _sum1);

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0 += 4;
                    outptr1 += 4;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            k0 += 9 * 4;
            k1 += 9 * 4;
        }
    }
#endif // __ARM_NEON && __aarch64__

    // Remaining (or, on armv7, all) output channels, one at a time.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
        out0.fill(_bias0);

        const float* k0 = kernel.channel(p);

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0;

            const Mat img0 = bottom_blob.channel(q);

            const float* r0 = img0.row(0);
            const float* r1 = img0.row(1);
            const float* r2 = img0.row(2);

            float32x4_t _k00 = vld1q_f32(k0);
            float32x4_t _k01 = vld1q_f32(k0 + 4);
            float32x4_t _k02 = vld1q_f32(k0 + 8);
            float32x4_t _k10 = vld1q_f32(k0 + 12);
            float32x4_t _k11 = vld1q_f32(k0 + 16);
            float32x4_t _k12 = vld1q_f32(k0 + 20);
            float32x4_t _k20 = vld1q_f32(k0 + 24);
            float32x4_t _k21 = vld1q_f32(k0 + 28);
            float32x4_t _k22 = vld1q_f32(k0 + 32);

            int i = 0;
            for (; i < outh; i++)
            {
                int nn = outw >> 2;
                int remain = outw & 3;

#if __aarch64__
                if (nn > 0)
                {
                    asm volatile(
                        "0: \n"
                        "prfm pldl1keep, [%1, #512] \n"
                        "ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1] \n" // sum0

                        // r0
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld1 {v0.4s, v1.4s}, [%2], #32 \n"
                        "ld1r {v4.4s}, [%2] \n"

                        "fmla v6.4s, %10.4s, v0.s[0] \n"
                        "fmla v7.4s, %10.4s, v0.s[2] \n"
                        "fmla v8.4s, %10.4s, v1.s[0] \n"
                        "fmla v9.4s, %10.4s, v1.s[2] \n"
                        "fmla v6.4s, %11.4s, v0.s[1] \n"
                        "fmla v7.4s, %11.4s, v0.s[3] \n"
                        "fmla v8.4s, %11.4s, v1.s[1] \n"
                        "fmla v9.4s, %11.4s, v1.s[3] \n"

                        // r1
                        "prfm pldl1keep, [%3, #256] \n"
                        "ld1 {v2.4s, v3.4s}, [%3], #32 \n"
                        "ld1r {v5.4s}, [%3] \n"

                        "fmla v6.4s, %12.4s, v0.s[2] \n"
                        "fmla v7.4s, %12.4s, v1.s[0] \n"
                        "fmla v8.4s, %12.4s, v1.s[2] \n"
                        "fmla v9.4s, %12.4s, v4.s[0] \n"
                        "fmla v6.4s, %13.4s, v2.s[0] \n"
                        "fmla v7.4s, %13.4s, v2.s[2] \n"
                        "fmla v8.4s, %13.4s, v3.s[0] \n"
                        "fmla v9.4s, %13.4s, v3.s[2] \n"
                        "fmla v6.4s, %14.4s, v2.s[1] \n"
                        "fmla v7.4s, %14.4s, v2.s[3] \n"
                        "fmla v8.4s, %14.4s, v3.s[1] \n"
                        "fmla v9.4s, %14.4s, v3.s[3] \n"

                        // r2
                        "prfm pldl1keep, [%4, #256] \n"
                        "ld1 {v0.4s, v1.4s}, [%4], #32 \n"
                        "ld1r {v4.4s}, [%4] \n"

                        "fmla v6.4s, %15.4s, v2.s[2] \n"
                        "fmla v7.4s, %15.4s, v3.s[0] \n"
                        "fmla v8.4s, %15.4s, v3.s[2] \n"
                        "fmla v9.4s, %15.4s, v5.s[0] \n"
                        "fmla v6.4s, %16.4s, v0.s[0] \n"
                        "fmla v7.4s, %16.4s, v0.s[2] \n"
                        "fmla v8.4s, %16.4s, v1.s[0] \n"
                        "fmla v9.4s, %16.4s, v1.s[2] \n"
                        "fmla v6.4s, %17.4s, v0.s[1] \n"
                        "fmla v7.4s, %17.4s, v0.s[3] \n"
                        "fmla v8.4s, %17.4s, v1.s[1] \n"
                        "fmla v9.4s, %17.4s, v1.s[3] \n"
                        "fmla v6.4s, %18.4s, v0.s[2] \n"
                        "fmla v7.4s, %18.4s, v1.s[0] \n"
                        "fmla v8.4s, %18.4s, v1.s[2] \n"
                        "fmla v9.4s, %18.4s, v4.s[0] \n"

                        "subs %w0, %w0, #1 \n"
                        "st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n"
                        "bne 0b \n"
                        : "=r"(nn),      // %0
                        "=r"(outptr0),   // %1
                        "=r"(r0),        // %2
                        "=r"(r1),        // %3
                        "=r"(r2)         // %4
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "w"(_k00), // %10
                        "w"(_k01), // %11
                        "w"(_k02), // %12
                        "w"(_k10), // %13
                        "w"(_k11), // %14
                        "w"(_k12), // %15
                        "w"(_k20), // %16
                        "w"(_k21), // %17
                        "w"(_k22)  // %18
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
                }
#else  // __aarch64__
                if (nn > 0)
                {
                    asm volatile(
                        "0: \n"
                        "pld [%1, #512] \n"
                        "vldm %1, {d0-d7} \n" // sum0

                        // r0
                        "pld [%2, #256] \n"
                        "vld1.f32 {d8-d11}, [%2]! \n"
                        "vld1.f32 {d12[]}, [%2] \n"

                        "vmla.f32 q0, %q10, d8[0] \n"
                        "vmla.f32 q1, %q10, d9[0] \n"
                        "vmla.f32 q2, %q10, d10[0] \n"
                        "vmla.f32 q3, %q10, d11[0] \n"
                        "vmla.f32 q0, %q11, d8[1] \n"
                        "vmla.f32 q1, %q11, d9[1] \n"
                        "vmla.f32 q2, %q11, d10[1] \n"
                        "vmla.f32 q3, %q11, d11[1] \n"
                        "vmla.f32 q0, %q12, d9[0] \n"
                        "vmla.f32 q1, %q12, d10[0] \n"
                        "vmla.f32 q2, %q12, d11[0] \n"

                        // r1
                        "pld [%3, #256] \n"
                        "vld1.f32 {d8-d11}, [%3]! \n"
                        "vld1.f32 {d13[]}, [%3] \n"

                        "vmla.f32 q3, %q12, d12[0] \n"
                        "vmla.f32 q0, %q13, d8[0] \n"
                        "vmla.f32 q1, %q13, d9[0] \n"
                        "vmla.f32 q2, %q13, d10[0] \n"
                        "vmla.f32 q3, %q13, d11[0] \n"
                        "vmla.f32 q0, %q14, d8[1] \n"
                        "vmla.f32 q1, %q14, d9[1] \n"
                        "vmla.f32 q2, %q14, d10[1] \n"
                        "vmla.f32 q3, %q14, d11[1] \n"
                        "vmla.f32 q0, %q15, d9[0] \n"
                        "vmla.f32 q1, %q15, d10[0] \n"
                        "vmla.f32 q2, %q15, d11[0] \n"

                        // r2
                        "pld [%4, #256] \n"
                        "vld1.f32 {d8-d11}, [%4]! \n"
                        "vld1.f32 {d12[]}, [%4] \n"

                        "vmla.f32 q3, %q15, d13[0] \n"
                        "vmla.f32 q0, %q16, d8[0] \n"
                        "vmla.f32 q1, %q16, d9[0] \n"
                        "vmla.f32 q2, %q16, d10[0] \n"
                        "vmla.f32 q3, %q16, d11[0] \n"
                        "vmla.f32 q0, %q17, d8[1] \n"
                        "vmla.f32 q1, %q17, d9[1] \n"
                        "vmla.f32 q2, %q17, d10[1] \n"
                        "vmla.f32 q3, %q17, d11[1] \n"
                        "vmla.f32 q0, %q18, d9[0] \n"
                        "vmla.f32 q1, %q18, d10[0] \n"
                        "vmla.f32 q2, %q18, d11[0] \n"
                        "vmla.f32 q3, %q18, d12[0] \n"

                        "subs %0, %0, #1 \n"
                        "vstm %1!, {d0-d7} \n"
                        "bne 0b \n"
                        : "=r"(nn),      // %0
                        "=r"(outptr0),   // %1
                        "=r"(r0),        // %2
                        "=r"(r1),        // %3
                        "=r"(r2)         // %4
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "w"(_k00), // %10
                        "w"(_k01), // %11
                        "w"(_k02), // %12
                        "w"(_k10), // %13
                        "w"(_k11), // %14
                        "w"(_k12), // %15
                        "w"(_k20), // %16
                        "w"(_k21), // %17
                        "w"(_k22)  // %18
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
                }
#endif // __aarch64__
                // Scalar remainder.
                for (; remain > 0; remain--)
                {
                    float32x4_t _sum0 = vld1q_f32(outptr0);

                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _r1 = vld1q_f32(r1);
                    float32x4_t _r2 = vld1q_f32(r2);

#if __aarch64__
                    _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
                    _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
                    _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
                    _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
                    _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
                    _sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
                    _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
                    _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
                    _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
                    _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif

                    vst1q_f32(outptr0, _sum0);

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0 += 4;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            k0 += 9 * 4;
        }
    }
}
test1.c
/* Minimal OpenMP test driver: attaches a `metadirective` (with no
   selector clauses, so no directive variant is actually substituted)
   to an empty 100-iteration loop, then exits successfully. */
int main() {
  /* NOTE(review): `metadirective` without a when()/default clause is
     presumably here to exercise parser acceptance — confirm intent. */
#pragma omp metadirective
  for(int i=0; i<100; i++)
    ;
  return 0;
}
engine.c
#include "mana/core/engine.h"

/* Initializes the engine: brings up the windowing/graphics library first
   (it reports the instance extensions the GPU API will need), then the
   GPU API itself, using those extensions.
   Returns ENGINE_SUCCESS, or an ENGINE_*_ERROR code on failure. */
int engine_init(struct Engine* engine, struct EngineSettings engine_settings) {
  engine->engine_settings = engine_settings;

  /* Extension names filled in by graphics_library_init() below and then
     consumed by gpu_api_init().  (Renamed from the misspelled
     "graphics_lbrary_extensions"; count zero-initialized so it is never
     read indeterminate if an init call leaves it untouched.) */
  const char* graphics_library_extensions[MAX_GRAPHICS_LIBRARY_EXTENSIONS] = {0};
  uint32_t graphics_library_extension_count = 0;

  int graphics_library_error = graphics_library_init(&engine->graphics_library, engine_settings.graphics_library_type, graphics_library_extensions, &graphics_library_extension_count);
  switch (graphics_library_error) {
    case (GRAPHICS_LIBRARY_SUCCESS):
      break;
    case (GRAPHICS_LIBRARY_GLFW_ERROR):
      fprintf(stderr, "Failed to setup glfw for engine!\n");
      return ENGINE_GRAPHICS_LIBRARY_ERROR;
    default:
      fprintf(stderr, "Unknown graphics library error! Error code: %d\n", graphics_library_error);
      return ENGINE_GRAPHICS_LIBRARY_ERROR;
  }

  int gpu_api_error = gpu_api_init(&engine->gpu_api, engine_settings.gpu_api_type, &engine->graphics_library, graphics_library_extensions, &graphics_library_extension_count);
  switch (gpu_api_error) {
    case (GPU_API_SUCCESS):
      break;
    case (GPU_API_VULKAN_ERROR):
      fprintf(stderr, "Failed to setup Vulkan API for engine!\n");
      return ENGINE_GPU_API_ERROR;
    default:
      fprintf(stderr, "Unknown gpu api error! Error code: %d\n", gpu_api_error);
      return ENGINE_GPU_API_ERROR;
  }

  return ENGINE_SUCCESS;
}

/* Tears down engine subsystems in reverse order of initialization. */
void engine_delete(struct Engine* engine) {
  gpu_api_delete(&engine->gpu_api);
  graphics_library_delete(&engine->graphics_library);
}

/* Returns the current UTC wall-clock time in seconds, with the
   nanosecond component folded into the fractional part. */
double engine_get_time() {
  struct timespec current_time;
  timespec_get(&current_time, TIME_UTC);
  double time = (double)current_time.tv_sec + (double)current_time.tv_nsec / 1000000000.0;
  return time;
}

/* Returns the number of threads an OpenMP parallel region actually uses
   (queried from inside a region, so it reflects runtime settings such as
   OMP_NUM_THREADS).  Falls back to 1 if the region does not execute. */
int engine_get_max_omp_threads() {
  int max_omp_threads = 1;
#pragma omp parallel
  {
#pragma omp single
    max_omp_threads = omp_get_num_threads();
  }
  return max_omp_threads;
}
GB_unop__atan_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__atan_fc64_fc64
// op(A') function: GB_unop_tran__atan_fc64_fc64

// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = catan (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: z = catan (x), the complex arc-tangent
#define GB_OP(z, x) \
    z = catan (x) ;

// casting (no-op here: A and C share the same type)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = catan (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATAN || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies catan entrywise over anz entries, parallelized with OpenMP.
// Cx and Ax may be aliased (each entry is read before it is written).

GrB_Info GB_unop_apply__atan_fc64_fc64
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = catan (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = catan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared via textual inclusion; it uses the
// GB_* macros defined above to specialize for this type/operator pair.

GrB_Info GB_unop_tran__atan_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
resample.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS AAA M M PPPP L EEEEE % % R R E SS A A MM MM P P L E % % RRRR EEE SSS AAAAA M M M PPPP L EEE % % R R E SS A A M M P L E % % R R EEEEE SSSSS A A M M P LLLLL EEEEE % % % % % % MagickCore Pixel Resampling Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % August 2007 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/color-private.h" #include "MagickCore/cache.h" #include "MagickCore/draw.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/resample.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/signature-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/option.h" // iOS: #include "ios_error.h" /* EWA Resampling Options */ /* select ONE resampling method */ #define EWA 1 /* Normal EWA handling - raw or clamped */ /* if 0 then use "High Quality EWA" */ #define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */ #define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */ /* output debugging information */ #define DEBUG_ELLIPSE 0 /* output ellipse info for debug */ #define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */ #define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */ #if ! FILTER_DIRECT #define WLUT_WIDTH 1024 /* size of the filter cache */ #endif /* Typedef declarations. 
*/
/*
  State carried between ResamplePixelColor() calls: the source image and
  its virtual-pixel cache view, the current elliptical sampling area, and
  the filter weighting data (LUT or direct filter calls).
*/
struct _ResampleFilter
{
  CacheView *view;            /* virtual-pixel view of the image below */

  Image *image;               /* referenced source image (released in
                                 DestroyResampleFilter) */

  ExceptionInfo *exception;

  MagickBooleanType debug;

  /* Information about image being resampled */
  ssize_t image_area;
  PixelInterpolateMethod interpolate;
  VirtualPixelMethod virtual_pixel;
  FilterType filter;

  /* processing settings needed */
  MagickBooleanType limit_reached,   /* area too large: fall back to averages */
    do_interpolate,                  /* bypass EWA, use direct interpolation */
    average_defined;                 /* average_pixel has been computed */
  PixelInfo average_pixel;           /* lazily-computed whole-image average */

  /* current ellipitical area being resampled around center point */
  double A, B, C,                    /* ellipse quotient Q coefficients */
    Vlimit, Ulimit, Uwidth, slope;   /* bounding parallelogram of ellipse */

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter *filter_def;
  double F;
#endif

  /* the practical working support of the filter */
  double support;

  size_t signature;                  /* structure validity marker */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e   R e s a m p l e   I n f o                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResampleFilter() initializes the information resample needs do to a
%  scaled lookup of a color from an image, using area sampling.
%
%  The algorithm is based on a Elliptical Weighted Average, where the pixels
%  found in a large elliptical area is averaged together according to a
%  weighting (filter) function.  For more details see "Fundamentals of Texture
%  Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
%  1989.  Available for free from, http://www.cs.cmu.edu/~ph/
%
%  As EWA resampling (or any sort of resampling) can require a lot of
%  calculations to produce a distorted scaling of the source image for each
%  output pixel, the ResampleFilter structure generated holds that information
%  between individual image resampling.
%
%  This function will make the appropriate AcquireCacheView() calls
%  to view the image, calling functions do not need to open a cache view.
%
%  Usage Example...
%      resample_filter=AcquireResampleFilter(image,exception);
%      SetResampleFilter(resample_filter, GaussianFilter);
%      for (y=0; y < (ssize_t) image->rows; y++) {
%        for (x=0; x < (ssize_t) image->columns; x++) {
%          u= ....;   v= ....;
%          ScaleResampleFilter(resample_filter, ... scaling vectors ...);
%          (void) ResamplePixelColor(resample_filter,u,v,&pixel);
%          ... assign resampled pixel value ...
%        }
%      }
%      DestroyResampleFilter(resample_filter);
%
%  The format of the AcquireResampleFilter method is:
%
%     ResampleFilter *AcquireResampleFilter(const Image *image,
%       ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
  ExceptionInfo *exception)
{
  register ResampleFilter
    *resample_filter;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  /* allocate and zero the filter state (aborts on allocation failure) */
  resample_filter=(ResampleFilter *) AcquireCriticalMemory(sizeof(
    *resample_filter));
  (void) memset(resample_filter,0,sizeof(*resample_filter));

  /* take a reference on the image and open a virtual-pixel view of it;
     both are released by DestroyResampleFilter() */
  resample_filter->exception=exception;
  resample_filter->image=ReferenceImage((Image *) image);
  resample_filter->view=AcquireVirtualCacheView(resample_filter->image,
    exception);

  resample_filter->debug=IsEventLogging();
  resample_filter->image_area=(ssize_t) (image->columns*image->rows);
  resample_filter->average_defined=MagickFalse;

  /* set the signature before the Set* calls below, which operate on a
     (now valid) resample filter structure */
  resample_filter->signature=MagickCoreSignature;

  /* defaults are taken from the image's own settings */
  SetResampleFilter(resample_filter,image->filter);
  (void) SetResampleFilterInterpolateMethod(resample_filter,image->interpolate);
  (void) SetResampleFilterVirtualPixelMethod(resample_filter,
    GetImageVirtualPixelMethod(image));

  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y R e s a m p l e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResampleFilter() finalizes and cleans up the resampling
%  resample_filter as returned by AcquireResampleFilter(), freeing any memory
%  or other information as needed.
%
%  The format of the DestroyResampleFilter method is:
%
%      ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
%  A description of each parameter follows:
%
%    o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
  ResampleFilter *resample_filter)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  /* release in reverse order of acquisition: view, then image reference */
  resample_filter->view=DestroyCacheView(resample_filter->view);
  resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
  /* direct-filter builds own a ResizeFilter that must be freed too */
  resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
  /* invalidate the signature so stale pointers trip the asserts above */
  resample_filter->signature=(~MagickCoreSignature);
  resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e P i x e l C o l o r                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResamplePixelColor() samples the pixel values surrounding the location
%  given using an elliptical weighted average, at the scale previously
%  calculated, and in the most efficient manner possible for the
%  VirtualPixelMethod setting.
%
%  The format of the ResamplePixelColor method is:
%
%     MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
%       const double u0,const double v0,PixelInfo *pixel,
%       ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o u0,v0: A double representing the center of the area to resample,
%        The distortion transformed transformed x,y coordinate.
%
%    o pixel: the resampled pixel is returned here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
  ResampleFilter *resample_filter,const double u0,const double v0,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t u,v, v1, v2, uw, hit;
  double u1;
  double U,V,Q,DQ,DDQ;
  double divisor_c,divisor_m;
  register double weight;
  register const Quantum *pixels;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  status=MagickTrue;
  /* GetPixelInfo(resample_filter->image,pixel); */

  /* fast path: EWA disabled, fall back to plain interpolation */
  if ( resample_filter->do_interpolate ) {
    status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
      resample_filter->interpolate,u0,v0,pixel,resample_filter->exception);
    return(status);
  }

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(thread_stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif

  /*
    Does the resample area miss the image proper?  If so, and that area is a
    simple solid color -- then simply return that color!  This saves a lot of
    calculation when resampling outside the bounds of the source image.

    However it probably should be expanded to image bounds plus the filters
    scaled support size.
  */
  hit = 0;
  switch ( resample_filter->virtual_pixel ) {
    case BackgroundVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case MaskVirtualPixelMethod:
      if ( resample_filter->limit_reached
           || u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           || v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;
      break;

    case UndefinedVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
      /* for edge methods only the four outside corners are solid color */
      if (    ( u0 + resample_filter->Ulimit < 0.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 + resample_filter->Ulimit < 0.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           )
        hit++;
      break;
    case HorizontalTileVirtualPixelMethod:
      if (    v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;  /* outside the horizontally tiled images. */
      break;
    case VerticalTileVirtualPixelMethod:
      if (    u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           )
        hit++;  /* outside the vertically tiled images. */
      break;
    case DitherVirtualPixelMethod:
      /* dither extends the solid-color region 32 pixels beyond each edge */
      if (    ( u0 + resample_filter->Ulimit < -32.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 + resample_filter->Ulimit < -32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           )
        hit++;
      break;
    case TileVirtualPixelMethod:
    case MirrorVirtualPixelMethod:
    case RandomVirtualPixelMethod:
    case HorizontalTileEdgeVirtualPixelMethod:
    case VerticalTileEdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
      /* resampling of area is always needed - no VP limits */
      break;
  }
  if ( hit ) {
    /* The area being resampled is simply a solid color
     * just return a single lookup color.
     *
     * Should this return the users requested interpolated color?
     */
    status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
      IntegerInterpolatePixel,u0,v0,pixel,resample_filter->exception);
    return(status);
  }

  /*
    When Scaling limits reached, return an 'averaged' result.
  */
  if ( resample_filter->limit_reached ) {
    switch ( resample_filter->virtual_pixel ) {
      /*  This is always handled by the above, so no need.
        case BackgroundVirtualPixelMethod:
        case ConstantVirtualPixelMethod:
        case TransparentVirtualPixelMethod:
        case GrayVirtualPixelMethod,
        case WhiteVirtualPixelMethod
        case MaskVirtualPixelMethod:
      */
      case UndefinedVirtualPixelMethod:
      case EdgeVirtualPixelMethod:
      case DitherVirtualPixelMethod:
      case HorizontalTileEdgeVirtualPixelMethod:
      case VerticalTileEdgeVirtualPixelMethod:
        /* We need an average edge pixel, from the correct edge!
           How should I calculate an average edge color?
           Just returning an averaged neighbourhood,
           works well in general, but falls down for TileEdge methods.
           This needs to be done properly!!!!!!
        */
        status=InterpolatePixelInfo(resample_filter->image,
          resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
          resample_filter->exception);
        break;
      case HorizontalTileVirtualPixelMethod:
      case VerticalTileVirtualPixelMethod:
        /* just return the background pixel - Is there more direct way? */
        status=InterpolatePixelInfo(resample_filter->image,
          resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
          resample_filter->exception);
        break;
      case TileVirtualPixelMethod:
      case MirrorVirtualPixelMethod:
      case RandomVirtualPixelMethod:
      case CheckerTileVirtualPixelMethod:
      default:
        /* generate a average color of the WHOLE image */
        if ( resample_filter->average_defined == MagickFalse ) {
          Image
            *average_image;

          CacheView
            *average_view;

          GetPixelInfo(resample_filter->image,(PixelInfo *)
            &resample_filter->average_pixel);
          resample_filter->average_defined=MagickTrue;

          /* Try to get an averaged pixel color of whole image */
          average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,
            resample_filter->exception);
          if (average_image == (Image *) NULL)
            {
              *pixel=resample_filter->average_pixel; /* FAILED */
              break;
            }
          average_view=AcquireVirtualCacheView(average_image,exception);
          pixels=GetCacheViewVirtualPixels(average_view,0,0,1,1,
            resample_filter->exception);
          if (pixels == (const Quantum *) NULL) {
            average_view=DestroyCacheView(average_view);
            average_image=DestroyImage(average_image);
            *pixel=resample_filter->average_pixel; /* FAILED */
            break;
          }
          GetPixelInfoPixel(resample_filter->image,pixels,
            &(resample_filter->average_pixel));
          average_view=DestroyCacheView(average_view);
          average_image=DestroyImage(average_image);

          if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
            {
              /* CheckerTile is a alpha blend of the image's average pixel
                 color and the current background color */

              /* image's average pixel color */
              weight = QuantumScale*((double)
                resample_filter->average_pixel.alpha);
              resample_filter->average_pixel.red *= weight;
              resample_filter->average_pixel.green *= weight;
              resample_filter->average_pixel.blue *= weight;
              divisor_c = weight;

              /* background color */
              weight = QuantumScale*((double)
                resample_filter->image->background_color.alpha);
              resample_filter->average_pixel.red +=
                weight*resample_filter->image->background_color.red;
              resample_filter->average_pixel.green +=
                weight*resample_filter->image->background_color.green;
              resample_filter->average_pixel.blue +=
                weight*resample_filter->image->background_color.blue;
              resample_filter->average_pixel.alpha +=
                resample_filter->image->background_color.alpha;
              divisor_c += weight;

              /* alpha blend */
              resample_filter->average_pixel.red /= divisor_c;
              resample_filter->average_pixel.green /= divisor_c;
              resample_filter->average_pixel.blue /= divisor_c;
              resample_filter->average_pixel.alpha /= 2; /* 50% blend */
            }
        }
        *pixel=resample_filter->average_pixel;
        break;
    }
    return(status);
  }

  /*
    Initialize weighted average data collection
  */
  hit = 0;
  divisor_c = 0.0;
  divisor_m = 0.0;
  pixel->red = pixel->green = pixel->blue = 0.0;
  if (pixel->colorspace == CMYKColorspace)
    pixel->black = 0.0;
  if (pixel->alpha_trait != UndefinedPixelTrait)
    pixel->alpha = 0.0;

  /*
    Determine the parallelogram bounding box fitted to the ellipse
    centered at u0,v0.  This area is bounding by the lines...
  */
  v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit);  /* range of scan lines */
  v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);

  /* scan line start and width across the parallelogram */
  u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
  uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(thread_stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
  (void) FormatLocaleFile(thread_stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif

  /*
    Do weighted resampling of all pixels,  within the scaled ellipse,
    bound by a Parallelogram fitted to the ellipse.

    The ellipse quotient Q is updated incrementally (DQ, DDQ) along each
    scanline rather than re-evaluated per pixel.
  */
  DDQ = 2*resample_filter->A;
  for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
    long uu = ceil(u1);   /* actual pixel location (for debug only) */
    (void) FormatLocaleFile(thread_stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
    u = (ssize_t)ceil(u1);        /* first pixel in scanline */
    u1 += resample_filter->slope; /* start of next scan line */

    /* location of this first pixel, relative to u0,v0 */
    U = (double)u-u0;
    V = (double)v-v0;

    /* Q = ellipse quotient ( if Q<F then pixel is inside ellipse) */
    Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
    DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;

    /* get the scanline of pixels for this v */
    pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
      1,resample_filter->exception);
    if (pixels == (const Quantum *) NULL)
      return(MagickFalse);

    /* count up the weighted pixel colors */
    for( u=0; u<uw; u++ ) {
#if FILTER_LUT
      /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
      if ( Q < (double)WLUT_WIDTH ) {
        weight = resample_filter->filter_lut[(int)Q];
#else
      /* Note that the ellipse has been pre-scaled so F = support^2 */
      if ( Q < (double)resample_filter->F ) {
        weight = GetResizeFilterWeight(resample_filter->filter_def,
          sqrt(Q));    /* a SquareRoot!  Arrggghhhhh... */
#endif

        /* alpha is accumulated with the raw weight (divisor_m); the color
           channels are alpha-weighted and normalized by divisor_c */
        pixel->alpha  += weight*GetPixelAlpha(resample_filter->image,pixels);
        divisor_m += weight;

        if (pixel->alpha_trait != UndefinedPixelTrait)
          weight *= QuantumScale*((double)
            GetPixelAlpha(resample_filter->image,pixels));
        pixel->red   += weight*GetPixelRed(resample_filter->image,pixels);
        pixel->green += weight*GetPixelGreen(resample_filter->image,pixels);
        pixel->blue  += weight*GetPixelBlue(resample_filter->image,pixels);
        if (pixel->colorspace == CMYKColorspace)
          pixel->black += weight*GetPixelBlack(resample_filter->image,pixels);
        divisor_c += weight;

        hit++;

#if DEBUG_HIT_MISS
        /* mark the pixel according to hit/miss of the ellipse */
        (void) FormatLocaleFile(thread_stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
          (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(thread_stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
          (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      } else {
        (void) FormatLocaleFile(thread_stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
          (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(thread_stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
          (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      }
      uu++;
#else
      }
#endif
      pixels+=GetPixelChannels(resample_filter->image);
      Q += DQ;
      DQ += DDQ;
    }
  }
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(thread_stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif

  /*
    Result sanity check -- this should NOT happen
  */
  if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
    /* not enough pixels, or bad weighting in resampling,
       resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
    pixel->alpha = pixel->red = pixel->green = pixel->blue = 0;
    pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
    status=InterpolatePixelInfo(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
#endif
    return status;
  }

  /*
    Finalize results of resampling
  */
  divisor_m = 1.0/divisor_m;
  if (pixel->alpha_trait != UndefinedPixelTrait)
    pixel->alpha = (double) ClampToQuantum(divisor_m*pixel->alpha);
  divisor_c = 1.0/divisor_c;
  pixel->red   = (double) ClampToQuantum(divisor_c*pixel->red);
  pixel->green = (double) ClampToQuantum(divisor_c*pixel->green);
  pixel->blue  = (double) ClampToQuantum(divisor_c*pixel->blue);
  if (pixel->colorspace == CMYKColorspace)
    pixel->black = (double) ClampToQuantum(divisor_c*pixel->black);
  return(MagickTrue);
}

#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
-   C l a m p U p A x e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampUpAxes() function converts the input vectors into a major and
%  minor axis unit vectors, and their magnitude.  This allows us to
%  ensure that the ellipse generated is never smaller than the unit
%  circle and thus never too small for use in EWA resampling.
%
%  This purely mathematical 'magic' was provided by Professor Nicolas
%  Robidoux and his Masters student Chantal Racette.
%
%  Reference: "We Recommend Singular Value Decomposition", David Austin
%    http://www.ams.org/samplings/feature-column/fcarc-svd
%
%  By generating major and minor axis vectors, we can actually use the
%  ellipse in its "canonical form", by remapping the dx,dy of the
%  sampled point into distances along the major and minor axis unit
%  vectors.
% % Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form */ static inline void ClampUpAxes(const double dux, const double dvx, const double duy, const double dvy, double *major_mag, double *minor_mag, double *major_unit_x, double *major_unit_y, double *minor_unit_x, double *minor_unit_y) { /* * ClampUpAxes takes an input 2x2 matrix * * [ a b ] = [ dux duy ] * [ c d ] = [ dvx dvy ] * * and computes from it the major and minor axis vectors [major_x, * major_y] and [minor_x,minor_y] of the smallest ellipse containing * both the unit disk and the ellipse which is the image of the unit * disk by the linear transformation * * [ dux duy ] [S] = [s] * [ dvx dvy ] [T] = [t] * * (The vector [S,T] is the difference between a position in output * space and [X,Y]; the vector [s,t] is the difference between a * position in input space and [x,y].) */ /* * Output: * * major_mag is the half-length of the major axis of the "new" * ellipse. * * minor_mag is the half-length of the minor axis of the "new" * ellipse. * * major_unit_x is the x-coordinate of the major axis direction vector * of both the "old" and "new" ellipses. * * major_unit_y is the y-coordinate of the major axis direction vector. * * minor_unit_x is the x-coordinate of the minor axis direction vector. * * minor_unit_y is the y-coordinate of the minor axis direction vector. * * Unit vectors are useful for computing projections, in particular, * to compute the distance between a point in output space and the * center of a unit disk in output space, using the position of the * corresponding point [s,t] in input space. Following the clamping, * the square of this distance is * * ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2 * + * ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2 * * If such distances will be computed for many [s,t]'s, it makes * sense to actually compute the reciprocal of major_mag and * minor_mag and multiply them by the above unit lengths. 
* * Now, if you want to modify the input pair of tangent vectors so * that it defines the modified ellipse, all you have to do is set * * newdux = major_mag * major_unit_x * newdvx = major_mag * major_unit_y * newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y * newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x * * and use these tangent vectors as if they were the original ones. * Usually, this is a drastic change in the tangent vectors even if * the singular values are not clamped; for example, the minor axis * vector always points in a direction which is 90 degrees * counterclockwise from the direction of the major axis vector. */ /* * Discussion: * * GOAL: Fix things so that the pullback, in input space, of a disk * of radius r in output space is an ellipse which contains, at * least, a disc of radius r. (Make this hold for any r>0.) * * ESSENCE OF THE METHOD: Compute the product of the first two * factors of an SVD of the linear transformation defining the * ellipse and make sure that both its columns have norm at least 1. * Because rotations and reflexions map disks to themselves, it is * not necessary to compute the third (rightmost) factor of the SVD. * * DETAILS: Find the singular values and (unit) left singular * vectors of Jinv, clampling up the singular values to 1, and * multiply the unit left singular vectors by the new singular * values in order to get the minor and major ellipse axis vectors. * * Image resampling context: * * The Jacobian matrix of the transformation at the output point * under consideration is defined as follows: * * Consider the transformation (x,y) -> (X,Y) from input locations * to output locations. (Anthony Thyssen, elsewhere in resample.c, * uses the notation (u,v) -> (x,y).) 
* * The Jacobian matrix of the transformation at (x,y) is equal to * * J = [ A, B ] = [ dX/dx, dX/dy ] * [ C, D ] [ dY/dx, dY/dy ] * * that is, the vector [A,C] is the tangent vector corresponding to * input changes in the horizontal direction, and the vector [B,D] * is the tangent vector corresponding to input changes in the * vertical direction. * * In the context of resampling, it is natural to use the inverse * Jacobian matrix Jinv because resampling is generally performed by * pulling pixel locations in the output image back to locations in * the input image. Jinv is * * Jinv = [ a, b ] = [ dx/dX, dx/dY ] * [ c, d ] [ dy/dX, dy/dY ] * * Note: Jinv can be computed from J with the following matrix * formula: * * Jinv = 1/(A*D-B*C) [ D, -B ] * [ -C, A ] * * What we do is modify Jinv so that it generates an ellipse which * is as close as possible to the original but which contains the * unit disk. This can be accomplished as follows: * * Let * * Jinv = U Sigma V^T * * be an SVD decomposition of Jinv. (The SVD is not unique, but the * final ellipse does not depend on the particular SVD.) * * We could clamp up the entries of the diagonal matrix Sigma so * that they are at least 1, and then set * * Jinv = U newSigma V^T. * * However, we do not need to compute V for the following reason: * V^T is an orthogonal matrix (that is, it represents a combination * of rotations and reflexions) so that it maps the unit circle to * itself. For this reason, the exact value of V does not affect the * final ellipse, and we can choose V to be the identity * matrix. This gives * * Jinv = U newSigma. * * In the end, we return the two diagonal entries of newSigma * together with the two columns of U. */ /* * ClampUpAxes was written by Nicolas Robidoux and Chantal Racette * of Laurentian University with insightful suggestions from Anthony * Thyssen and funding from the National Science and Engineering * Research Council of Canada. 
It is distinguished from its * predecessors by its efficient handling of degenerate cases. * * The idea of clamping up the EWA ellipse's major and minor axes so * that the result contains the reconstruction kernel filter support * is taken from Andreas Gustaffson's Masters thesis "Interactive * Image Warping", Helsinki University of Technology, Faculty of * Information Technology, 59 pages, 1993 (see Section 3.6). * * The use of the SVD to clamp up the singular values of the * Jacobian matrix of the pullback transformation for EWA resampling * is taken from the astrophysicist Craig DeForest. It is * implemented in his PDL::Transform code (PDL = Perl Data * Language). */ const double a = dux; const double b = duy; const double c = dvx; const double d = dvy; /* * n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the * squares of the singular values of Jinv. */ const double aa = a*a; const double bb = b*b; const double cc = c*c; const double dd = d*d; /* * Eigenvectors of n are left singular vectors of Jinv. */ const double n11 = aa+bb; const double n12 = a*c+b*d; const double n21 = n12; const double n22 = cc+dd; const double det = a*d-b*c; const double twice_det = det+det; const double frobenius_squared = n11+n22; const double discriminant = (frobenius_squared+twice_det)*(frobenius_squared-twice_det); /* * In exact arithmetic, discriminant can't be negative. In floating * point, it can, because of the bad conditioning of SVD * decompositions done through the associated normal matrix. */ const double sqrt_discriminant = sqrt(discriminant > 0.0 ? discriminant : 0.0); /* * s1 is the largest singular value of the inverse Jacobian * matrix. In other words, its reciprocal is the smallest singular * value of the Jacobian matrix itself. * If s1 = 0, both singular values are 0, and any orthogonal pair of * left and right factors produces a singular decomposition of Jinv. */ /* * Initially, we only compute the squares of the singular values. 
*/ const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant); /* * s2 the smallest singular value of the inverse Jacobian * matrix. Its reciprocal is the largest singular value of the * Jacobian matrix itself. */ const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant); const double s1s1minusn11 = s1s1-n11; const double s1s1minusn22 = s1s1-n22; /* * u1, the first column of the U factor of a singular decomposition * of Jinv, is a (non-normalized) left singular vector corresponding * to s1. It has entries u11 and u21. We compute u1 from the fact * that it is an eigenvector of n corresponding to the eigenvalue * s1^2. */ const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11; const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22; /* * The following selects the largest row of n-s1^2 I as the one * which is used to find the eigenvector. If both s1^2-n11 and * s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case, * any vector is an eigenvector; in addition, norm below is equal to * zero, and, in exact arithmetic, this is the only case in which * norm = 0. So, setting u1 to the simple but arbitrary vector [1,0] * if norm = 0 safely takes care of all cases. */ const double temp_u11 = ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 ); const double temp_u21 = ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 ); const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21); /* * Finalize the entries of first left singular vector (associated * with the largest singular value). */ const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 ); const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 ); /* * Clamp the singular values up to 1. */ *major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) ); *minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) ); /* * Return the unit major and minor axis direction vectors. 
 */
  *major_unit_x = u11;
  *major_unit_y = u21;
  *minor_unit_x = -u21;
  *minor_unit_y = u11;
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S c a l e R e s a m p l e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleResampleFilter() does all the calculations needed to resample an image
%  at a specific scale, defined by two scaling vectors.  This is not using
%  an orthogonal scaling, but two distorted scaling vectors, to allow the
%  generation of an angled ellipse.
%
%  As only two derivative scaling vectors are used the center of the ellipse
%  must be the center of the lookup.  That is any curvature that the
%  distortion may produce is discounted.
%
%  The input vectors are produced by either finding the derivatives of the
%  distortion function, or the partial derivatives from a distortion mapping.
%  They do not need to be the orthogonal dx,dy scaling vectors, but can be
%  calculated from other derivatives.  For example you could use dr,da/r
%  polar coordinate vector scaling vectors
%
%  If   u,v = DistortEquation(x,y)   OR   u = Fu(x,y); v = Fv(x,y)
%  Then the scaling vectors are determined from the derivatives...
%      du/dx, dv/dx     and    du/dy, dv/dy
%  If the resulting scaling vectors are orthogonally aligned then...
%      dv/dx = 0   and   du/dy = 0
%  Producing an orthogonally aligned ellipse in source space for the area to
%  be resampled.
%
%  Note that scaling vectors are different to argument order.  Argument order
%  is the general order the derivatives are extracted from the distortion
%  equations, and not the scaling vectors.  As such the middle two values
%  may be swapped from what you expect.  Caution is advised.
%
%  WARNING: It is assumed that any SetResampleFilter() method call will
%  always be performed before the ScaleResampleFilter() method, so that the
%  size of the ellipse will match the support for the resampling filter being
%  used.
% % The format of the ScaleResampleFilter method is: % % void ScaleResampleFilter(const ResampleFilter *resample_filter, % const double dux,const double duy,const double dvx,const double dvy) % % A description of each parameter follows: % % o resample_filter: the resampling resample_filterrmation defining the % image being resampled % % o dux,duy,dvx,dvy: % The deritives or scaling vectors defining the EWA ellipse. % NOTE: watch the order, which is based on the order deritives % are usally determined from distortion equations (see above). % The middle two values may need to be swapped if you are thinking % in terms of scaling vectors. % */ MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter, const double dux,const double duy,const double dvx,const double dvy) { double A,B,C,F; assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); resample_filter->limit_reached = MagickFalse; /* A 'point' filter forces use of interpolation instead of area sampling */ if ( resample_filter->filter == PointFilter ) return; /* EWA turned off - nothing to do */ #if DEBUG_ELLIPSE (void) FormatLocaleFile(thread_stderr, "# -----\n" ); (void) FormatLocaleFile(thread_stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n", dux, dvx, duy, dvy); #endif /* Find Ellipse Coefficents such that A*u^2 + B*u*v + C*v^2 = F With u,v relative to point around which we are resampling. And the given scaling dx,dy vectors in u,v space du/dx,dv/dx and du/dy,dv/dy */ #if EWA /* Direct conversion of derivatives into elliptical coefficients However when magnifying images, the scaling vectors will be small resulting in a ellipse that is too small to sample properly. As such we need to clamp the major/minor axis to a minumum of 1.0 to prevent it getting too small. 
*/ #if EWA_CLAMP { double major_mag, minor_mag, major_x, major_y, minor_x, minor_y; ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag, &major_x, &major_y, &minor_x, &minor_y); major_x *= major_mag; major_y *= major_mag; minor_x *= minor_mag; minor_y *= minor_mag; #if DEBUG_ELLIPSE (void) FormatLocaleFile(thread_stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n", major_x, major_y, minor_x, minor_y); #endif A = major_y*major_y+minor_y*minor_y; B = -2.0*(major_x*major_y+minor_x*minor_y); C = major_x*major_x+minor_x*minor_x; F = major_mag*minor_mag; F *= F; /* square it */ } #else /* raw unclamped EWA */ A = dvx*dvx+dvy*dvy; B = -2.0*(dux*dvx+duy*dvy); C = dux*dux+duy*duy; F = dux*dvy-duy*dvx; F *= F; /* square it */ #endif /* EWA_CLAMP */ #else /* HQ_EWA */ /* This Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his thesis, which adds a unit circle to the elliptical area so as to do both Reconstruction and Prefiltering of the pixels in the resampling. It also means it is always likely to have at least 4 pixels within the area of the ellipse, for weighted averaging. No scaling will result with F == 4.0 and a circle of radius 2.0, and F smaller than this means magnification is being used. NOTE: This method produces a very blury result at near unity scale while producing perfect results for strong minitification and magnifications. However filter support is fixed to 2.0 (no good for Windowed Sinc filters) */ A = dvx*dvx+dvy*dvy+1; B = -2.0*(dux*dvx+duy*dvy); C = dux*dux+duy*duy+1; F = A*C - B*B/4; #endif #if DEBUG_ELLIPSE (void) FormatLocaleFile(thread_stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F); /* Figure out the various information directly about the ellipse. This information currently not needed at this time, but may be needed later for better limit determination. 
It is also good to have as a record for future debugging */ { double alpha, beta, gamma, Major, Minor; double Eccentricity, Ellipse_Area, Ellipse_Angle; alpha = A+C; beta = A-C; gamma = sqrt(beta*beta + B*B ); if ( alpha - gamma <= MagickEpsilon ) Major=MagickMaximumValue; else Major=sqrt(2*F/(alpha - gamma)); Minor = sqrt(2*F/(alpha + gamma)); (void) FormatLocaleFile(thread_stderr, "# Major=%lf; Minor=%lf\n", Major, Minor ); /* other information about ellipse include... */ Eccentricity = Major/Minor; Ellipse_Area = MagickPI*Major*Minor; Ellipse_Angle = atan2(B, A-C); (void) FormatLocaleFile(thread_stderr, "# Angle=%lf Area=%lf\n", (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area); } #endif /* If one or both of the scaling vectors is impossibly large (producing a very large raw F value), we may as well not bother doing any form of resampling since resampled area is very large. In this case some alternative means of pixel sampling, such as the average of the whole image is needed to get a reasonable result. Calculate only as needed. */ if ( (4*A*C - B*B) > MagickMaximumValue ) { resample_filter->limit_reached = MagickTrue; return; } /* Scale ellipse to match the filters support (that is, multiply F by the square of the support) Simplier to just multiply it by the support twice! 
*/ F *= resample_filter->support; F *= resample_filter->support; /* Orthogonal bounds of the ellipse */ resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B)); resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B)); /* Horizontally aligned parallelogram fitted to Ellipse */ resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */ resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */ #if DEBUG_ELLIPSE (void) FormatLocaleFile(thread_stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n", resample_filter->Ulimit, resample_filter->Vlimit, resample_filter->Uwidth, resample_filter->slope ); #endif /* Check the absolute area of the parallelogram involved. * This limit needs more work, as it is too slow for larger images * with tiled views of the horizon. */ if ( (resample_filter->Uwidth * resample_filter->Vlimit) > (4.0*resample_filter->image_area)) { resample_filter->limit_reached = MagickTrue; return; } /* Scale ellipse formula to directly index the Filter Lookup Table */ { register double scale; #if FILTER_LUT /* scale so that F = WLUT_WIDTH; -- hardcoded */ scale = (double)WLUT_WIDTH/F; #else /* scale so that F = resample_filter->F (support^2) */ scale = resample_filter->F/F; #endif resample_filter->A = A*scale; resample_filter->B = B*scale; resample_filter->C = C*scale; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilter() set the resampling filter lookup table based on a % specific filter. Note that the filter is used as a radial filter not as a % two pass othogonally aligned resampling filter. 
% % The format of the SetResampleFilter method is: % % void SetResampleFilter(ResampleFilter *resample_filter, % const FilterType filter) % % A description of each parameter follows: % % o resample_filter: resampling resample_filterrmation structure % % o filter: the resize filter for elliptical weighting LUT % */ MagickExport void SetResampleFilter(ResampleFilter *resample_filter, const FilterType filter) { ResizeFilter *resize_filter; assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); resample_filter->do_interpolate = MagickFalse; resample_filter->filter = filter; /* Default cylindrical filter is a Cubic Keys filter */ if ( filter == UndefinedFilter ) resample_filter->filter = RobidouxFilter; if ( resample_filter->filter == PointFilter ) { resample_filter->do_interpolate = MagickTrue; return; /* EWA turned off - nothing more to do */ } resize_filter = AcquireResizeFilter(resample_filter->image, resample_filter->filter,MagickTrue,resample_filter->exception); if (resize_filter == (ResizeFilter *) NULL) { (void) ThrowMagickException(resample_filter->exception,GetMagickModule(), ModuleError, "UnableToSetFilteringValue", "Fall back to Interpolated 'Point' filter"); resample_filter->filter = PointFilter; resample_filter->do_interpolate = MagickTrue; return; /* EWA turned off - nothing more to do */ } /* Get the practical working support for the filter, * after any API call blur factors have been accoded for. 
*/ #if EWA resample_filter->support = GetResizeFilterSupport(resize_filter); #else resample_filter->support = 2.0; /* fixed support size for HQ-EWA */ #endif #if FILTER_LUT /* Fill the LUT with the weights from the selected filter function */ { register int Q; double r_scale; /* Scale radius so the filter LUT covers the full support range */ r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH); for(Q=0; Q<WLUT_WIDTH; Q++) resample_filter->filter_lut[Q] = (double) GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale); /* finished with the resize filter */ resize_filter = DestroyResizeFilter(resize_filter); } #else /* save the filter and the scaled ellipse bounds needed for filter */ resample_filter->filter_def = resize_filter; resample_filter->F = resample_filter->support*resample_filter->support; #endif /* Adjust the scaling of the default unit circle This assumes that any real scaling changes will always take place AFTER the filter method has been initialized. */ ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0); #if 0 /* This is old code kept as a reference only. Basically it generates a Gaussian bell curve, with sigma = 0.5 if the support is 2.0 Create Normal Gaussian 2D Filter Weighted Lookup Table. A normal EWA guassual lookup would use exp(Q*ALPHA) where Q = distance squared from 0.0 (center) to 1.0 (edge) and ALPHA = -4.0*ln(2.0) ==> -2.77258872223978123767 The table is of length 1024, and equates to support radius of 2.0 thus needs to be scaled by ALPHA*4/1024 and any blur factor squared The it comes from reference code provided by Fred Weinhaus. 
*/ r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur); for(Q=0; Q<WLUT_WIDTH; Q++) resample_filter->filter_lut[Q] = exp((double)Q*r_scale); resample_filter->support = WLUT_WIDTH; #endif #if FILTER_LUT #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp single #endif { if (IsStringTrue(GetImageArtifact(resample_filter->image, "resample:verbose")) != MagickFalse) { register int Q; double r_scale; /* Debug output of the filter weighting LUT Gnuplot the LUT data, the x scale index has been adjusted plot [0:2][-.2:1] "lut.dat" with lines The filter values should be normalized for comparision */ fprintf(thread_stdout, "#\n"); fprintf(thread_stdout, "# Resampling Filter LUT (%d values) for '%s' filter\n", WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions, resample_filter->filter) ); fprintf(thread_stdout, "#\n"); fprintf(thread_stdout, "# Note: values in table are using a squared radius lookup.\n"); fprintf(thread_stdout, "# As such its distribution is not uniform.\n"); fprintf(thread_stdout, "#\n"); fprintf(thread_stdout, "# The X value is the support distance for the Y weight\n"); fprintf(thread_stdout, "# so you can use gnuplot to plot this cylindrical filter\n"); fprintf(thread_stdout, "# plot [0:2][-.2:1] \"lut.dat\" with lines\n"); fprintf(thread_stdout, "#\n"); /* Scale radius so the filter LUT covers the full support range */ r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH); for(Q=0; Q<WLUT_WIDTH; Q++) fprintf(thread_stdout, "%8.*g %.*g\n", GetMagickPrecision(),sqrt((double)Q)*r_scale, GetMagickPrecision(),resample_filter->filter_lut[Q] ); fprintf(thread_stdout, "\n\n"); /* generate a 'break' in gnuplot if multiple outputs */ } /* Output the above once only for each image, and each setting (void) DeleteImageArtifact(resample_filter->image,"resample:verbose"); */ } #endif /* FILTER_LUT */ return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r I n t e r p 
o l a t e M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilterInterpolateMethod() sets the resample filter interpolation % method. % % The format of the SetResampleFilterInterpolateMethod method is: % % MagickBooleanType SetResampleFilterInterpolateMethod( % ResampleFilter *resample_filter,const InterpolateMethod method) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o method: the interpolation method. % */ MagickExport MagickBooleanType SetResampleFilterInterpolateMethod( ResampleFilter *resample_filter,const PixelInterpolateMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->interpolate=method; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilterVirtualPixelMethod() changes the virtual pixel method % associated with the specified resample filter. % % The format of the SetResampleFilterVirtualPixelMethod method is: % % MagickBooleanType SetResampleFilterVirtualPixelMethod( % ResampleFilter *resample_filter,const VirtualPixelMethod method) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o method: the virtual pixel method. 
% */ MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod( ResampleFilter *resample_filter,const VirtualPixelMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->virtual_pixel=method; if (method != UndefinedVirtualPixelMethod) (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method); return(MagickTrue); }
elemwise_binary_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" #include "../operator_common.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! 
\brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP, typename xpu> static inline size_t FillDense(mshadow::Stream<xpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<xpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = static_cast<int>(std::min(idx_l, idx_r)); if (static_cast<size_t>(index_out_min) > iter_out) { const DType zero_input_val = OP::Map(DType(0), DType(0)); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) { Fill<false>(s, (*out)[i], req, zero_input_val); } } return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int' } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } public: /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? a : c) : (b < c ? 
b : c); } private: template<typename LOP, typename ROP> static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, mshadow::Stream<cpu>* s, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { using namespace mxnet_op; const size_t size = static_cast<size_t>((outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes); const DType *ograd_dptr = inputs[0].dptr<DType>(); if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); } else if (req[0] != kNullOp) { DType *lgrad_dptr = outputs[0].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel<mxnet_op::op_with_req<LOP, Req>, cpu>::Launch(s, size, lgrad_dptr, ograd_dptr); }); } if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); } else if (req[1] != kNullOp) { DType *rgrad_dptr = outputs[1].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { Kernel<mxnet_op::op_with_req<ROP, Req>, cpu>::Launch(s, size, rgrad_dptr, ograd_dptr); }); } }); } template<typename LOP, typename ROP> static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, mshadow::Stream<cpu>* s, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DCHECK_EQ(outputs.size(), 2U); DCHECK_EQ(inputs.size(), 3U); const DType *ograd_dptr = inputs[0].dptr<DType>(); const DType *lhs_dptr = inputs[1].dptr<DType>(); const DType *rhs_dptr = inputs[2].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { const size_t size = static_cast<size_t>( (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * lgrad_dptr = outputs[0].dptr<DType>(); mxnet_op::Kernel< mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, cpu>::Launch( s, size, 
lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr); }); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { const size_t size = static_cast<size_t>( (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * rgrad_dptr = outputs[1].dptr<DType>(); mxnet_op::Kernel< mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, cpu>::Launch( s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr); }); }); } template< typename xpu, typename LOP, typename ROP, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false, typename BackupCompute> static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs, BackupCompute backup_compute) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); // lhs grad if (req[0] != kNullOp) { // RspRspOp can handle dense outputs so long as OP(0, 0) == 0 RspRspOp<LOP>( s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false); // lhs in-place RspRspOp<op::mshadow_op::mul>( s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false); } // rhs grad if (req[1] != kNullOp) { RspRspOp<ROP>( s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1], false, false, false, false); // rhs in-place RspRspOp<op::mshadow_op::mul>( s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false); } } public: /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename OP> static void RspRspOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! 
\brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename OP> static void RspRspOp(mshadow::Stream<gpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename OP> static void CsrCsrOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename OP> static void CsrCsrOp(mshadow::Stream<gpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */ template<typename OP> static void DnsCsrDnsOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, const bool reverse); /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */ template<typename OP> static void DnsCsrDnsOp(mshadow::Stream<gpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, const bool reverse); /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */ template<typename xpu, typename OP> static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, const bool reverse); /*! 
\brief DNS -op- RSP binary operator for non-canonical NDArray */ template<typename xpu, typename OP> static void DnsRspDnsOp(mshadow::Stream<xpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, const bool reverse); public: /*! * \brief Rsp-op-Rsp operation which produces a dense result * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); /*! * \brief Allow one of the binary inputs to be dense and still produce a sparse output. * Typically used for sparse * dense = sparse. * Note: for csr, it dispatches to fallback other than csr, csr -> csr * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { using namespace common; CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name; CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name; const auto& lhs_stype = in_attrs->at(0); const auto& rhs_stype = in_attrs->at(1); auto& out_stype = out_attrs->at(0); bool dispatched = false; const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? 
DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { // dns, dns -> dns dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) { // rsp, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) { // csr, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) { // rsp, dns -> rsp // dns, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) { // csr, dns -> csr // dns, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, DispatchMode::kFComputeEx); } if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } return dispatched; } /*! * \brief Allow one of the inputs to be dense and produce a dense output, * for rsp inputs only support when both inputs are rsp type. 
* \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  template<bool cpu_only, bool rsp, bool csr>
  static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
                                     const int dev_mask,
                                     DispatchMode* dispatch_mode,
                                     std::vector<int> *in_attrs,
                                     std::vector<int> *out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2);
    CHECK_EQ(out_attrs->size(), 1);
    const auto lhs_stype = (*in_attrs)[0];
    const auto rhs_stype = (*in_attrs)[1];
    bool dispatched = false;
    // When cpu_only is set, FComputeEx is restricted to the CPU device mask;
    // other devices fall back.
    const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                             DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns ... -> dns
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp, ... -> rsp
      // NOTE(review): uses kFComputeEx directly rather than dispatch_ex, so the
      // cpu_only flag is not honored on this path — confirm intent.
      dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr, ... -> csr
      dispatched = storage_type_assign(out_attrs, kCSRStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
         (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
      // dense, csr -> dense / csr, dense -> dense
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched &&
        ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
         (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
      // dense, rsp -> dense / rsp, dense -> dense
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      // NOTE(review): unlike PreferSparseStorageType, the fallback result is
      // discarded and the function returns true unconditionally — confirm
      // this is intentional.
      dispatch_fallback(out_attrs, dispatch_mode);
    }
    return true;
  }

  /*!
   * \brief Backward pass computing input gradient using forward inputs
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
                                       int dev_mask,
                                       DispatchMode* dispatch_mode,
                                       std::vector<int> *in_attrs,
                                       std::vector<int> *out_attrs);

  /*! \brief Dense elementwise binary kernel launch restricted to integer
   *         dtypes (dispatched via MXNET_INT_TYPE_SWITCH). */
  template<typename xpu, typename OP>
  static void ComputeInt(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        // Launch size in units of DataType<DType>::kLanes, rounded up.
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Dense elementwise binary kernel launch over all numeric dtypes
   *         (boolean outputs are rejected at runtime). */
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    if (outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        // Launch size in units of DataType<DType>::kLanes, rounded up.
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Backward kernel using the forward input (2 inputs -> 1 grad);
   *         restricted to real dtypes — int/bool gradients are rejected. */
  template<typename xpu, typename OP>
  static void MixedUnaryBackwardUseInCompute(const nnvm::NodeAttrs &attrs,
                                             const OpContext &ctx,
                                             const std::vector<TBlob> &inputs,
                                             const std::vector<OpReqType> &req,
                                             const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    if (mxnet::common::is_int(outputs[0].type_flag_) ||
        outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
                 << mshadow::dtype_string(outputs[0].type_flag_)
                 << " type is not supported";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Backward kernel using forward input and output (3 inputs,
   *         consumes inputs[0] and inputs[2]); real dtypes only. */
  template<typename xpu, typename OP>
  static void MixedUnaryBackwardUseInOutCompute(const nnvm::NodeAttrs &attrs,
                                                const OpContext &ctx,
                                                const std::vector<TBlob> &inputs,
                                                const std::vector<OpReqType> &req,
                                                const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 3U);
    CHECK_EQ(outputs.size(), 1U);
    if (mxnet::common::is_int(outputs[0].type_flag_) ||
        outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
                 << mshadow::dtype_string(outputs[0].type_flag_)
                 << " type is not supported";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[2].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[2].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Dense elementwise binary kernel launch including boolean dtype. */
  template<typename xpu, typename OP>
  static void ComputeWithBool(const nnvm::NodeAttrs &attrs,
                              const OpContext &ctx,
                              const std::vector<TBlob> &inputs,
                              const std::vector<OpReqType> &req,
                              const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Logical/comparison binary op: inputs may have two different
   *         dtypes (incl. bool); output buffer is written as bool. */
  template<typename xpu, typename OP>
  static void ComputeLogic(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType>
&req,
                           const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
        MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[1].type_flag_, EType, {
          const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
          + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
          if (size != 0) {
            Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
            outputs[0].dptr<bool>(),
            inputs[0].dptr<DType>(), inputs[1].dptr<EType>());
          }
        });
      });
    });
  }

  /*! \brief Sparse-aware forward dispatch: routes rsp/rsp, csr/csr and the
   *         mixed dense/sparse combinations to the matching kernel, logging
   *         an unimplemented-op message for anything else. */
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    using namespace common;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp) return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
        (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      RspRspOp<OP>(
        s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0],
        false, false, false, false);
    } else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
      // csr, csr -> csr
      CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
    } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
               out_stype == kDefaultStorage) {
      // dns, csr / csr, dns -> dns; 'reverse' records that the csr operand
      // was the lhs so the kernel can preserve operand order.
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kCSRStorage);
      DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
    } else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
               out_stype == kDefaultStorage) {
      // dns, rsp / rsp, dns -> dns
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kRowSparseStorage);
      const NDArray& rsp = (reverse)? inputs[0] : inputs[1];
      DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief ComputeEx allowing dense lvalue and/or rvalue */
  template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
  static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
                                  const OpContext &ctx,
                                  const std::vector<NDArray> &inputs,
                                  const std::vector<OpReqType> &req,
                                  const std::vector<NDArray> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp) return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
        ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
         (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
        lhs_may_be_dense && rhs_may_be_dense) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      // More than once dense not allowed (this will be checked in RspRspOp):
      // rsp, dns -> dns <-- NOT ALLOWED
      // dns, rsp -> dns <-- NOT ALLOWED
      mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
      RspRspOp<OP>(
        s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0],
        lhs_may_be_dense, rhs_may_be_dense, false, false);
    } else if
(lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
      // csr, csr: defer to the general sparse dispatcher.
      ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
    } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
               out_stype == kCSRStorage) {
      // dns, csr / csr, dns -> csr; 'reverse' marks csr-on-the-left.
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kCSRStorage);
      DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief Backward pass that needs no forward inputs (dense TBlobs). */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<TBlob> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<TBlob> &outputs) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    BackwardUseNone_<LOP, ROP>(attrs, s, inputs, req, outputs);
  }

  /*! \brief Sparse backward pass that needs no forward inputs: each input
   *         grad is produced from the single output grad via UnaryOp when
   *         storage types match (rsp or csr). */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
                                       const OpContext &ctx,
                                       const std::vector<NDArray> &inputs,
                                       const std::vector<OpReqType> &req,
                                       const std::vector<NDArray> &outputs) {
    CHECK_EQ(inputs.size(), 1U);   // output grad
    CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
    const auto in_stype = inputs[0].storage_type();
    const auto lhs_stype = outputs[0].storage_type();
    const auto rhs_stype = outputs[1].storage_type();
    // lhs grad
    if (req[0] != kNullOp) {
      if (in_stype == lhs_stype &&
          (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> rsp, _. op requires 0-input returns 0-output
        DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
        UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
      } else {
        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
      }
    }
    // rhs grad
    if (req[1] != kNullOp) {
      if (in_stype == rhs_stype &&
          (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        // NOTE(review): this is the rhs branch but the CHECK inspects
        // outputs[0] — outputs[1] looks intended; confirm.
        CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> _, rsp. op requires 0-input returns 0-output
        DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
        UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
      } else {
        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
      }
    }
  }

  /*! \brief Backward pass that uses the forward inputs (dense TBlobs). */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const std::vector<TBlob> &inputs,
                                   const std::vector<OpReqType> &req,
                                   const std::vector<TBlob> &outputs) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    BackwardUseIn_<LOP, ROP>(attrs, s, inputs, req, outputs);
  }

  /*! \brief Sparse backward pass that uses the forward inputs; only the
   *         all-rsp input combination is implemented. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<NDArray> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<NDArray> &outputs) {
    using namespace common;
    CHECK_EQ(inputs.size(), 3U);
    CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
    const auto lhs_grad_stype = outputs[0].storage_type();
    const auto rhs_grad_stype = outputs[1].storage_type();
    if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
        (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
        (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
      // rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
      RspRspOpBackward<xpu, LOP, ROP, false, false, false>(
        attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
    } else {
      LOG(FATAL) << "Not Implemented";
    }
  }
};  // class ElemwiseBinaryOp

/*!
\brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name)                          \
  NNVM_REGISTER_OP(name)                                              \
  .set_num_inputs(2)                                                  \
  .set_num_outputs(1)                                                 \
  .set_attr<nnvm::FListInputNames>("FListInputNames",                 \
    [](const NodeAttrs& attrs) {                                      \
      return std::vector<std::string>{"lhs", "rhs"};                  \
    })                                                                \
  .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>)   \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>)       \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                   \
    [](const NodeAttrs& attrs){                                       \
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};       \
    })                                                                \
  .add_argument("lhs", "NDArray-or-Symbol", "first input")            \
  .add_argument("rhs", "NDArray-or-Symbol", "second input")

/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$)              \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                               \
  .set_attr<FInferStorageType>("FInferStorageType",                                     \
                               ElemwiseStorageType<2, 1, true, true, true>)             \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)       \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */                 \
    [](const NodeAttrs& attrs) {                                                        \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

/*! \brief Binary launch, with FComputeEx for csr and rsp available.
    when inputs contain both sparse and dense, sparse output is preferred. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$)           \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                               \
  .set_attr<FInferStorageType>("FInferStorageType",                                     \
                               ElemwiseBinaryOp::PreferSparseStorageType)               \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)       \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */                 \
    [](const NodeAttrs& attrs) {                                                        \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

/*! \brief Binary launch, dense result.
 * Storage inference is set to ElemwiseBinaryOp::SparseSparseWithDenseResult
 * (sparse inputs allowed, output materialized dense). */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$)           \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                               \
  .set_attr<FInferStorageType>("FInferStorageType",                                     \
                               ElemwiseBinaryOp::SparseSparseWithDenseResult)           \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)       \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)

/*! \brief Binary launch, with FComputeEx for prefer dense */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$)           \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                               \
  .set_attr<FInferStorageType>("FInferStorageType",                                     \
                               ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)       \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */                 \
    [](const NodeAttrs& attrs) {                                                        \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

#if MXNET_USE_CUDA

/* GPU functors whose OP/LOP/ROP strings select the kernel operation by name;
 * implementations are provided elsewhere (not visible in this header). */
struct ElemwiseBinaryRTCCompute {
  std::string OP;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};

struct ElemwiseBinaryRTCBwdUseNone {
  std::string LOP;
  std::string ROP;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};

struct ElemwiseBinaryRTCBwdUseIn {
  std::string LOP;
  std::string ROP;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};

#endif

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
lone_target_exit_data.c
// Check that a target exit data directive behaves correctly when the runtime // has not yet been initialized. // RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu // RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda #include <stdio.h> int main() { // CHECK: x = 98 int x = 98; #pragma omp target exit data map(from:x) printf("x = %d\n", x); return 0; }
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4)); ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32));t3<=min(min(min(floord(4*t2+Ny,32),floord(Nt+Ny-4,32)),floord(2*t1+Ny+1,32)),floord(4*t1-4*t2+Nz+Ny-1,32));t3++) { for (t4=max(max(max(0,ceild(t1-255,256)),ceild(4*t2-Nz-508,512)),ceild(32*t3-Ny-508,512));t4<=min(min(min(min(floord(4*t2+Nx,512),floord(Nt+Nx-4,512)),floord(2*t1+Nx+1,512)),floord(32*t3+Nx+28,512)),floord(4*t1-4*t2+Nz+Nx-1,512));t4++) { for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),32*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),32*t3+30),512*t4+510),4*t1-4*t2+Nz+1);t5++) { for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON 
#pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
ten_tusscher_3_sensibility.c
#include "model_common.h"
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_3_sensibility.h"

// Cell-type selector for the transmural-heterogeneity constants below
// (EPI / ENDO / MCELL switch Gks, Gto and the S-gate formulation in RHS_cpu).
#define ENDO

// Report the model's resting potential and number of ODE state variables
// to the framework (INITIAL_V and NEQ come from the model header).
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Fill the state vector sv[0..NEQ-1] for one cell.
// If extra_data is given, the 12 initial conditions are read from it
// (layout: [0..6] model parameters, [7..18] initial conditions, [19..]
// per-cell fibrosis values); otherwise the published resting state is used.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
    if (extra_data == NULL) {
        sv[0]  = -86.2f;  // V; millivolt
        sv[1]  = 0.0f;    // M
        sv[2]  = 0.75;    // H
        sv[3]  = 0.75;    // J
        sv[4]  = 0.0f;    // Xr1
        sv[5]  = 0.0f;    // Xs
        sv[6]  = 1.0f;    // S
        sv[7]  = 1.0f;    // F
        sv[8]  = 1.0f;    // F2
        sv[9]  = 0.0;     // D_INF
        sv[10] = 0.0;     // R_INF
        sv[11] = 0.0;     // Xr2_INF
    }
    else {
        // Initial conditions start right after the 7 scalar parameters.
        real *initial_conditions = ((real*)extra_data) + 7; //pointer

        sv[0]  = initial_conditions[0];   // V; millivolt
        sv[1]  = initial_conditions[1];   // M
        sv[2]  = initial_conditions[2];   // H
        sv[3]  = initial_conditions[3];   // J
        sv[4]  = initial_conditions[4];   // Xr1
        sv[5]  = initial_conditions[5];   // Xs
        sv[6]  = initial_conditions[6];   // S
        sv[7]  = initial_conditions[7];   // F
        sv[8]  = initial_conditions[8];   // F2
        sv[9]  = initial_conditions[9];   // D_INF
        sv[10] = initial_conditions[10];  // R_INF
        sv[11] = initial_conditions[11];  // Xr2_INF
    }
}

// Advance every requested cell by num_steps explicit steps of size dt.
// If extra_data is NULL, a parameter block with healthy-cell defaults and a
// zeroed per-cell fibrosis array are allocated locally and released at the end.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    real *fibrosis;

    // Default values for a healthy cell ///////////
    real atpi = 6.8f;
    real Ko = 5.4f;
    real Ki = 138.3f;
    real Vm_change = 0.0;
    real GNa_multiplicator = 1.0f;
    real GCa_multiplicator = 1.0f;
    real INaCa_multiplicator = 1.0f;

    real V_0 = -86.2f;      // V; millivolt
    real M_0 = 0.0f;        // M
    real H_0 = 0.75;        // H
    real J_0 = 0.75;        // J
    real Xr1_0 = 0.0f;      // Xr1
    real Xs_0 = 0.0f;       // Xs
    real S_0 = 1.0f;        // S
    real F_0 = 1.0f;        // F
    real F2_0 = 1.0f;       // F2
    real D_inf_0 = 0.0;     // D_INF
    real R_inf_0 = 0.0;     // R_INF
    real Xr2_inf_0 = 0.0;   // Xr2_INF
    ////////////////////////////////////

    int num_extra_parameters = 7;
    int num_initial_conditions = 12;

    size_t extra_parameters_size = (num_extra_parameters+num_initial_conditions)*sizeof(real);

    // Tracks whether extra_data/fibrosis were allocated by this function,
    // so that only locally-owned memory is freed below.
    int allocated_here = 0;

    if(extra_data) {
        // Per-cell fibrosis values follow the parameters and initial conditions.
        fibrosis = ((real*)extra_data) + num_extra_parameters + num_initial_conditions; //pointer
    }
    else {
        extra_data = malloc(extra_parameters_size);
        ((real*)extra_data)[0] = atpi;
        ((real*)extra_data)[1] = Ko;
        ((real*)extra_data)[2] = Ki;
        ((real*)extra_data)[3] = Vm_change;
        ((real*)extra_data)[4] = GNa_multiplicator;
        ((real*)extra_data)[5] = GCa_multiplicator;
        ((real*)extra_data)[6] = INaCa_multiplicator;
        ((real*)extra_data)[7] = V_0;
        ((real*)extra_data)[8] = M_0;
        ((real*)extra_data)[9] = H_0;
        ((real*)extra_data)[10] = J_0;
        ((real*)extra_data)[11] = Xr1_0;
        ((real*)extra_data)[12] = Xs_0;
        ((real*)extra_data)[13] = S_0;
        ((real*)extra_data)[14] = F_0;
        ((real*)extra_data)[15] = F2_0;
        ((real*)extra_data)[16] = D_inf_0;
        ((real*)extra_data)[17] = R_inf_0;
        ((real*)extra_data)[18] = Xr2_inf_0;

        // No fibrosis information supplied: treat every cell as fibrotic (0).
        fibrosis = calloc(num_cells_to_solve, sizeof(real));

        allocated_here = 1;
    }

    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i], fibrosis[i], extra_data);
        }
    }

    // BUG FIX: the previous code checked `extra_data == NULL` here, but
    // extra_data had already been reassigned by malloc above, so the test was
    // always false and both the calloc'd fibrosis array and the malloc'd
    // parameter block leaked on every call. Free exactly what we allocated.
    if(allocated_here) {
        free(fibrosis);
        free(extra_data);
    }
}

// Perform one time step for a single cell.
// V is integrated with explicit Euler; the gates come back from RHS_cpu
// already advanced with the Rush-Larsen scheme, so they are assigned directly.
void solve_model_ode_cpu(real dt, real *sv, real stim_current, real fibrosis, real *extra_parameters)  {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt, fibrosis, extra_parameters);

    //THIS MODEL USES THE Rush Larsen Method TO SOLVE THE EDOS
    sv[0] = dt*rDY[0] + rY[0];  // membrane voltage: forward Euler
    sv[1] = rDY[1];             // gates: Rush-Larsen closed-form update
    sv[2] = rDY[2];
    sv[3] = rDY[3];
    sv[4] = rDY[4];
    sv[5] = rDY[5];
    sv[6] = rDY[6];
    sv[7] = rDY[7];
    sv[8] = rDY[8];
    sv[9] = rDY[9];
    sv[10] = rDY[10];
    sv[11] = rDY[11];
}

// Right-hand side of the reduced ten Tusscher model with ischemia/fibrosis
// modifiers. rDY_[0] receives -Itot (dV/dt contribution); rDY_[1..11] receive
// the gates' new values (Rush-Larsen), not derivatives.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt, real fibrosis, real *extra_parameters) {

    //fibrosis = 0 means that the cell is fibrotic, 1 is not fibrotic.
    //Anything between 0 and 1 means border zone

    //THIS IS THE STATE VECTOR THAT WE NEED TO SAVE IN THE STEADY STATE
    const real svolt   = sv[0];
    const real sm      = sv[1];
    const real sh      = sv[2];
    const real sj      = sv[3];
    const real sxr1    = sv[4];
    const real sxs     = sv[5];
    const real ss      = sv[6];
    const real sf      = sv[7];
    const real sf2     = sv[8];
    const real D_INF   = sv[9];
    const real R_INF   = sv[10];
    const real Xr2_INF = sv[11];

    const real natp = 0.24;          // K dependence of ATP-sensitive K current
    const real nicholsarea = 0.00005; // Nichol's areas (cm^2)
    const real hatp = 2;             // Hill coefficient

    //Linear changing of atpi depending on the fibrosis and distance from the
    //center of the scar (only for border zone cells)
    real atpi = extra_parameters[0];
    real atpi_change = 6.8f - atpi;
    atpi = atpi + atpi_change*fibrosis;

    //Extracellular potassium concentration was elevated
    //from its default value of 5.4 mM to values between 6.0 and 8.0 mM
    //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischemia and Re-entry
    real Ko = extra_parameters[1];
    real Ko_change  = 5.4f - Ko;
    Ko = Ko + Ko_change*fibrosis;

    real Ki = extra_parameters[2];
    real Ki_change  = 138.3 - Ki;
    Ki = Ki + Ki_change*fibrosis;

    real Vm_modifier = extra_parameters[3];
    Vm_modifier = Vm_modifier - Vm_modifier*fibrosis;

    real GNa_multplicator = extra_parameters[4];
    real GNa_multplicator_change  = 1.0f - GNa_multplicator;
    GNa_multplicator = GNa_multplicator + GNa_multplicator_change*fibrosis;

    real GCaL_multplicator = extra_parameters[5];
    real GCaL_multplicator_change  = 1.0f - GCaL_multplicator;
    GCaL_multplicator = GCaL_multplicator + GCaL_multplicator_change*fibrosis;

    real INaCa_multplicator = extra_parameters[6];
    real INaCa_multplicator_change  = 1.0f - INaCa_multplicator;
    INaCa_multplicator = INaCa_multplicator + INaCa_multplicator_change*fibrosis;

    //real katp = 0.306;
    //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischaemia and Re-entry
    const real katp = -0.0942857142857*atpi + 0.683142857143;

    const real patp =  1/(1 + pow((atpi/katp),hatp));
    const real gkatp    =  0.000195/nicholsarea;
    const real gkbaratp =  gkatp*patp*pow((Ko/5.4),natp);

    const real katp2= 1.4;
    const real hatp2 = 2.6;
    const real pcal = 1.0/(1.0 + pow((katp2/atpi),hatp2));

    const real Cao=2.0;
    const real Nao=140.0;
    const real Cai=0.00007;
    const real Nai=7.67;

    //Constants
    const real R=8314.472;
    const real F=96485.3415;
    const real T=310.0;
    const real RTONF=(R*T)/F;

    //Parameters for currents
    //Parameters for IKr
    const real Gkr=0.101;
    //Parameters for Iks
    const real pKNa=0.03;
#ifdef EPI
    const real Gks=0.257;
#endif
#ifdef ENDO
    const real Gks=0.392;
#endif
#ifdef MCELL
    const real Gks=0.098;
#endif
    //Parameters for Ik1
    const real GK1=5.405;
    //Parameters for Ito
#ifdef EPI
    const real Gto=0.294;
#endif
#ifdef ENDO
    const real Gto=0.073;
#endif
#ifdef MCELL
    const real Gto=0.294;
#endif
    //Parameters for INa
    const real GNa=14.838*GNa_multplicator; //ACIDOSIS
    //Parameters for IbNa
    const real GbNa=0.00029;
    //Parameters for INaK
    const real KmK=1.0;
    const real KmNa=40.0;
    const real knak=2.724;
    //Parameters for ICaL
    const real GCaL=0.2786*pcal*GCaL_multplicator; //ACIDOSIS
    //Parameters for IbCa
    const real GbCa=0.000592;
    //Parameters for INaCa
    const real knaca=1000;
    const real KmNai=87.5;
    const real KmCa=1.38;
    const real ksat=0.1;
    const real n=0.35;
    //Parameters for IpCa
    const real GpCa=0.1238;
    const real KpCa=0.0005;
    //Parameters for IpK;
    const real GpK=0.0293;

    const real Ek=RTONF*(log((Ko/Ki)));
    const real Ena=RTONF*(log((Nao/Nai)));
    const real Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    const real Eca=0.5*RTONF*(log((Cao/Cai)));

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real IKatp;

    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;

    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real Xr1_INF;
    real Xr2_INF_new;
    real TAU_Xr1;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF_new;
    real S_INF;
    real TAU_S;
    real Af;
    real Bf;
    real Cf;
    real Af2;
    real Bf2;
    real Cf2;
    real D_INF_new;
    real TAU_F;
    real F_INF;
    real TAU_F2;
    real F2_INF;
    real sItot;

    //Needed to compute currents
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*((svolt-Vm_modifier)-Ena); //ACIDOSIS
    ICaL=GCaL*D_INF*sf*sf2*((svolt-Vm_modifier)-60);  //ACIDOSIS
    Ito=Gto*R_INF*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*Xr2_INF*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaCa = INaCa*INaCa_multplicator; //ACIDOSIS
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    IKatp = gkbaratp*(svolt-Ek);

    //Determine total current
    sItot = IKr    +
            IKs   +
            IK1   +
            Ito   +
            INa   +
            IbNa  +
            ICaL  +
            IbCa  +
            INaK  +
            INaCa +
            IpCa  +
            IpK   +
            IKatp +
            stim_current;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF_new=1./(1.+exp((svolt-(-88.))/24.));

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=(1400./(sqrt(1.+exp((5.-svolt)/6))));
    Bxs=(1./(1.+exp((svolt-35.)/15.)));
    TAU_Xs=Axs*Bxs+80;

#ifdef EPI
    R_INF_new=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF_new=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF_new=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF_new=1./(1.+exp((-8-svolt)/7.5));
    F_INF=1./(1.+exp((svolt+20)/7));
    Af=1102.5*exp(-(svolt+27)*(svolt+27)/225);
    Bf=200./(1+exp((13-svolt)/10.));
    Cf=(180./(1+exp((svolt+30)/10)))+20;
    TAU_F=Af+Bf+Cf;
    F2_INF=0.67/(1.+exp((svolt+35)/7))+0.33;
    Af2=600*exp(-(svolt+27)*(svolt+27)/170);
    Bf2=7.75/(1.+exp((25-svolt)/10));
    Cf2=16/(1.+exp((svolt+30)/10));
    TAU_F2=Af2+Bf2+Cf2;

    //update voltage
    rDY_[0] = -sItot;

    //Update gates (Rush-Larsen: new gate values, not derivatives)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[6]= S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[7] =F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    rDY_[8] =F2_INF-(F2_INF-sf2)*exp(-dt/TAU_F2);

    //Instantaneous/steady-state gates: pass the new steady-state value through
    rDY_[9] = D_INF_new;
    rDY_[10] = R_INF_new;
    rDY_[11] = Xr2_INF_new;
}
neighbor.h
#pragma once

// ExPair: record describing one neighbor pair that crosses an MPI rank
// boundary, plus a bitmap (rank_list) of all ranks participating in the
// pair's cluster. The bitmap is `size` 32-bit words, one bit per rank.
class ExPair{
 public:
    PS::S32 id_in;       // particle id on this rank
    PS::S32 id_out;      // particle id on the remote rank
    PS::S32 id_cluster;  // minimum particle id in the cluster so far
    PS::S32 * rank_list; // rank bitmap, `size` words (heap-owned)

    static PS::S32 size;   // number of words in rank_list
    static PS::S32 rem;    // unused trailing bits in the last word
    static PS::S32 n_bit;  // bits per word (8 * sizeof(PS::S32))

    // Compute the static bitmap geometry from the communicator size.
    // Must be called before any ExPair is constructed (NeighborList() does so).
    static void initialize() {
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        n_bit = 8 * sizeof(PS::S32);
        size = (PS::S32)std::ceil((PS::F64)n_proc/n_bit);
        rem = n_bit*size - n_proc;
    }
    // Number of PS::S32 words one serialized ExPair occupies (3 ids + bitmap).
    static PS::S32 getSize() { return size+3; }

    ExPair(){
        //PS::S32 myrank = PS::Comm::getRank();
        id_in = id_out = id_cluster = 0;
        rank_list = new PS::S32[size];
        for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0;
        //setFlag(myrank);
    }
    ExPair(PS::S32 id_in0, PS::S32 id_out0, PS::S32 id_cluster0){
        //PS::S32 myrank = PS::Comm::getRank();
        id_in = id_in0;
        id_out = id_out0;
        id_cluster = id_cluster0;
        rank_list = new PS::S32[size];
        for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0;
        //setFlag(myrank);
    }
    // Deep copy: each ExPair owns its own bitmap.
    ExPair(const ExPair & ep){
        id_in = ep.id_in;
        id_out = ep.id_out;
        id_cluster = ep.id_cluster;
        rank_list = new PS::S32[size];
        for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = ep.rank_list[i];
    }
    ExPair &operator=(const ExPair & ep){
        if ( this != &ep ){
            id_in = ep.id_in;
            id_out = ep.id_out;
            id_cluster = ep.id_cluster;
            for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] = ep.rank_list[i];
        }
        return *this;
    }
    ~ExPair(){
        delete [] rank_list;
    }

    PS::S32 getId() const { return id_in; }
    std::pair<PS::S32,PS::S32> getPair() const { return std::make_pair(id_in, id_out); }
    PS::S32 getIdCluster() const { return id_cluster; }
    PS::S32 setIdCluster(PS::S32 id_cluster0) { return id_cluster = id_cluster0; }

    // Deserialize from a buffer produced by output() on the PEER rank.
    // NOTE: inp[0]/inp[1] are deliberately swapped relative to output() —
    // the peer's id_in is our id_out and vice versa. Returns words consumed.
    PS::S32 input(PS::S32 * inp){
        id_in  = inp[1];
        id_out = inp[0];
        id_cluster = inp[2];
        for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = inp[i+3];
        return size+3;
    }
    // Serialize into outp. Returns the number of words written (size+3).
    PS::S32 output(PS::S32 * outp){
        outp[0] = id_in;
        outp[1] = id_out;
        outp[2] = id_cluster;
        for ( PS::S32 i=0; i<size; i++ ) outp[i+3] = rank_list[i];
        return size+3;
    }
    // Test whether rank i's bit is set in the bitmap.
    bool checkFlag(const PS::S32 i) const {
        PS::S32 n  = i / n_bit;
        PS::S32 ii = i - n_bit * n;
        return rank_list[n] & (1<<ii);
    }
    // Set rank i's bit.
    void setFlag(const PS::S32 i) {
        PS::S32 n  = i / n_bit;
        PS::S32 ii = i - n_bit * n;
        rank_list[n] |= (1<<ii);
    }
    // Clear rank i's bit.
    void unsetFlag(const PS::S32 i) {
        PS::S32 n  = i / n_bit;
        PS::S32 ii = i - n_bit * n;
        rank_list[n] &= ~(1<<ii);
    }
    void resetFlag() {
        for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0;
    }
    // True if both bitmaps are identical.
    bool equalFlag(const ExPair & ep) const {
        bool check = true;
        for ( PS::S32 i=0; i<size; i++ ) check &= (rank_list[i]==ep.rank_list[i]);
        return check;
    }
    // Lowest rank whose bit is set; returns n_proc when the bitmap is empty.
    PS::S32 getMinFlag() const {
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        for (PS::S32 i=0; i<n_proc; i++) if ( checkFlag(i) ) return i;
        return n_proc;
    }

    void operator &= (const ExPair & ep) {
        for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] &= ep.rank_list[i];
    }
    void operator |= (const ExPair & ep) {
        for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] |= ep.rank_list[i];
    }

    // Merge ep into *this: take the smaller cluster id and OR the bitmaps.
    // Returns true if anything changed (i.e. new information was absorbed).
    bool exchange(const ExPair & ep) {
        bool check = (this->id_cluster != ep.id_cluster);
        this->id_cluster = std::min(this->id_cluster, ep.id_cluster);
        for ( PS::S32 i=0; i<size; i++ ) {
            check |= (this->rank_list[i] != ep.rank_list[i]);
            this->rank_list[i] |= ep.rank_list[i];
        }
        return check;
    }

    // Debug dump: rank, ids, cluster id and the per-rank bit pattern.
    void show(){
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        std::cout << PS::Comm::getRank() << "\t" << id_in << "\t" << id_out
                  << "\t" << id_cluster << "\t";
        for ( PS::S32 i=0; i<n_proc; i++ ) std::cout << (checkFlag(i));
        std::cout << std::endl;
    }
};

// Definitions of ExPair's static members (header-only usage: this header must
// be included from exactly one translation unit, or these would violate ODR).
PS::S32 ExPair::size;
PS::S32 ExPair::rem;
PS::S32 ExPair::n_bit;

// NeighborList: per-rank bookkeeping for neighbor search and cluster
// construction, including the pairs that cross rank boundaries (ex_*
// members) and the per-rank send/recv plans derived from them.
class NeighborList{
 public:
    std::vector<std::vector<PS::S32> > n_list;         // neighbor ids per local particle
    std::map<PS::S32, PS::S32> id_map;                 // particle id -> local index
    std::vector<PS::S32> with_neighbor_list;           // local indices that have neighbors
    std::vector<std::pair<PS::S32, PS::S32> > pair_list;   // local (i, j) neighbor pairs, i < j
    std::vector<std::pair<PS::S32,PS::S32> > ex_list;      // (local id, remote id) cross-rank pairs
    std::vector<std::pair<PS::S32,PS::S32> > ex_adr_list;  // (rank, slot) address of each ex pair
    std::vector<PS::S32> connected_list;               // ranks we share at least one pair with
    std::vector<std::vector<ExPair> > ex_data;         // per-rank ExPair storage
    std::map<std::pair<PS::S32,PS::S32>, std::pair<PS::S32, PS::S32> > ex_data_map; // pair -> (rank, slot)
    std::vector<std::vector<PS::S32> > recv_list;      // cluster ids to receive, per rank
    std::vector<std::vector<PS::S32> > send_list;      // cluster ids to send, per rank
    std::vector<PS::S32> recv_rank_list;               // ranks with non-empty recv_list
    std::vector<PS::S32> send_rank_list;               // ranks with non-empty send_list

    std::vector<PS::S32> & operator[](PS::S32 i){ return n_list[i]; }

    NeighborList() {
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        n_list.clear();
        id_map.clear();
        with_neighbor_list.clear();
        pair_list.clear();
        ex_list.clear();
        ex_adr_list.clear();
        connected_list.clear();
        ex_data_map.clear();
        recv_rank_list.clear();
        send_rank_list.clear();

        ex_data.resize(n_proc);
        recv_list.resize(n_proc);
        send_list.resize(n_proc);
#pragma omp parallel for
        for (PS::S32 i=0; i<n_proc; i++){
            ex_data[i].clear();
            recv_list[i].clear();
            send_list[i].clear();
        }

        // Sets up ExPair's static bitmap geometry before any ExPair is made.
        ExPair::initialize();
    }

    // Reset all per-step state (id_map is intentionally kept; see makeIdMap).
    template <class Tpsys>
    void initializeList(Tpsys & pp) {
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        const PS::S32 n_loc = pp.getNumberOfParticleLocal();
        n_list.clear();
        //id_map.clear();
        with_neighbor_list.clear();
        pair_list.clear();
        ex_list.clear();
        ex_adr_list.clear();
        connected_list.clear();
        ex_data_map.clear();
        recv_rank_list.clear();
        send_rank_list.clear();
#pragma omp parallel for
        for ( PS::S32 i=0; i<n_proc; i++ ){
            ex_data[i].clear();
            recv_list[i].clear();
            send_list[i].clear();
        }
        n_list.resize(n_loc);
#pragma omp parallel for
        for(PS::S32 i=0; i<n_loc; i++) n_list.at(i).clear();
    }

    // Resolve an (rank, slot) address into the stored ExPair.
    ExPair & getExData(std::pair<PS::S32, PS::S32> adr) { return ex_data[adr.first][adr.second]; }

    PS::S32 getNumberOfParticlesWithNeighbor() const { return with_neighbor_list.size(); }
    PS::S32 getNumberOfNeighborPairsLocal() const { return pair_list.size(); }
    PS::S32 getNumberOfRankSend() const { return send_rank_list.size(); }
    PS::S32 getNumberOfRankRecv() const { return recv_rank_list.size(); }
    PS::S32 getNumberOfRankConnected() const { return connected_list.size(); }
    PS::S32 getNumberOfPairConnected(const PS::S32 ii) const { return ex_data[connected_list.at(ii)].size(); }

    // Register particle j_id (owned by j_rank) as a neighbor of local particle
    // i. Cross-rank pairs create an ExPair; local pairs are recorded once
    // (i < j_id_local). Called from OpenMP-parallel regions, hence the
    // critical sections around the shared containers.
    template <class Tpsys>
    void addNeighbor(Tpsys & pp, PS::S32 i, PS::S32 j_id, PS::S32 j_rank, PS::S32 j_id_local=-1) {
        n_list[i].push_back(j_id);
        pp[i].neighbor ++;
        pp[i].id_cluster = std::min(pp[i].id_cluster, j_id);
        if ( j_rank != pp[i].myrank ) {
#pragma omp critical
            {
                ex_list.push_back(std::make_pair(pp[i].id, j_id));
                ex_adr_list.push_back(std::make_pair(j_rank, ex_data.at(j_rank).size()));
                ex_data_map[std::make_pair(pp[i].id, j_id)] = std::make_pair(j_rank, ex_data.at(j_rank).size());
                ExPair ex_pair(pp[i].id, j_id, pp[i].id_cluster);
                ex_pair.setFlag(pp[i].myrank);
                ex_pair.setFlag(j_rank);
                ex_data.at(j_rank).push_back(ex_pair);
            }
            pp[i].inDomain = false;
        } else {
            if ( j_id_local < 0 ) j_id_local = id_map.at(j_id);
            if ( i<j_id_local ) {
#pragma omp critical
                {
                    pair_list.push_back(std::make_pair(i, j_id_local));
                }
            }
        }
    }

    // Debug consistency check: neighbor relations must be symmetric and the
    // global neighbor count must be even.
    // NOTE(review): `check` is accumulated but the final guard is `if (false)`,
    // so a detected asymmetry only prints — it never aborts. Looks like the
    // guard was meant to be `if (!check)`; confirm before changing.
    template <class Tpsys>
    void checkNeighbor(Tpsys & pp) {
        const PS::S32 n_loc = n_list.size();
        bool check = true;
        PS::S32 nei_tot = 0;
        for ( PS::S32 i=0; i<n_loc; i++ ) {
            if ( !pp[i].isDead ) assert ( id_map.at(pp[i].id) == i );
        }
        for ( PS::S32 i=0; i<n_loc; i++ ) {
            PS::S32 n_ngb = n_list.at(i).size();
            //if ( pp[i].neighbor )
            //std::cout << pp[i].id << "\t";
            nei_tot += n_ngb;
            for ( PS::S32 jj=0; jj<n_ngb; jj++ ) {
                PS::S32 j_id = n_list.at(i).at(jj);
                //if ( pp[i].neighbor )
                //std::cout << j_id << " ";
                auto itr = id_map.find(j_id);
                if ( itr == id_map.end() ) continue;
                PS::S32 j = itr->second;
                PS::S32 n_ngb_j = n_list.at(j).size();
                PS::S32 n_p = 0;
                for ( PS::S32 k=0; k<n_ngb_j; k++ ) {
                    PS::S32 k_id = n_list.at(j).at(k);
                    auto itr1 = id_map.find(k_id);
                    if ( itr1 == id_map.end() ) continue;
                    if ( (itr1->second) == i ) n_p ++ ;
                }
                if ( n_p != 1 ) {
                    std::cout << i << "\t" << pp[i].id << "\t" << j << "\t" << j_id << std::endl;
                    std::cout << "Neighbor of " << pp[i].id << ": ";
                    for (PS::S32 k=0; k<n_list.at(i).size(); k++) std::cout << n_list.at(i).at(k) << "\t";
                    std::cout << std::endl;
                    std::cout << "Neighbor of " << j_id << ": ";
                    for (PS::S32 k=0; k<n_list.at(j).size(); k++) std::cout << n_list.at(j).at(k) << "\t";
                    std::cout << std::endl;
                    check = check && false;
                    check = check && false;
                }
            }
            //if ( pp[i].neighbor )
            //std::cout << std::endl;
        }
        PS::S32 nei_tot_glb = PS::Comm::getSum(nei_tot);
        assert ( nei_tot_glb%2 == 0 );
        if ( false ) {
            PS::Abort();
        }
    }

    // Rebuild the list of ranks we exchange pairs with (non-empty ex_data).
    void createConnectedRankList(){
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        connected_list.clear();
        for ( PS::S32 i=0; i<n_proc; i++ ) {
            if ( ex_data[i].size() ) {
                connected_list.push_back(i);
                assert( i != PS::Comm::getRank() );
            }
        }
    }

    // Rebuild the id -> local-index map, skipping dead particles.
    template <class Tpsys>
    void makeIdMap(Tpsys & pp){
        const PS::S32 n_loc = pp.getNumberOfParticleLocal();
        id_map.clear();
        //assert( (PS::S32)(n_list.size()) == n_loc );
        for(PS::S32 i=0; i<n_loc; i++){
            //assert( pp[i].neighbor == (PS::S32)(n_list[i].size()) );
            if ( !pp[i].isDead ) {
                id_map[pp[i].id] = i;
            }
        }
    }

#if 1
    // Propagate cluster ids along local neighbor pairs until a fixed point
    // (label propagation: each cluster ends up tagged by its minimum id).
    // Afterwards, any particle sharing a cluster with a cross-rank particle
    // is flagged !inDomain so it will be exchanged.
    template <class Tpsys>
    void createNeighborCluster(Tpsys & pp){
        //const PS::S32 n_loc = pp.getNumberOfParticleLocal();
        const PS::S32 n_wngb = with_neighbor_list.size();
        const PS::S32 n_pair = pair_list.size();
        bool check = true;
        while( check ){
            check = false;
#pragma omp parallel for reduction (||:check)
            for(PS::S32 ii=0; ii<n_pair; ii++){
                PS::S32 i = pair_list.at(ii).first;
                PS::S32 j = pair_list.at(ii).second;
                if ( pp[i].id_cluster != pp[j].id_cluster ) {
#pragma omp critical
                    {
                        pp[i].id_cluster = pp[j].id_cluster = std::min(pp[i].id_cluster, pp[j].id_cluster);
                    }
                    check = check || true;
                }
            }
        }

        if( ex_list.size() != 0 ){
            PS::S32 n_out = ex_list.size();
#pragma omp parallel for
            for(PS::S32 ii=0; ii<n_wngb; ii++){
                PS::S32 i = with_neighbor_list.at(ii);
                for(PS::S32 j=0; j<n_out; j++){
                    PS::S32 i_out = id_map.at(ex_list.at(j).first);
                    PS::S32 id_cluster_out = pp[i_out].id_cluster;
                    if( pp[i].id_cluster == id_cluster_out ) pp[i].inDomain = false;
                }
            }
        }
    }
#else
    // Alternative (disabled) formulation: Jacobi-style sweep over all local
    // particles using a scratch id_cluster[] array instead of pair_list.
    template <class Tpsys>
    void createNeighborCluster(Tpsys & pp){
        const PS::S32 n_loc = pp.getNumberOfParticleLocal();
        PS::S32 j_id_cluster = 0;
        PS::S32 id_cluster[n_loc];
        bool check = true;
        while( check ){
            check = false;
#pragma omp parallel for
            for(PS::S32 i=0; i<n_loc; i++){
                PS::S32 j_id = 0;
                PS::S32 nei = 0;
                nei = pp[i].neighbor;
                id_cluster[i] = pp[i].id_cluster;
                if(nei == 0) continue;
                for(PS::S32 j=0; j<nei; j++){
                    auto itr = id_map.find(n_list[i].at(j));
                    if ( itr == id_map.end() ) continue;
                    j_id = itr->second;
                    j_id_cluster = pp[j_id].id_cluster;
                    if( id_cluster[i] > j_id_cluster ) id_cluster[i] = j_id_cluster;
                }
            }
#pragma omp parallel for reduction (||:check)
            for(PS::S32 i=0; i<n_loc; i++){
                if ( pp[i].id_cluster != id_cluster[i] ) {
                    check = check || true;
                    pp[i].id_cluster = id_cluster[i];
                }
                assert( pp[i].id >= id_cluster[i] );
            }
        }

        if( ex_list.size() != 0 ){
            PS::S32 n_out = ex_list.size();
#pragma omp parallel for
            for(PS::S32 i=0; i<n_loc; i++){
                for(PS::S32 j=0; j<n_out; j++){
                    PS::S32 i_out = id_map.at(ex_list.at(j).first);
                    PS::S32 id_cluster_out = pp[i_out].id_cluster;
                    if( pp[i].id_cluster == id_cluster_out ) pp[i].inDomain = false;
                }
            }
        }
    }
#endif

    // Copy the converged local cluster ids into the ExPairs and merge
    // ExPairs that already belong to the same cluster on this rank.
    // O(n_out^2); serial because exchange() mutates shared ExPairs.
    template <class Tpsys>
    void inputExData(Tpsys & pp){
        const PS::S32 n_out = ex_list.size();
        for ( PS::S32 j=0; j<n_out; j++ ){
            std::pair<PS::S32,PS::S32> pair   = ex_list.at(j);
            std::pair<PS::S32,PS::S32> ex_adr = ex_adr_list.at(j);
            assert( getExData(ex_adr).getId() == pair.first );
            getExData(ex_adr).setIdCluster(pp[id_map.at(pair.first)].id_cluster);
            for ( PS::S32 k=0; k<n_out; k++ ){
                if ( k == j ) continue;
                //std::pair<PS::S32,PS::S32> pair2   = ex_list.at(k);
                std::pair<PS::S32,PS::S32> ex_adr2 = ex_adr_list.at(k);
                if ( getExData(ex_adr2).getIdCluster() == getExData(ex_adr).getIdCluster() ) {
                    getExData(ex_adr).exchange(getExData(ex_adr2));
                }
            }
        }
    }

    // One round of pairwise ExPair exchange with every connected rank.
    // Buffers ex_data_send/recv are caller-owned and must be sized to
    // ex_data[rank].size() * ExPair::getSize() words per connected rank.
    // Returns true if any ExPair absorbed new information (callers iterate
    // until globally false).
    template <class Tpsys>
    bool exchangeExData(Tpsys & pp,
                        PS::S32 TAG,
                        PS::S32** & ex_data_send,
                        PS::S32** & ex_data_recv){
        //const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        const PS::S32 n_send = connected_list.size();
        //PS::S32 ** ex_data_send = new PS::S32*[n_send];
        //PS::S32 ** ex_data_recv = new PS::S32*[n_send];
        //for ( PS::S32 ii=0; ii<n_send; ii++ ) {
        //    PS::S32 i = connected_list.at(ii);
        //    PS::S32 n_size = ex_data[i].size() * ExPair::getSize();
        //    ex_data_send[ii] = new PS::S32[n_size];
        //    ex_data_recv[ii] = new PS::S32[n_size];
        //}

        // Pack: serialize every ExPair destined for rank ii into its buffer.
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = connected_list.at(ii);
            PS::S32 n_data = ex_data[i].size();
            PS::S32 jj = 0;
            for ( PS::S32 j=0; j<n_data; j++ ) {
                jj += ex_data[i][j].output(&ex_data_send[ii][jj]);
            }
        }

#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
        MPI_Request req0[n_send], req1[n_send];
        MPI_Status stat0[n_send], stat1[n_send];
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = connected_list.at(ii);
            PS::S32 n_size = ex_data[i].size() * ExPair::getSize();
            MPI_Isend(&ex_data_send[ii][0], n_size, PS::GetDataType(*ex_data_send[ii]), i, TAG, MPI_COMM_WORLD, &req0[ii]);
            MPI_Irecv(&ex_data_recv[ii][0], n_size, PS::GetDataType(*ex_data_recv[ii]), i, TAG, MPI_COMM_WORLD, &req1[ii]);
        }
        MPI_Waitall(n_send, req0, stat0);
        MPI_Waitall(n_send, req1, stat1);
#else
        assert ( n_send == 0 );
#endif

        // Unpack: merge every received pair into our matching ExPair and fold
        // the (possibly lowered) cluster id back into the local particle.
        bool check = false;
#pragma omp parallel for reduction (||:check)
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = connected_list.at(ii);
            PS::S32 n_data = ex_data[i].size();
            PS::S32 jj = 0;
            for ( PS::S32 j=0; j<n_data; j++ ) {
                ExPair recv_pair;
                jj += recv_pair.input(&ex_data_recv[ii][jj]);
                std::pair<PS::S32,PS::S32> adr = ex_data_map.at(recv_pair.getPair());
                assert ( adr.first == i );
                assert ( recv_pair.getPair() == getExData(adr).getPair() );
                bool check_1 = getExData(adr).exchange(recv_pair);
                check = check || check_1;
                //getExData(adr).show();
#pragma omp critical
                {
                    PS::S32 i_loc = id_map.at(getExData(adr).getId());
                    pp[i_loc].id_cluster = std::min(pp[i_loc].id_cluster, getExData(adr).getIdCluster());
                }
            }
            //delete [] ex_data_send[ii];
            //delete [] ex_data_recv[ii];
        }
        //delete [] ex_data_send;
        //delete [] ex_data_recv;

        //PS::Comm::barrier();
        //bool check_glb = PS::Comm::synchronizeConditionalBranchOR(check);

        return check;
    }

    // Decide, per cross-rank cluster, who gathers it: the minimum
    // participating rank receives (from all others), everyone else sends
    // to that minimum rank. Fills send_list/recv_list and the rank lists.
    template <class Tpsys>
    void selectSendRecvParticle(Tpsys & pp){
        const PS::S32 myrank = PS::Comm::getRank();
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        const PS::S32 n_ptcl = ex_list.size();
        std::vector<PS::S32> ex_cluster;
        std::vector<std::pair<PS::S32,PS::S32> > ex_cluster_adr;
        ex_cluster.clear();
        ex_cluster_adr.clear();

        for ( PS::S32 ii=0; ii<n_ptcl; ii++ ) {
            //std::pair<PS::S32,PS::S32> pair = ex_list.at(ii);
            std::pair<PS::S32,PS::S32> adr  = ex_adr_list.at(ii);
            PS::S32 id_cluster = getExData(adr).id_cluster;
            PS::S32 n_l = ex_cluster.size();
            std::pair<PS::S32,PS::S32> adr2 = std::make_pair(-1,-1);
            // Deduplicate by cluster id; all members must agree on the bitmap.
            for (PS::S32 j=0; j<n_l; j++){
                if ( id_cluster == ex_cluster.at(j) ){
                    adr2 = ex_cluster_adr.at(j);
                    assert( getExData(adr).equalFlag(getExData(adr2)) );
                }
            }
            if ( adr2 == std::make_pair(-1,-1) ){
                ex_cluster.push_back(id_cluster);
                ex_cluster_adr.push_back(adr);

                PS::S32 min_rank = getExData(adr).getMinFlag();
                if ( min_rank == myrank ) {
                    for ( PS::S32 j=0; j<n_proc; j++ ) {
                        if ( getExData(adr).checkFlag(j) ) {
                            if ( j == myrank ) continue;
                            recv_list[j].push_back(id_cluster);
                            assert ( j > myrank );
                        }
                    }
                } else {
                    assert ( min_rank < myrank );
                    send_list[min_rank].push_back(id_cluster);
                }
            }
        }

        for ( PS::S32 i=0; i<n_proc; i++ ) {
            if ( recv_list[i].size() ) recv_rank_list.push_back(i);
            if ( send_list[i].size() ) send_rank_list.push_back(i);
        }
    }

 private:
    // Non-copyable: the class holds rank-local communication state.
    void operator =(const NeighborList& NL){}
    NeighborList(const NeighborList& NL) {}
};

// ExParticleSystem: staging area for particles (and their neighbor-id
// arrays) that are shipped to the cluster-owning rank, integrated there,
// and then returned. Tp is the full particle type.
template <class Tp>
class ExParticleSystem {
 public :
    PS::S32 n_send;               // number of ranks we send particles to
    PS::S32 n_recv;               // number of ranks we receive particles from
    PS::S32 n_ex_ptcl_send_tot;   // total particles to send
    PS::S32 n_ex_nei_send_tot;    // total neighbor ids to send
    PS::S32 n_ex_ptcl_recv_tot;   // total particles to receive
    PS::S32 n_ex_nei_recv_tot;    // total neighbor ids to receive

    std::vector<Tp> ex_ptcl_send;        // flat send buffer (particles)
    std::vector<PS::S32> ex_nei_send;    // flat send buffer (neighbor ids)
    std::vector<Tp> ex_ptcl_recv;        // flat recv buffer (particles)
    std::vector<PS::S32> ex_nei_recv;    // flat recv buffer (neighbor ids)
    std::vector<std::vector<PS::S32> > ex_ptcl_send_list; // local indices per target rank
    std::vector<PS::S32*> n_list;        // per received particle: pointer into ex_nei_recv

    std::vector<PS::S32> n_ex_ptcl_send; // per-rank particle counts (send)
    std::vector<PS::S32> n_ex_nei_send;  // per-rank neighbor counts (send)
    std::vector<PS::S32> n_ex_ptcl_recv; // per-rank particle counts (recv)
    std::vector<PS::S32> n_ex_nei_recv;  // per-rank neighbor counts (recv)
    std::vector<PS::S32> adr_ex_ptcl_send; // per-rank offsets into ex_ptcl_send
    std::vector<PS::S32> adr_ex_nei_send;  // per-rank offsets into ex_nei_send
    std::vector<PS::S32> adr_ex_ptcl_recv; // per-rank offsets into ex_ptcl_recv
    std::vector<PS::S32> adr_ex_nei_recv;  // per-rank offsets into ex_nei_recv

    // Indexing and size report a RECEIVED-particle view of this object.
    Tp & operator[](PS::S32 i){ return ex_ptcl_recv[i]; }
    PS::S32 getNumberOfParticleLocal() const { return n_ex_ptcl_recv_tot; }

    void initialize() {
        n_send = n_recv = 0;
        n_ex_ptcl_send_tot = n_ex_ptcl_recv_tot = 0;
        n_ex_nei_send_tot  = n_ex_nei_recv_tot  = 0;

        ex_ptcl_send.clear();
        ex_nei_send.clear();
        ex_ptcl_recv.clear();
        ex_nei_recv.clear();
        ex_ptcl_send_list.clear();

        n_ex_ptcl_send.clear();
        n_ex_nei_send.clear();
        n_ex_ptcl_recv.clear();
        n_ex_nei_recv.clear();
        adr_ex_ptcl_send.clear();
        adr_ex_nei_send.clear();
        adr_ex_ptcl_recv.clear();
        adr_ex_nei_recv.clear();
    }

    // Size the per-rank count/offset arrays for n_send0 targets and
    // n_recv0 sources.
    void resize(PS::S32 n_send0, PS::S32 n_recv0){
        n_send = n_send0;
        n_ex_ptcl_send.resize(n_send);
        n_ex_nei_send.resize(n_send);
        adr_ex_ptcl_send.resize(n_send);
        adr_ex_nei_send.resize(n_send);
        ex_ptcl_send_list.resize(n_send);
#pragma omp parallel for
        for ( PS::S32 i=0; i<n_send; i++ ) ex_ptcl_send_list[i].clear();

        n_recv = n_recv0;
        n_ex_ptcl_recv.resize(n_recv);
        n_ex_nei_recv.resize(n_recv);
        adr_ex_ptcl_recv.resize(n_recv);
        adr_ex_nei_recv.resize(n_recv);
    }

    PS::S32 getNumberOfParticleSend() const { return n_ex_ptcl_send_tot; }
    PS::S32 getNumberOfParticleRecv() const { return n_ex_ptcl_recv_tot; }
    PS::S32 getNumberOfNeighborSend() const { return n_ex_nei_send_tot; }
    PS::S32 getNumberOfNeighborRecv() const { return n_ex_nei_recv_tot; }

    // Count, per target rank, the out-of-domain particles whose cluster id is
    // in that rank's send_list, and remember their local indices.
    template <class Tpsys>
    void inputNumberOfExParticleSend(Tpsys & pp,
                                     NeighborList & NList){
        const PS::S32 n_loc = pp.getNumberOfParticleLocal();
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) n_ex_ptcl_send[ii] = n_ex_nei_send[ii] = 0;
        if ( n_send ) {
#pragma omp parallel for
            for ( PS::S32 i=0; i<n_loc; i++) {
                if ( !pp[i].inDomain ) {
                    for ( PS::S32 jj=0; jj<n_send; jj++ ){
                        PS::S32 j = NList.send_rank_list[jj];
                        PS::S32 n_data = NList.send_list[j].size();
                        for ( PS::S32 k=0; k<n_data; k++ ) {
                            if ( NList.send_list[j][k] == pp[i].id_cluster ) {
#pragma omp critical
                                {
                                    n_ex_ptcl_send[jj] ++;
                                    n_ex_nei_send[jj] += pp[i].neighbor;
                                    assert ( pp[i].neighbor == (PS::S32)(NList.n_list[i].size()) );
                                    ex_ptcl_send_list[jj].push_back(i);
                                }
                            }
                        }
                    }
                }
            }
        }
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) assert( ex_ptcl_send_list[ii].size() );
    }

    // Exchange per-rank particle/neighbor COUNTS (tags TAG, TAG+1) so both
    // sides can size their buffers before the payload transfer.
    void sendRecvNumberOfExParticle(NeighborList & NList,
                                    PS::S32 TAG = 0){
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
        MPI_Request req0[n_send], req1[n_send];
        MPI_Status stat0[n_send], stat1[n_send];
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = NList.send_rank_list[ii];
            MPI_Isend(&n_ex_ptcl_send[ii], 1, PS::GetDataType(n_ex_ptcl_send[0]), i, TAG,   MPI_COMM_WORLD, &req0[ii]);
            MPI_Isend(&n_ex_nei_send[ii],  1, PS::GetDataType(n_ex_nei_send[0]),  i, TAG+1, MPI_COMM_WORLD, &req1[ii]);
        }
        MPI_Request req2[n_recv], req3[n_recv];
        MPI_Status stat2[n_recv], stat3[n_recv];
        for ( PS::S32 ii=0; ii<n_recv; ii++ ) {
            PS::S32 i = NList.recv_rank_list[ii];
            MPI_Irecv(&n_ex_ptcl_recv[ii], 1, PS::GetDataType(n_ex_ptcl_recv[0]), i, TAG,   MPI_COMM_WORLD, &req2[ii]);
            MPI_Irecv(&n_ex_nei_recv[ii],  1, PS::GetDataType(n_ex_nei_recv[0]),  i, TAG+1, MPI_COMM_WORLD, &req3[ii]);
        }
        MPI_Waitall(n_send, req0, stat0);
        MPI_Waitall(n_send, req1, stat1);
        MPI_Waitall(n_recv, req2, stat2);
        MPI_Waitall(n_recv, req3, stat3);
#endif
    }

    // Prefix-sum the per-rank counts into buffer offsets and size the flat
    // send/recv buffers accordingly.
    void inputAdress(){
        n_ex_ptcl_send_tot = n_ex_nei_send_tot = 0;
        for (PS::S32 i=0; i<n_send; i++){
            adr_ex_ptcl_send.at(i) = n_ex_ptcl_send_tot;
            adr_ex_nei_send.at(i)  = n_ex_nei_send_tot;
            n_ex_ptcl_send_tot += n_ex_ptcl_send.at(i);
            n_ex_nei_send_tot  += n_ex_nei_send.at(i);
        }
        n_ex_ptcl_recv_tot = n_ex_nei_recv_tot = 0;
        for (PS::S32 i=0; i<n_recv; i++){
            adr_ex_ptcl_recv.at(i) = n_ex_ptcl_recv_tot;
            adr_ex_nei_recv.at(i)  = n_ex_nei_recv_tot;
            n_ex_ptcl_recv_tot += n_ex_ptcl_recv.at(i);
            n_ex_nei_recv_tot  += n_ex_nei_recv.at(i);
        }

        ex_ptcl_send.resize(n_ex_ptcl_send_tot);
        ex_nei_send.resize(n_ex_nei_send_tot);
        ex_ptcl_recv.resize(n_ex_ptcl_recv_tot);
        ex_nei_recv.resize(n_ex_nei_recv_tot);
        n_list.resize(n_ex_ptcl_recv_tot);
    }

    // Pack outgoing particles (marking them isSent) and their neighbor-id
    // lists into the flat send buffers.
    template <class Tpsys>
    void inputExParticleSend(Tpsys & pp,
                             NeighborList & NList){
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 n_data   = n_ex_ptcl_send.at(ii);
            PS::S32 adr_ptcl = adr_ex_ptcl_send.at(ii);
            PS::S32 adr_nei  = adr_ex_nei_send.at(ii);
            PS::S32 n_nei = 0;
            for ( PS::S32 jj=0; jj<n_data; jj++ ) {
                PS::S32 j = ex_ptcl_send_list[ii].at(jj);
                pp[j].isSent = true;
                ex_ptcl_send.at(adr_ptcl + jj) = pp[j];
                assert( !pp[j].inDomain );
                for ( PS::S32 k=0; k<pp[j].neighbor; k++ ) {
                    ex_nei_send.at(adr_nei + n_nei) = NList.n_list[j].at(k);
                    n_nei ++;
                }
            }
            assert ( n_ex_nei_send.at(ii) == n_nei );
        }
    }

    // Ship the packed particles (tag TAG+2) and neighbor ids (tag TAG+3).
    void sendRecvExParticle(NeighborList & NList,
                            PS::S32 TAG = 0){
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
        MPI_Request req0[n_send], req1[n_send];
        MPI_Status stat0[n_send], stat1[n_send];
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = NList.send_rank_list[ii];
            MPI_Isend(&ex_ptcl_send[adr_ex_ptcl_send[ii]], n_ex_ptcl_send[ii], PS::GetDataType(ex_ptcl_send[0]), i, TAG+2, MPI_COMM_WORLD, &req0[ii]);
            MPI_Isend(&ex_nei_send[adr_ex_nei_send[ii]],   n_ex_nei_send[ii],  PS::GetDataType(ex_nei_send[0]),  i, TAG+3, MPI_COMM_WORLD, &req1[ii]);
        }
        MPI_Request req2[n_recv], req3[n_recv];
        MPI_Status stat2[n_recv], stat3[n_recv];
        for ( PS::S32 ii=0; ii<n_recv; ii++ ) {
            PS::S32 i = NList.recv_rank_list[ii];
            MPI_Irecv(&ex_ptcl_recv[adr_ex_ptcl_recv[ii]], n_ex_ptcl_recv[ii], PS::GetDataType(ex_ptcl_recv[0]), i, TAG+2, MPI_COMM_WORLD, &req2[ii]);
            MPI_Irecv(&ex_nei_recv[adr_ex_nei_recv[ii]],   n_ex_nei_recv[ii],  PS::GetDataType(ex_nei_recv[0]),  i, TAG+3, MPI_COMM_WORLD, &req3[ii]);
        }
        MPI_Waitall(n_send, req0, stat0);
        MPI_Waitall(n_send, req1, stat1);
        MPI_Waitall(n_recv, req2, stat2);
        MPI_Waitall(n_recv, req3, stat3);
#endif
    }

    // Rebuild, for each received particle, a pointer to the start of its
    // neighbor-id slice inside ex_nei_recv.
    void inputNeighborListOfExParticleRecv() {
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_recv; ii++ ) {
            PS::S32 n_data   = n_ex_ptcl_recv.at(ii);
            PS::S32 adr_ptcl = adr_ex_ptcl_recv.at(ii);
            PS::S32 n_nei    = adr_ex_nei_recv.at(ii);
            for ( PS::S32 jj=0; jj<n_data; jj++ ) {
                n_list.at(adr_ptcl + jj) = &(ex_nei_recv.at(n_nei));
                n_nei += ex_ptcl_recv.at(adr_ptcl + jj).neighbor;
                assert ( ex_ptcl_recv.at(adr_ptcl + jj).isSent );
            }
            if ( ii+1<n_recv ) assert ( adr_ex_nei_recv.at(ii+1) == n_nei );
        }
    }

    // Reverse transfer: integrated particles flow back, so the send buffers
    // now RECEIVE (tags TAG+4, TAG+5) and the recv buffers SEND.
    void returnExParticle(NeighborList & NList,
                          PS::S32 TAG = 0){
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
        MPI_Request req0[n_send], req1[n_send];
        MPI_Status stat0[n_send], stat1[n_send];
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = NList.send_rank_list[ii];
            MPI_Irecv(&ex_ptcl_send[adr_ex_ptcl_send[ii]], n_ex_ptcl_send[ii], PS::GetDataType(ex_ptcl_send[0]), i, TAG+4, MPI_COMM_WORLD, &req0[ii]);
            MPI_Irecv(&ex_nei_send[adr_ex_nei_send[ii]],   n_ex_nei_send[ii],  PS::GetDataType(ex_nei_send[0]),  i, TAG+5, MPI_COMM_WORLD, &req1[ii]);
        }
        MPI_Request req2[n_recv], req3[n_recv];
        MPI_Status stat2[n_recv], stat3[n_recv];
        for ( PS::S32 ii=0; ii<n_recv; ii++ ) {
            PS::S32 i = NList.recv_rank_list[ii];
            MPI_Isend(&ex_ptcl_recv[adr_ex_ptcl_recv[ii]], n_ex_ptcl_recv[ii], PS::GetDataType(ex_ptcl_recv[0]), i, TAG+4, MPI_COMM_WORLD, &req2[ii]);
            MPI_Isend(&ex_nei_recv[adr_ex_nei_recv[ii]],   n_ex_nei_recv[ii],  PS::GetDataType(ex_nei_recv[0]),  i, TAG+5, MPI_COMM_WORLD, &req3[ii]);
        }
        MPI_Waitall(n_send, req0, stat0);
        MPI_Waitall(n_send, req1, stat1);
        MPI_Waitall(n_recv, req2, stat2);
        MPI_Waitall(n_recv, req3, stat3);
#endif
    }

    // Copy returned (integrated) particles back into the local system,
    // verifying that ids survived the round trip for live particles.
    template <class Tpsys>
    void outputExParticleSend(Tpsys & pp,
                              NeighborList & NList){
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 n_data   = n_ex_ptcl_send.at(ii);
            PS::S32 adr_ptcl = adr_ex_ptcl_send.at(ii);
            for ( PS::S32 jj=0; jj<n_data; jj++ ) {
                PS::S32 j = ex_ptcl_send_list[ii].at(jj);
                PS::S32 id_pre = pp[j].id;
                pp[j] = ex_ptcl_send.at(adr_ptcl + jj);
                if (!pp[j].isDead) assert( pp[j].id == id_pre );
            }
        }
    }
};
MG_Power_Spectrum.c
/*
 * MG_Power_Spectrum.c
 *
 * Linear matter power spectrum in modified gravity (model 1: Hu-Sawicki
 * f(R), model 2: Symmetron).  A CAMB run provides P(k) at z = 100 and
 * z = 99; those fix the initial conditions for the linear growth ODE,
 * which is integrated to a = 1 with RK4 (one independent ODE per k,
 * parallelized with OpenMP).  Outputs: Matter_Power_MG.dat, Sigma_MG.dat
 * and sigma_8 on stdout.
 *
 * Review fixes relative to the previous revision:
 *  - the CAMB-output read loops used while(!feof(...)), which counts the
 *    last row twice; they now loop on fscanf()'s return value (and are
 *    bounded by the array size), so the cont-1/cont-2 compensations
 *    elsewhere were removed,
 *  - phi() leaked its Runge-Kutta velocity buffer,
 *  - Mu() returned an uninitialized value for model values other than
 *    1 or 2; it now defaults to the GR limit 1.0,
 *  - the RK4 loop in main() took N steps of size (1-aini)/(N-1), i.e.
 *    one step past a = 1; it now takes N-1 steps like phi() does,
 *  - sprintf() into the 20-byte paramfile could overflow; snprintf now,
 *  - two garbled CAMB keys ("lenspote ntialCls.dat" and
 *    "bispec trum_full_output_file") had stray embedded spaces,
 *  - string literal is bound to const char*, unused locals removed.
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

#define PI 3.141592
#define Nphi 500
#define Ndel 200

/* Cosmological parameters (CAMB conventions). */
typedef struct Cosmology {
    float H0;   /* Hubble constant */
    float w;    /* dark-energy equation of state */
    float Ob;   /* baryon density */
    float Odm;  /* dark-matter density */
    float Om;   /* total matter density (set to Ob + Odm in main) */
    float Ol;   /* dark-energy density */
    float Ok;   /* curvature density */
    float Onu;  /* neutrino density */
    float A;    /* scalar amplitude */
    float ns;   /* scalar spectral index */
    float Yhe;  /* helium fraction */
    float T;    /* CMB temperature */
} COSMOLOGY;

/* Hu-Sawicki f(R) parameters. */
typedef struct HuSawicki {
    float n;
    float fr0;
} HS;

/* Symmetron parameters. */
typedef struct Symmetron {
    float L;     /* range of the fifth force */
    float zssb;  /* symmetry-breaking redshift */
    float beta;  /* coupling */
} SYM;

/* Globals describing the chosen cosmology and modified-gravity model
 * (model == 1: Hu-Sawicki, model == 2: Symmetron). */
int model;
float *u;
COSMOLOGY cosmo;
HS hs;
SYM sym;

/* Hubble rate over H0: E(a) = H(a)/H0. */
float E(float a){
    float resp;
    resp = sqrt(cosmo.Om*pow(a,-3) + cosmo.Ok*pow(a,-2) + cosmo.Ol*pow(a,-3.0*(1.0 + cosmo.w)));
    return resp;
}

/* dE/da of the expression above (note: only the Om and Ok terms are
 * differentiated; the w != -1 dark-energy term is not — kept as-is). */
float dE(float a){
    float resp;
    resp = -0.5*(3.0*cosmo.Om*pow(a,-4) + 2.0*cosmo.Ok*pow(a,-3))/E(a);
    return resp;
}

/* \dot{v} = \ddot{u}: acceleration of the dimensionless symmetron field. */
float ddu(float a, float u, float v, float mu2, float assb){
    float resp;
    resp = -(4.0/a + dE(a)/E(a))*v - mu2/(a*a*E(a)*E(a))*((pow(assb/a,3) - 1.0)*u + pow(u,3));
    return resp;
}

/* \dot{u} = v. */
float du(float a, float u, float v){
    return v;
}

/*
 * Integrate the background symmetron field from aini to 1 with RK4.
 * u[] receives N samples; assb is the symmetry-breaking scale factor,
 * mu2 the mass-scale parameter.
 */
void phi(float aini, float assb, float mu2, float u[], int N){
    float *v, h, a;
    float q1, q2, q3, q4, k1, k2, k3, k4;
    int i;

    /* Velocity buffer for the second-order ODE. */
    v = (float*)malloc(N*sizeof(float));
    if (v == NULL) {
        printf("Unable to allocate memory in phi\n");
        exit(0);
    }

    /* Initial conditions. */
    u[0] = 1e-3;
    v[0] = 1e-3;

    /* Step size: N-1 steps land exactly on a = 1. */
    h = (1.0 - aini)/(N-1);

    /* RK4 march in the scale factor. */
    a = aini;
    for(i=0;i<N-1;i++){
        q1 = du(a, u[i], v[i]);
        k1 = ddu(a, u[i], v[i], mu2, assb);
        q2 = du(a+h/2.0, u[i]+h/2.0*q1, v[i]+h/2.0*k1);
        k2 = ddu(a+h/2.0, u[i]+h/2.0*q1, v[i]+h/2.0*k1, mu2, assb);
        q3 = du(a+h/2.0, u[i]+h/2.0*q2, v[i]+h/2.0*k2);
        k3 = ddu(a+h/2.0, u[i]+h/2.0*q2, v[i]+h/2.0*k2, mu2, assb);
        q4 = du(a+h, u[i]+h*q3, v[i]+h*k3);
        k4 = ddu(a+h, u[i]+h*q3, v[i]+h*k3, mu2, assb);
        u[i+1] = u[i] + h/6.0*(q1 + 2.0*q2 + 2.0*q3 + q4);
        v[i+1] = v[i] + h/6.0*(k1 + 2.0*k2 + 2.0*k3 + k4);
        a = a + h;
    }

    free(v);  /* was leaked before */
}

/* Effective symmetron mass squared. */
float Mass_sym(float a, float assb, float mu2){
    float m2;
    if(a < assb) m2 = mu2*(pow(assb/a, 3.0) - 1.0);
    else m2 = 2.0*mu2*(1.0 - pow(assb/a, 3.0));
    return m2;
}

/* Squared Compton wavelength of the symmetron field. */
float Lamb_sym(float a, float assb){
    float lamb2;
    if(a < assb) lamb2 = 2.0*pow(sym.L,2)/(pow(assb/a, 3.0) - 1.0);
    else lamb2 = pow(sym.L,2)/(1.0 - pow(assb/a, 3.0));
    return lamb2;
}

/* Value of the potential minimum (0 before symmetry breaking). */
float min(float a, float assb){
    float resp;
    if(a<assb) resp = 0.0;
    else resp = sqrt(1.0 - pow(assb/a, 3.0));
    return resp;
}

/*
 * Effective gravitational coupling mu(k,a) entering the modified
 * Poisson equation.  Returns 1.0 (GR limit) if no supported model is
 * selected (previously the return value was uninitialized in that case).
 */
float Mu(float k, float a){
    float resp = 1.0;
    if(model == 1){
        float m0, m;
        m0 = (100.0/299792.0)*sqrt((cosmo.Om + 4.0*cosmo.Ol)/((hs.n + 1.0)*hs.fr0));
        m = m0*pow((cosmo.Om*pow(a,-3)+4.0*cosmo.Ol)/(cosmo.Om + 4.0*cosmo.Ol),(hs.n+2.0)/2.0);
        resp = ((4.0/3.0)*pow(k,2) + pow(m*a,2))/(pow(k,2) + pow(m*a,2));
    }
    if(model == 2){
        float assb;
        assb = 1.0/(1.0 + sym.zssb);
        /* No fifth force before symmetry breaking. */
        if(a<=assb) resp = 1.0;
        else resp = 1.0 + (2.0*pow(sym.beta*sym.L*k, 2.0)*(1.0 - pow(assb/a,3.0)))/(pow(k*sym.L,2.0) + a*a - pow(assb,3)/a);
    }
    return resp;
}

/* \dot{v} = \ddot{\delta}: linear growth equation with MG coupling Mu. */
float dv(float k, float a, float d, float v){
    float resp;
    resp = -(3.0/a + dE(a)/E(a))*v + 3.0/2.0*cosmo.Om*Mu(k, a)/(pow(E(a),2)*pow(a,5))*d;
    return resp;
}

/* \dot{\delta} = v. */
float dd(float a, float d, float v){
    return v;
}

/*
 * Write the CAMB input file (params_mg.ini) requesting transfer
 * functions and matter power spectra at z = zini+1 and z = zini,
 * with transfer_kmax = 2*pi/Rmin.
 */
void Create_Params_ini(float Rmin, float zini){
    FILE *camb;
    const char *cambfile = "params_mg.ini";
    float kmax;

    kmax = 2.0*PI/Rmin;

    camb = fopen(cambfile, "w");
    if (camb == NULL) {
        printf("Unable to open %s\n",cambfile);
        exit(0);
    }

    fprintf(camb, "output_root = test\n"
    "get_scalar_cls = F\nget_vector_cls = F\nget_tensor_cls = F\nget_transfer = T\n"
    "do_lensing = T\n"
    "do_nonlinear = 0\n"
    "l_max_scalar = 2200\nk_eta_max_scalar = 4000\n"
    "l_max_tensor = 1500\nk_eta_max_tensor = 3000\n"
    "use_physical = F\n#ombh2 = 0.0226\n#omch2 = 0.112\n#omnuh2 = 0\nomk = %f\nhubble = %f\n"
    "w = %f\ncs2_lam = 1\n"
    "omega_baryon = %f\nomega_cdm = %f\nomega_lambda = %f\nomega_neutrino = %f\n"
    "temp_cmb = %f\nhelium_fraction = %f\n"
    "massless_neutrinos = 3.046\nmassive_neutrinos = 0\n"
    "nu_mass_eigenstates = 0\nnu_mass_degeneracies = 0\nnu_mass_fractions = 1\n"
    "initial_power_num = 1\npivot_scalar = 0.05\npivot_tensor = 0.05\nscalar_amp(1) = %e\nscalar_spectral_index(1) = %f\nscalar_nrun(1) = 0\ntensor_spectral_index(1) = 0\ninitial_ratio(1) = 1\n"
    "reionization = T\n"
    "re_use_optical_depth = T\nre_optical_depth = 0.09\nre_redshift = 11\nre_delta_redshift = 1.5\nre_ionization_frac = -1\n"
    "RECFAST_fudge = 1.14\nRECFAST_fudge_He = 0.86\nRECFAST_Heswitch = 6\nRECFAST_Hswitch = T\n"
    "initial_condition = 1\ninitial_vector = -1 0 0 0 0\n"
    "vector_mode = 0\n"
    "COBE_normalize = F\nCMB_outputscale = 7.42835025e12\n"
    "transfer_high_precision = F\ntransfer_kmax = %f\ntransfer_k_per_logint = 0\ntransfer_num_redshifts = 2\ntransfer_interp_matterpower = T\ntransfer_redshift(1) = %f\ntransfer_redshift(2) = %f\ntransfer_filename(1) = transfer_out100.dat\ntransfer_filename(2) = transfer_out99.dat\ntransfer_matterpower(1) = matterpower100.dat\ntransfer_matterpower(2) = matterpower99.dat\n"
    "scalar_output_file = scalCls.dat\nvector_output_file = vecCls.dat\ntensor_output_file = tensCls.dat\ntotal_output_file = totCls.dat\nlensed_output_file = lensedCls.dat\nlensed_total_output_file =lensedtotCls.dat\nlens_potential_output_file = lenspotentialCls.dat\nFITS_filename = scalCls.fits\n"
    "do_lensing_bispectrum = F\ndo_primordial_bispectrum = F\n"
    "bispectrum_nfields = 1\nbispectrum_slice_base_L = 0\nbispectrum_ndelta=3\nbispectrum_delta(1)=0\nbispectrum_delta(2)=2\nbispectrum_delta(3)=4\nbispectrum_do_fisher= F\nbispectrum_fisher_noise=0\nbispectrum_fisher_noise_pol=0\nbispectrum_fisher_fwhm_arcmin=7\nbispectrum_full_output_file=\nbispectrum_full_output_sparse=F\nbispectrum_export_alpha_beta=F\n"
    "feedback_level = 1\n"
    "derived_parameters = F\n"
    "lensing_method = 1\naccurate_BB = F\n"
    "massive_nu_approx = 1\n"
    "accurate_polarization = T\n"
    "accurate_reionization = T\n"
    "do_tensor_neutrinos = T\n"
    "do_late_rad_truncation = T\n"
    "number_of_threads = 0\n"
    "high_accuracy_default=T\n"
    "accuracy_boost = 1\n"
    "l_accuracy_boost = 1\n"
    "l_sample_boost = 1\n",
    cosmo.Ok, cosmo.H0, cosmo.w, cosmo.Ob, cosmo.Odm, cosmo.Ol, cosmo.Onu, cosmo.T, cosmo.Yhe, cosmo.A, cosmo.ns, kmax, zini + 1.0, zini);

    fclose(camb);
}

/* Fourier transform of the spherical top-hat window of radius R. */
float W(float k, float R){
    float resp;
    resp = 3.0/(pow(k*R,2))*(sin(k*R)/(k*R) - cos(k*R));
    return resp;
}

/*
 * sigma^2(R): trapezoidal integral of P(k) k^2 W^2(kR) over the cont
 * tabulated points.  The bound is i < cont-1 so every interval is used
 * (the old i < cont-2 silently dropped the last one, compensating for
 * the feof over-read that is now fixed).
 */
float calc_sigma(float *k, float *P, int cont, float R){
    int i;
    float resp;
    resp = 0.0;
    for(i=0;i<cont-1;i++)
        resp += (k[i+1] - k[i])/2.0*(P[i]*pow(k[i],2)*pow(W(k[i],R),2) + P[i+1]*pow(k[i+1],2)*pow(W(k[i+1],R),2));
    return resp/(2.0*PI*PI);
}

int main(int argc,char *argv[])
{
    FILE *param, *camb;
    char paramfile[20];
    float Rmin, Rmax, zini;
    float k[1000], Pini[1000], Paux[1000], d[1000], v[1000], P[1000], aini, aaux, h, a;
    float q1, q2, q3, q4, k1, k2, k3, k4, trash;
    float sigma[100], R[100];
    int cont, i, j, N;

    if (argc != 2){
        printf("Wrong number of arguments.\n");
        printf("arg1: The name of parameters file.\n\n");
        exit(0);
    }

    /* snprintf: paramfile is only 20 bytes (sprintf could overflow). */
    snprintf(paramfile, sizeof(paramfile), "%s", argv[1]);

    /* Open the parameters file. */
    param = fopen(paramfile, "r");
    if (param == NULL) {
        printf("Unable to open %s\n",paramfile);
        exit(0);
    }

    /* Read the parameters. */
    fscanf(param,"%d", &model);
    fscanf(param,"%f %f", &Rmin, &Rmax);
    if(model == 1) fscanf(param,"%f %f %f", &hs.n, &hs.fr0, &trash);
    if(model == 2) fscanf(param,"%f %f %f", &sym.zssb, &sym.beta, &sym.L);
    fscanf(param,"%f %f %f %f %f %f %f %f %f %f %f", &cosmo.H0, &cosmo.w, &cosmo.Odm, &cosmo.Ob, &cosmo.Ol, &cosmo.Ok, &cosmo.Onu, &cosmo.Yhe, &cosmo.A, &cosmo.ns, &cosmo.T);
    cosmo.Om = cosmo.Ob + cosmo.Odm;
    zini = 99.0;
    fclose(param);

    printf("%d %f %f %f\n", model, sym.zssb, sym.beta, sym.L);

    /* Create the params.ini file for CAMB and run it at zini. */
    Create_Params_ini(Rmin, zini);
    system("~/Documentos/CAMB/camb/camb params_mg.ini");

    /* Read the CAMB output (z=99).  Looping on fscanf's return value
     * (instead of !feof) avoids counting the last row twice; the bound
     * also protects the fixed-size arrays. */
    camb = fopen("test_matterpower99.dat", "r");
    if (camb == NULL) {
        printf("Unable to open %s\n", "test_matterpower99.dat");
        exit(0);
    }
    cont = 0;
    while(cont < 1000 && fscanf(camb,"%f %f", &k[cont], &Pini[cont]) == 2) cont ++;
    fclose(camb);

    /* Read the auxiliary power spectrum (z=100). */
    camb = fopen("test_matterpower100.dat", "r");
    if (camb == NULL) {
        printf("Unable to open %s\n", "test_matterpower100.dat");
        exit(0);
    }
    cont = 0;
    while(cont < 1000 && fscanf(camb,"%f %f", &k[cont], &Paux[cont]) == 2) cont ++;
    fclose(camb);

    /* Delete the created files.  NOTE(review): CAMB's input file is
     * params_mg.ini; "test_params.ini" never exists, so rm reports it
     * and removes the rest — verify the intended file list. */
    system("rm test_params.ini test_transfer_out99.dat test_transfer_out100.dat test_matterpower99.dat test_matterpower100.dat");

    /* Initial conditions: delta = sqrt(P) at z=99, velocity from a
     * finite difference between z=100 and z=99. */
    aini = 1.0/(1.0 + zini);
    aaux = 1.0/(2.0 + zini);
    for(i=0;i<cont;i++){
        d[i] = sqrt(Pini[i]);
        v[i] = (d[i] - sqrt(Paux[i]))/(aini - aaux);
    }

    /* Solve the growth ODE: N-1 RK4 steps of size h reach exactly a=1. */
    N = Ndel;
    h = (1.0 - aini)/(N-1);

    omp_set_num_threads(6);
    /* Each k-mode (index j) is independent; all RK4 temporaries and the
     * running scale factor are thread-private. */
    #pragma omp parallel for private(i, q1, q2, q3, q4, k1, k2, k3, k4, a)
    for(j=0;j<cont;j++){
        a = aini;
        for(i=0;i<N-1;i++){
            q1 = dd(a, d[j], v[j]);
            k1 = dv(k[j], a, d[j], v[j]);
            q2 = dd(a+h/2.0, d[j]+h/2.0*q1, v[j]+h/2.0*k1);
            k2 = dv(k[j], a+h/2.0, d[j]+h/2.0*q1, v[j]+h/2.0*k1);
            q3 = dd(a+h/2.0, d[j]+h/2.0*q2, v[j]+h/2.0*k2);
            k3 = dv(k[j], a+h/2.0, d[j]+h/2.0*q2, v[j]+h/2.0*k2);
            q4 = dd(a+h, d[j]+h*q3, v[j]+h*k3);
            k4 = dv(k[j], a+h, d[j]+h*q3, v[j]+h*k3);
            d[j] = d[j] + h/6.0*(q1 + 2.0*q2 + 2.0*q3 + q4);
            v[j] = v[j] + h/6.0*(k1 + 2.0*k2 + 2.0*k3 + k4);
            a = a + h;
        }
    }

    /* P(k) today. */
    for(i=0;i<cont;i++) P[i] = d[i]*d[i];

    camb = fopen("Matter_Power_MG.dat", "w");
    if (camb == NULL) {
        printf("Unable to open %s\n", "Matter_Power_MG.dat");
        exit(0);
    }
    for(i=0;i<cont;i++) fprintf(camb,"%f %f\n", k[i], P[i]);
    fclose(camb);

    /* Radius grid, log10-spaced between Rmin and Rmax. */
    h = (log10(Rmax) - log10(Rmin))/99.0;
    for(i=0;i<100;i++) R[i] = pow(10, log10(Rmin) + i*h);

    /* sigma(R): each radius is independent. */
    omp_set_num_threads(6);
    #pragma omp parallel for
    for(i=0;i<100;i++) sigma[i] = sqrt(calc_sigma(k, P, cont, R[i]));

    /* Print the variance. */
    camb = fopen("Sigma_MG.dat", "w");
    if (camb == NULL) {
        printf("Unable to open %s\n", "Sigma_MG.dat");
        exit(0);
    }
    for(i=0;i<100;i++) fprintf(camb,"%f %f\n", R[i], sigma[i]);
    fclose(camb);

    /* Print sigma_8. */
    printf("Sigma_8 = %f\n", sqrt(calc_sigma(k, P, cont, 8.0)));

    return 0;
}
GB_unaryop__minv_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_uint64_uint64
// op(A') function: GB_tran__minv_uint64_uint64

// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
// (GB_IMINV_UNSIGNED is defined in GB.h; presumably the 64-bit unsigned
// integer "multiplicative inverse" — confirm semantics there.)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entrywise to the anz values of Ax, writing Cx.
// Each iteration touches only Cx [p], so the static-schedule parallel
// loop is race-free.

GrB_Info GB_unop__minv_uint64_uint64
(
    uint64_t *restrict Cx,        // output array
    const uint64_t *restrict Ax,  // input array
    int64_t anz,                  // number of entries
    int nthreads                  // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in GB_unaryop_transpose.c, which is
// textually included here with this file's GB_* macros in effect.

GrB_Info GB_tran__minv_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
main.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "omp.h" #include "functions.h" int main (int argc, char **argv) { int Nthreads = 1; omp_set_num_threads(Nthreads); //seed value for the randomizer double seed = clock(); //this will make your program run differently everytime //double seed = 0; //uncomment this and your program will behave the same everytime it's run srand(seed); //declare storage for an ElGamal cryptosytem unsigned int p, g, h, x; //begin with rank 0 getting user's input unsigned int n; printf("Enter a number of bits: "); fflush(stdout); char status = scanf("%u",&n); //make sure the input makes sense if ((n<8)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars) printf("Unsupported bit size.\n"); return 0; } printf("\n"); //setup an ElGamal cryptosystem setupElGamal(n,&p,&g,&h,&x); int bufferSize = 1024; unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char)); //populate the string with a message strcpy(message, "Hello, this is the message as a string."); printf("Message = \"%s\"\n", message); /* Q1.1 Finish this line */ unsigned int charsPerInt =(n-1)/8; padString(message, charsPerInt); printf("Padded Message = \"%s\"\n", message); unsigned int Nchars = strlen(message); unsigned int Nints = strlen(message)/charsPerInt; //storage for message as elements of Z_p unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); //storage for extra encryption coefficient unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); // cast the string into an unsigned int array convertStringToZ(message, Nchars, Zmessage, Nints); //Encrypt the Zmessage with the ElGamal cyrptographic system ElGamalEncrypt(Zmessage,a,Nints,p,g,h); printf("The encrypted text is: "); for (unsigned int i=0;i<Nints;i++) { printf("(%u,%u) ", Zmessage[i], a[i]); } printf("]\n"); //Decrypt the Zmessage with the ElGamal cyrptographic system 
ElGamalDecrypt(Zmessage,a,Nints,p,x); convertZToString(Zmessage, Nints, message, Nchars); printf("Decrypted Message = \"%s\"\n", message); printf("\n"); //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel printf("Using %d OpenMP threads to find the secret key...\n", Nthreads); /* Q2.3 Parallelize this loop with OpenMP */ double startTime = omp_get_wtime(); #pragma omp parallel for reduction(+:h) for (unsigned int i=0;i<p-1;i++) { if (modExp(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i); } } double endTime = omp_get_wtime(); double totalTime = endTime-startTime; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); return 0; }
2761.c
/*
 * Compile using the command:
 * `cc 27Stencil.c -o oa -fopenmp -lm`
 *
 * 27-point stencil benchmark: runs ITERATIONS sweeps of a 3D stencil on
 * the host, then again inside an accelerator region, times the latter,
 * and accepts the run only if the two results agree to TOLERANCE.
 */
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef _OPENACC
#include <openacc.h>
#endif

#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10 /* Default repetitions. */
#define CONF95 1.96
#define ITERATIONS 10
#define FAC (1./26)
#define TOLERANCE 1.0e-15

extern int reps; /* Repetitions. */
extern double *times; /* Array to store results in. */
extern int flag; /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize; /* Datasize passed to benchmark functions. */

unsigned int datasize = -1; /* Datasize for tests in bytes ((unsigned)-1 = "not set"). */
int reps = -1; /* Repetitions (-1 = "not set"). */
double *times; /* Array of doubles storing the benchmark times in microseconds. */
double testtime; /* The average test time in microseconds for reps runs. */
double testsd; /* The standard deviation in the test time in microseconds for reps runs. */
int flag = 0; /* 0 indicates CPU. */

/*
 * Function prototypes for common functions.
 */
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);

/* Forward Declarations of utility functions*/
double max_diff(double *, double *, int);
void wul();

/* Print command-line usage and the built-in defaults. */
void usage(char *argv[]) {
    printf("Usage: %s \n"
           "\t--reps <repetitions> (default %d)\n"
           "\t--datasize <datasize> (default %d bytes)\n",
           argv[0], DEFAULT_REPS, DEFAULT_DATASIZE);
}

/*
 * This function parses the parameters from the command line.
 * Recognised flags: --reps N, --datasize BYTES, -h.  Invalid or
 * unparseable values exit with EXIT_FAILURE.
 */
void parse_args(int argc, char *argv[]) {
    int arg;
    for (arg = 1; arg < argc; arg++) {
        if (strcmp(argv[arg], "--reps") == 0) {
            reps = atoi(argv[++arg]);
            if (reps == 0) {
                printf("Invalid integer:--reps: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--datasize") == 0) {
            datasize = atoi(argv[++arg]);
            if (datasize == 0) {
                printf("Invalid integer:--datasize: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "-h") == 0) {
            usage(argv);
            exit(EXIT_SUCCESS);
        } else {
            printf("Invalid parameters: %s\n", argv[arg]);
            usage(argv);
            exit(EXIT_FAILURE);
        }
    }
}

/*
 * Compute mean (*mtp) and standard deviation (*sdp) of the times[]
 * array, skipping zero entries (zero marks a failed repetition).
 * NOTE(review): if every repetition failed, good_reps is 0 and both
 * divisions below are by zero — confirm this case cannot happen or
 * guard it.
 */
void stats(double *mtp, double *sdp) {
    double meantime, totaltime, sumsq, mintime, maxtime, sd;
    int i, good_reps;

    mintime = 1.0e10;
    maxtime = 0.;
    totaltime = 0.;
    good_reps = 0;

    for (i = 0; i < reps; i++) {
        /* Skip entries where times is 0, this indicates an error occurred */
        if (times[i] != 0){
            mintime = (mintime < times[i]) ? mintime : times[i];
            maxtime = (maxtime > times[i]) ? maxtime : times[i];
            totaltime += times[i];
            good_reps++;
        }
    }

    meantime = totaltime / good_reps;

    sumsq = 0;
    for (i = 0; i < reps; i++) {
        if (times[i] != 0){
            sumsq += (times[i] - meantime) * (times[i] - meantime);
        }
    }
    sd = sqrt(sumsq / good_reps);

    *mtp = meantime;
    *sdp = sd;
}

/*
 * This function prints the results of the tests.
 * Currently only the mean time in microseconds is printed; the full
 * "compiler name datasize time sd" line is commented out below.
 * If you use a compiler which sets a different preprocessor flag
 * you may wish to add it here.
 */
void print_results(char *name, double testtime, double testsd) {
    char compiler[20];
    /* Set default compiler identifier. */
    sprintf(compiler, "COMPILER");
    /* Set compiler identifier based on known preprocessor flags. */
#ifdef __PGI
    sprintf(compiler, "PGI");
#endif
#ifdef __HMPP
    sprintf(compiler, "CAPS");
#endif
    //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6);
    printf("%f\n", testtime*1e6);
}

/*
 * This function initialises the storage for the test results and sets the
 * defaults (DEFAULT_REPS / DEFAULT_DATASIZE) for anything not given on
 * the command line.
 */
void init(int argc, char **argv) {
    parse_args(argc, argv);

    if (reps == -1) {
        reps = DEFAULT_REPS;
    }
    if (datasize == (unsigned int)-1) {
        datasize = DEFAULT_DATASIZE;
    }

    times = (double *)malloc((reps) * sizeof(double));

    /*
    #ifdef __PGI
    acc_init(acc_device_nvidia);
    // printf("PGI INIT\n");
    #endif
    #ifdef __HMPP
    int a[5] = {1,2,3,4,5};
    #pragma acc data copyin(a[0:5])
    {}
    #endif
    #ifdef _CRAYC
    int a[5] = {1,2,3,4,5};
    #pragma acc data copyin(a[0:5])
    {}
    #endif
    */
}

/* Release the results storage allocated in init(). */
void finalise(void) {
    free(times);
}

/*
 * This function runs the benchmark specified.
 * The test function signals failure through sentinel return values:
 * -10000 = allocation failure, -11000 = CPU/GPU mismatch; failed reps
 * are recorded as 0 so stats() skips them.
 */
void benchmark(char *name, double (*test)(void))
{
    int i = 0;
    double tmp = 0;

    for (i=0; i<reps; i++) {
        tmp = test();
        if (tmp == -10000){
            printf("Memory allocation failure in %s\n", name);
            times[i] = 0;
        }
        else if (tmp == -11000){
            printf("CPU/GPU mismatch in %s\n", name);
            times[i] = 0;
        }
        else{
            times[i] = tmp;
        }
    }

    stats(&testtime, &testsd);
    //printf("in benchmark\n");
    print_results(name, testtime, testsd);
    //printf("printed result\n");
}

/*
 * 27-point stencil test.  Grid edge sz is derived from datasize (two
 * work arrays of doubles with a one-cell halo).  Returns the elapsed
 * time of the accelerated section, or a sentinel (-10000 / -11000) on
 * failure.
 *
 * NOTE(review): the accelerated section mixes an OpenACC data region
 * with OpenMP "target teams distribute"/"simd" pragmas on the first
 * loop nest and OpenACC "parallel loop" on the second — whether both
 * models cooperate on the same data region is compiler-specific;
 * confirm this is the intended variant.
 */
double stencil() {
    extern unsigned int datasize;
    int sz = cbrt((datasize/sizeof(double))/2);
    int i, j, k, iter;
    int n = sz-2;          /* interior size, excluding the halo */
    double fac = FAC;
    double t1, t2;
    double md;
    //printf("size = %d\n", sz);

    /* Work buffers, with halos */
    double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);

    if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){
        /* Something went wrong in the memory allocation here, fail gracefully */
        return(-10000);
    }

    /* initialize input array a0 */

    /* zero all of array (including halos) */
    //printf("size = %d\n", sz);
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = 0.0;
                //printf("%d\t", (i*sz*sz+j*sz+k));
            }
        }
    }
    //printf("\n");
    //int size_of_a0 = sizeof(a0) / sizeof(*a0);
    //printf("size of a0 = %d\n", size_of_a0);

    /* use random numbers to fill interior */
    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
            }
        }
    }

    /* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */
    /* save initial input array for later GPU run */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
            }
        }
    }

    //printf("Host computation\n");
    /* run main computation on host: average of the 26 neighbours */
    for (iter = 0; iter < ITERATIONS; iter++) {
        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    a1[i*sz*sz+j*sz+k] = (
                        a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                        a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                        a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                        a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                        a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                    ) * fac;
                }
            }
        }
        /* swap sweep result back into a0 for the next iteration */
        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                }
            }
        }
    } /* end iteration loop */

    /* save result */
    /* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
                // printf("%lf\t", a0[i*sz*sz+j*sz+k]);
            }
        }
    }
    //int size = sizeof(host_result)/sizeof(host_result[0]);
    //for(i = 0; i < size; i++) {
    // printf("%lf\t", host_result[i]);
    //}
    //printf("\n");

    /* copy initial array back to a0 */
    /* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
            }
        }
    }

    //printf("Starting acc pragma code\n");
    /* timed accelerated run over the same ITERATIONS sweeps */
    t1 = omp_get_wtime();
#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
    {
        for (iter = 0; iter < ITERATIONS; iter++) {

#pragma omp target teams distribute
            for (i = 1; i < n+1; i++) {
                for (j = 1; j < n+1; j++) {
#pragma omp simd
                    for (k = 1; k < n+1; k++) {
                        a1[i*sz*sz+j*sz+k] = (
                            a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                            a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                            a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                            a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                            a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                        ) * fac;
                    }
                }
            }

#pragma acc parallel loop
            for (i = 1; i < n+1; i++) {
#pragma acc loop
                for (j = 1; j < n+1; j++) {
#pragma acc loop
                    for (k = 1; k < n+1; k++) {
                        a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                    }
                }
            }

        } /* end iteration loop */
    } /* end data region */
#pragma acc wait
    t2 = omp_get_wtime();

    memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);

    md = max_diff(&host_result[0],&device_result[0], sz);

    /* Free malloc'd memory to prevent leaks */
    free(a0);
    free(a0_init);
    free(a1);
    free(host_result);
    free(device_result);

    //printf("md: %lf \t tolerance: %lf", md, TOLERANCE);
    if (md < TOLERANCE ){
        //printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE);
        return(t2 - t1);
    }
    else{
        // printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md);
        return(-11000);
    }
}

/* Utility Functions */

/* Maximum absolute elementwise difference over the grid interior. */
double max_diff(double *array1,double *array2, int sz)
{
    double tmpdiff, diff;
    int i,j,k;
    int n = sz-2;
    diff=0.0;
    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]);
                //printf("diff: %lf", tmpdiff);
                if (tmpdiff > diff) diff = tmpdiff;
            }
        }
    }
    return diff;
}

/*
 * This function ensures the device is awake.
 * It is more portable than acc_init().
 */
void wul(){
    int data = 8192;
    double *arr_a = (double *)malloc(sizeof(double) * data);
    double *arr_b = (double *)malloc(sizeof(double) * data);
    int i = 0;

    if (arr_a==NULL||arr_b==NULL) {
        printf("Unable to allocate memory in wul.\n");
    }

    for (i=0;i<data;i++){
        arr_a[i] = (double) (rand()/(1.0+RAND_MAX));
    }

#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
    {
#pragma acc parallel loop
        for (i=0;i<data;i++){
            arr_b[i] = arr_a[i] * 2;
        }
    }

    if (arr_a[0] < 0){
        printf("Error in WUL\n");
        /*
         * This should never be called as rands should be in the range (0,1].
         * This stops clever optimizers.
         */
    }
    free(arr_a);
    free(arr_b);
}

int main(int argc, char **argv)
{
    char testName[32];

    //printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n");
    /* Initialise storage for test results & parse input arguments. */
    init(argc, argv);
    /* Ensure device is awake. */
    wul();

    sprintf(testName, "27S");
    benchmark(testName, &stencil);

    /* Print results & free results storage */
    finalise();

    return EXIT_SUCCESS;
}
GB_binop__islt_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__islt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_01__islt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__islt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_03__islt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__islt_uint16)
// A*D function (colscale):         GB (_AxD__islt_uint16)
// D*A function (rowscale):         GB (_DxB__islt_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__islt_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__islt_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__islt_uint16)
// C=scalar+B                       GB (_bind1st__islt_uint16)
// C=scalar+B'                      GB (_bind1st_tran__islt_uint16)
// C=A+scalar                       GB (_bind2nd__islt_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__islt_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij < bij)

// NOTE: ISLT is the "is less than" operator: the result of (aij < bij) is the
// integer 0 or 1, stored in the same type as the inputs (uint16_t), unlike
// the boolean LT operator.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_UINT16 || GxB_NO_ISLT_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// This variant is compiled out (#if 0) for ISLT: the accumulating dense
// ewise3 kernel only exists for the listed arithmetic operators.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__islt_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first;
    // harmless generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__islt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__islt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for ISLT, so only the #else branch below is compiled.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__islt_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__islt_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x < aij) ;                       \
}

GrB_Info GB (_bind1st_tran__islt_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij < y) ;                       \
}

GrB_Info GB (_bind2nd_tran__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
// ===== file: flow_cutter.h =====
#ifndef FLOW_CUTTER_H
#define FLOW_CUTTER_H

#include "tiny_id_func.h"
#include "array_id_func.h"
#include "id_string.h"
#include "id_func.h"
#include "dijkstra.h"
#include "min_max.h"
#include <vector>
#include <algorithm>
#include <sstream>
#include <random>
#include <memory>
#include <omp.h>
#include "flow_cutter_config.h"
#include <iostream>
#include <iomanip>

// NOTE(review): `using namespace std;` at header scope is a known smell, but
// later code in this header relies on unqualified std names (e.g. mt19937),
// so it cannot be removed without touching the rest of the file.
using namespace std;

namespace flow_cutter{

	//! A lightweight bundle of the ID functions that together describe a
	//! flow network: per-arc tail/head endpoints, the reverse arc mapping,
	//! arc weights, capacities, and the outgoing-arc adjacency function.
	//! The struct owns copies of the functors (each is moved in) and only
	//! derives counts from them; it stores no adjacency data of its own.
	template<class Tail, class Head, class BackArc, class ArcWeight, class Capacity, class OutArc>
	struct Graph{
		Graph(
			Tail tail, Head head, BackArc back_arc, ArcWeight arc_weight, Capacity capacity, OutArc out_arc
		):
			tail(std::move(tail)), head(std::move(head)), back_arc(std::move(back_arc)),
			arc_weight(std::move(arc_weight)), capacity(std::move(capacity)), out_arc(std::move(out_arc)){}

		Tail tail;       //!< arc id -> tail node id
		Head head;       //!< arc id -> head node id
		BackArc back_arc; //!< arc id -> id of the reverse arc
		//NodeWeight node_weight;
		ArcWeight arc_weight; //!< arc id -> weight
		Capacity capacity;    //!< arc id -> flow capacity
		OutArc out_arc;       //!< node id -> range of outgoing arc ids

		//! Number of nodes, taken from the image size of the tail function.
		int node_count()const{
			return tail.image_count();
		}

		//! Number of arcs, taken from the preimage size of the tail function.
		int arc_count()const{
			return tail.preimage_count();
		}
	};

	//! Each thread needs its own TemporaryData object.
struct TemporaryData{ TemporaryData(){} explicit TemporaryData(int node_count): node_space(node_count){} ArrayIDFunc<int>node_space; }; template<class Tail, class Head, class BackArc, class ArcWeight, class Capacity, class OutArc> Graph<Tail, Head, BackArc, ArcWeight, Capacity, OutArc> make_graph( Tail tail, Head head, BackArc back_arc, ArcWeight arc_weight, Capacity capacity, OutArc out_arc ){ return {std::move(tail), std::move(head), std::move(back_arc), std::move(arc_weight), std::move(capacity), std::move(out_arc)}; } template<class Tail, class Head, class BackArc, class OutArc> Graph< Tail, Head, BackArc, ConstIntIDFunc<1>, ConstIntIDFunc<1>, OutArc > make_graph( const Tail&tail, const Head&head, const BackArc&back_arc, const OutArc&out_arc ){ return { std::move(tail), std::move(head), std::move(back_arc), ConstIntIDFunc<1>(tail.preimage_count()), ConstIntIDFunc<1>(tail.preimage_count()), std::move(out_arc) }; } class PseudoDepthFirstSearch{ public: template<class Graph, class WasNodeSeen, class SeeNode, class ShouldFollowArc, class OnNewArc> void operator()( const Graph&graph, TemporaryData&tmp, int source_node, const WasNodeSeen&was_node_seen, const SeeNode&see_node, const ShouldFollowArc&should_follow_arc, const OnNewArc&on_new_arc )const{ int stack_end = 1; auto&stack = tmp.node_space; stack[0] = source_node; while(stack_end != 0){ int x = stack[--stack_end]; for(auto xy : graph.out_arc(x)){ on_new_arc(xy); int y = graph.head(xy); if(!was_node_seen(y)){ if(should_follow_arc(xy)){ if(!see_node(y)) return; stack[stack_end++] = y; } } } } } }; class BreadthFirstSearch{ public: template<class Graph, class WasNodeSeen, class SeeNode, class ShouldFollowArc, class OnNewArc> void operator()( const Graph&graph, TemporaryData&tmp, int source_node, const WasNodeSeen&was_node_seen, const SeeNode&see_node, const ShouldFollowArc&should_follow_arc, const OnNewArc&on_new_arc )const{ int queue_begin = 0, queue_end = 1; auto&queue = tmp.node_space; queue[0] = source_node; 
while(queue_begin != queue_end){ int x = queue[queue_begin++]; for(auto xy : graph.out_arc(x)){ on_new_arc(xy); int y = graph.head(xy); if(!was_node_seen(y)){ if(should_follow_arc(xy)){ if(!see_node(y)) return; queue[queue_end++] = y; } } } } } }; struct UnitFlow{ UnitFlow(){} explicit UnitFlow(int preimage_count):flow(preimage_count){} void clear(){ flow.fill(1); } int preimage_count()const{ return flow.preimage_count(); } template<class Graph> void increase(const Graph&graph, int a){ auto f = flow(a); assert((f == 0 || f == 1) && "Flow is already maximum; can not be increased"); assert(flow(graph.back_arc(a)) == 2-f && "Back arc has invalid flow"); ++f; flow.set(a, f); flow.set(graph.back_arc(a), 2-f); } template<class Graph> void decrease(const Graph&graph, int a){ auto f = flow(a); assert((f == 1 || f == 2) && "Flow is already minimum; can not be decreased"); assert(flow(graph.back_arc(a)) == 2-f && "Back arc has invalid flow"); --f; flow.set(a, f); flow.set(graph.back_arc(a), 2-f); } int operator()(int a)const{ return static_cast<int>(flow(a))-1; } void swap(UnitFlow&o){ flow.swap(o.flow); } TinyIntIDFunc<2>flow; }; class BasicNodeSet{ public: template<class Graph> explicit BasicNodeSet(const Graph&graph): node_count_inside_(0), inside_flag(graph.node_count()), extra_node(-1){} void clear(){ node_count_inside_ = 0; inside_flag.fill(false); } bool can_grow()const{ return extra_node != -1; } template<class Graph, class SearchAlgorithm, class OnNewNode, class ShouldFollowArc, class OnNewArc> void grow( const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, const OnNewNode&on_new_node, // on_new_node(x) is called for every node x. 
If it returns false then the search is stopped, if it returns true it continues const ShouldFollowArc&should_follow_arc, // is called for a subset of arcs and must say whether the arc sould be followed const OnNewArc&on_new_arc // on_new_arc(xy) is called for ever arc xy with x in the set ){ assert(can_grow()); auto see_node = [&](int x){ assert(!inside_flag(x)); inside_flag.set(x, true); ++this->node_count_inside_; return on_new_node(x); }; auto was_node_seen = [&](int x){ return inside_flag(x); }; search_algo(graph, tmp, extra_node, was_node_seen, see_node, should_follow_arc, on_new_arc); extra_node = -1; } template<class Graph> void set_extra_node(const Graph&graph, int x){ assert(!inside_flag(x)); assert(extra_node == -1); inside_flag.set(x, true); ++node_count_inside_; extra_node = x; } bool is_inside(int x) const { return inside_flag(x); } int node_count_inside() const { return node_count_inside_; } int max_node_count_inside() const { return inside_flag.preimage_count(); } private: int node_count_inside_; BitIDFunc inside_flag; int extra_node; }; class ReachableNodeSet; class AssimilatedNodeSet{ friend class ReachableNodeSet; public: template<class Graph> explicit AssimilatedNodeSet(const Graph&graph): node_set(graph){} void clear(){ node_set.clear(); front.clear(); } template<class Graph> void set_extra_node(const Graph&graph, int x){ node_set.set_extra_node(graph, x); } bool can_grow()const{ return node_set.can_grow(); } template<class Graph, class SearchAlgorithm, class OnNewNode, class ShouldFollowArc, class OnNewArc, class HasFlow> void grow( const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, const OnNewNode&on_new_node, // on_new_node(x) is called for every node x. 
If it returns false then the search is stopped, if it returns true it continues const ShouldFollowArc&should_follow_arc, // is called for a subset of arcs and must say whether the arc sould be followed const OnNewArc&on_new_arc, // on_new_arc(xy) is called for ever arc xy with x in the set const HasFlow&has_flow ){ auto my_on_new_arc = [&](int xy){ if(has_flow(xy)) front.push_back(xy); on_new_arc(xy); }; node_set.grow(graph, tmp, search_algo, on_new_node, should_follow_arc, my_on_new_arc); } bool is_inside(int x) const { return node_set.is_inside(x); } int node_count_inside() const { return node_set.node_count_inside(); } int max_node_count_inside() const { return node_set.max_node_count_inside(); } template<class Graph> void shrink_cut_front(const Graph&graph){ front.erase( std::remove_if( front.begin(), front.end(), [&](int xy){ return node_set.is_inside(graph.head(xy)); } ), front.end() ); } const std::vector<int>&get_cut_front() const { return front; } private: BasicNodeSet node_set; std::vector<int>front; }; class ReachableNodeSet{ public: template<class Graph> explicit ReachableNodeSet(const Graph&graph): node_set(graph), predecessor(graph.node_count()){} void reset(const AssimilatedNodeSet&other){ node_set = other.node_set; } void clear(){ node_set.clear(); } template<class Graph> void set_extra_node(const Graph&graph, int x){ node_set.set_extra_node(graph, x); } bool can_grow()const{ return node_set.can_grow(); } template<class Graph, class SearchAlgorithm, class OnNewNode, class ShouldFollowArc, class OnNewArc> void grow( const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, const OnNewNode&on_new_node, // on_new_node(x) is called for every node x. 
If it returns false then the search is stopped, if it returns true it continues const ShouldFollowArc&should_follow_arc, // is called for a subset of arcs and must say whether the arc sould be followed const OnNewArc&on_new_arc // on_new_arc(xy) is called for ever arc xy with x in the set ){ auto my_should_follow_arc = [&](int xy){ predecessor[graph.head(xy)] = xy; return should_follow_arc(xy); }; node_set.grow(graph, tmp, search_algo, on_new_node, my_should_follow_arc, on_new_arc); } bool is_inside(int x) const { return node_set.is_inside(x); } int node_count_inside() const { return node_set.node_count_inside(); } int max_node_count_inside() const { return node_set.max_node_count_inside(); } template<class Graph, class IsSource, class OnNewArc> void forall_arcs_in_path_to(const Graph&graph, const IsSource&is_source, int target, const OnNewArc&on_new_arc){ int x = target; while(!is_source(x)){ on_new_arc(predecessor[x]); x = graph.tail(predecessor[x]); } } private: BasicNodeSet node_set; ArrayIDFunc<int>predecessor; }; struct SourceTargetPair{ int source, target; }; struct CutterStateDump{ BitIDFunc source_assimilated, target_assimilated, source_reachable, target_reachable, flow; }; class BasicCutter{ public: template<class Graph> explicit BasicCutter(const Graph&graph): assimilated{AssimilatedNodeSet(graph), AssimilatedNodeSet(graph)}, reachable{ReachableNodeSet(graph), ReachableNodeSet(graph)}, flow(graph.arc_count()), cut_available(false) {} template<class Graph, class SearchAlgorithm> void init(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, SourceTargetPair p){ assimilated[source_side].clear(); reachable[source_side].clear(); assimilated[target_side].clear(); reachable[target_side].clear(); flow.clear(); assimilated[source_side].set_extra_node(graph, p.source); reachable[source_side].set_extra_node(graph, p.source); assimilated[target_side].set_extra_node(graph, p.target); reachable[target_side].set_extra_node(graph, p.target); 
grow_reachable_sets(graph, tmp, search_algo, source_side); grow_assimilated_sets(graph, tmp, search_algo); cut_available = true; check_invariants(graph); } CutterStateDump dump_state()const{ return { id_func( assimilated[source_side].max_node_count_inside(), [&](int x){ return assimilated[source_side].is_inside(x); } ), id_func( assimilated[target_side].max_node_count_inside(), [&](int x){ return assimilated[target_side].is_inside(x); } ), id_func( assimilated[source_side].max_node_count_inside(), [&](int x){ return reachable[source_side].is_inside(x); } ), id_func( assimilated[target_side].max_node_count_inside(), [&](int x){ return reachable[target_side].is_inside(x); } ), id_func( flow.preimage_count(), [&](int xy){ return flow(xy) != 0; } ) }; } //! Returns true if a new cut was found. Returns false if no cut was found. False implies that no cut //! will be found in the future. Repeatly calling this function after it returned false does not do //! anything. template<class Graph, class SearchAlgorithm, class ScorePierceNode> bool advance(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, const ScorePierceNode&score_pierce_node){ assert(cut_available); check_invariants(graph); int side = get_current_cut_side(); if(assimilated[side].node_count_inside() >= graph.node_count()/2){ cut_available = false; return false; } int pierce_node = select_pierce_node(graph, side, score_pierce_node); if(pierce_node == -1){ cut_available = false; return false; } assert(!assimilated[1-side].is_inside(pierce_node)); assimilated[side].set_extra_node(graph, pierce_node); reachable[side].set_extra_node(graph, pierce_node); grow_reachable_sets(graph, tmp, search_algo, side); grow_assimilated_sets(graph, tmp, search_algo); check_invariants(graph); cut_available = true; return true; } bool is_cut_available()const{ return cut_available; } template<class Graph, class ScorePierceNode> bool does_next_advance_increase_cut(const Graph&graph, const 
ScorePierceNode&score_pierce_node){ int side = get_current_cut_side(); if(assimilated[side].node_count_inside() >= graph.node_count()/2){ return true; } int pierce_node = select_pierce_node(graph, side, score_pierce_node); if(pierce_node == -1) return true; else if(reachable[1-side].is_inside(pierce_node)) return true; else return false; } bool is_on_smaller_side(int x)const{ return assimilated[get_current_cut_side()].is_inside(x); } static const int source_side = 0; static const int target_side = 1; int get_current_cut_side()const{ if( reachable[source_side].node_count_inside() == assimilated[source_side].node_count_inside() && ( reachable[target_side].node_count_inside() != assimilated[target_side].node_count_inside() || assimilated[source_side].node_count_inside() <= assimilated[target_side].node_count_inside() ) ) return source_side; else return target_side; } int get_current_smaller_cut_side_size()const{ return assimilated[get_current_cut_side()].node_count_inside(); } const std::vector<int>&get_current_cut()const{ return assimilated[get_current_cut_side()].get_cut_front(); } int get_assimilated_node_count()const{ return assimilated[source_side].node_count_inside() + assimilated[target_side].node_count_inside(); } private: template<class Graph, class ScorePierceNode> int select_pierce_node(const Graph&graph, int side, const ScorePierceNode&score_pierce_node){ int pierce_node = -1; int max_score = std::numeric_limits<int>::min(); for(auto xy : assimilated[side].get_cut_front()){ int y = graph.head(xy); if(!assimilated[1-side].is_inside(y)){ int score = score_pierce_node(y, side, reachable[1-side].is_inside(y), graph.arc_weight(xy)); if(score > max_score){ max_score = score; pierce_node = y; } } } return pierce_node; } template<class Graph> bool is_saturated(const Graph&graph, int direction, int xy){ if(direction == target_side) xy = graph.back_arc(xy); return graph.capacity(xy) == flow(xy); } template<class Graph, class SearchAlgorithm> void 
grow_reachable_sets(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, int pierced_side){ int my_source_side = pierced_side; int my_target_side = 1-pierced_side; assert(reachable[pierced_side].can_grow()); auto is_forward_saturated = [&,this](int xy){ return this->is_saturated(graph, my_source_side, xy); }; auto is_backward_saturated = [&,this](int xy){ return this->is_saturated(graph, my_target_side, xy); }; auto is_source = [&](int x){ return assimilated[my_source_side].is_inside(x); }; auto is_target = [&](int x){ return assimilated[my_target_side].is_inside(x); }; auto increase_flow = [&](int xy){ if(pierced_side == source_side) flow.increase(graph, xy); else flow.decrease(graph, xy); }; bool was_flow_augmented = false; int target_hit; do{ target_hit = -1; auto on_new_node = [&](int x){ if(is_target(x)){ target_hit = x; return false; } else return true; }; auto should_follow_arc = [&](int xy){ return !is_forward_saturated(xy); }; auto on_new_arc = [](int xy){}; reachable[my_source_side].grow(graph, tmp, search_algo, on_new_node, should_follow_arc, on_new_arc); if(target_hit != -1){ check_flow_conservation(graph); reachable[my_source_side].forall_arcs_in_path_to(graph, is_source, target_hit, increase_flow); check_flow_conservation(graph); reachable[my_source_side].reset(assimilated[my_source_side]); was_flow_augmented = true; check_flow_conservation(graph); } }while(target_hit != -1); if(was_flow_augmented){ reachable[my_target_side].reset(assimilated[my_target_side]); auto on_new_node = [&](int x){return true;}; auto should_follow_arc = [&](int xy){ return !is_backward_saturated(xy); }; auto on_new_arc = [](int xy){}; reachable[my_target_side].grow(graph, tmp, search_algo, on_new_node, should_follow_arc, on_new_arc); } } template<class Graph, class SearchAlgorithm> void grow_assimilated_sets(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo){ auto is_forward_saturated = [&,this](int xy){ return 
this->is_saturated(graph, source_side, xy); }; auto is_backward_saturated = [&,this](int xy){ return this->is_saturated(graph, target_side, xy); }; if(reachable[source_side].node_count_inside() <= reachable[target_side].node_count_inside()){ auto on_new_node = [&](int x){return true;}; auto should_follow_arc = [&](int xy){ return !is_forward_saturated(xy); }; auto on_new_arc = [](int xy){}; auto has_flow = [&](int xy){ return flow(xy) != 0; }; assimilated[source_side].grow(graph, tmp, search_algo, on_new_node, should_follow_arc, on_new_arc, has_flow); assimilated[source_side].shrink_cut_front(graph); }else{ auto on_new_node = [&](int x){return true;}; auto should_follow_arc = [&](int xy){ return !is_backward_saturated(xy); }; auto on_new_arc = [](int xy){}; auto has_flow = [&](int xy){ return flow(xy) != 0; }; assimilated[target_side].grow(graph, tmp, search_algo, on_new_node, should_follow_arc, on_new_arc, has_flow); assimilated[target_side].shrink_cut_front(graph); } } template<class Graph> void check_flow_conservation(const Graph&graph){ #ifndef NDEBUG for(int x=0; x<graph.node_count(); ++x) if(!assimilated[source_side].is_inside(x) && !assimilated[target_side].is_inside(x)){ int flow_surplus = 0; for(auto xy : graph.out_arc(x)) flow_surplus += flow(xy); assert(flow_surplus == 0 && "Flow must be conserved outside of the assimilated sides"); } #endif } template<class Graph> void check_invariants(const Graph&graph){ #ifndef NDEBUG for(int side = 0; side < 2; ++side) assert(assimilated[side].node_count_inside() > 0 && "Each side must contain at least one node"); for(int x=0; x<graph.node_count(); ++x) assert((!assimilated[source_side].is_inside(x) || !assimilated[target_side].is_inside(x)) && "a node can not be assimilated by both sides"); for(int side = 0; side < 2; ++side) for(int x=0; x<graph.node_count(); ++x) if(assimilated[side].is_inside(x)) assert(reachable[side].is_inside(x) && "assimilated must be a subset of reachable"); check_flow_conservation(graph); 
int smaller_reachable_side; if(reachable[source_side].node_count_inside() <= reachable[target_side].node_count_inside()) smaller_reachable_side = source_side; else smaller_reachable_side = target_side; assert(reachable[smaller_reachable_side].node_count_inside() == assimilated[smaller_reachable_side].node_count_inside()); for(int x=0; x<graph.node_count(); ++x) assert(reachable[smaller_reachable_side].is_inside(x) == assimilated[smaller_reachable_side].is_inside(x)); assert(!reachable[source_side].can_grow()); assert(!reachable[target_side].can_grow()); assert(!assimilated[smaller_reachable_side].can_grow()); #endif } AssimilatedNodeSet assimilated[2]; ReachableNodeSet reachable[2]; UnitFlow flow; bool cut_available; }; enum class DistanceType{ no_distance, hop_distance, weighted_distance }; class DistanceAwareCutter{ private: template<class Graph> static void compute_hop_distance_from(const Graph&graph, TemporaryData&tmp, int source, ArrayIDFunc<int>&dist){ dist.fill(std::numeric_limits<int>::max()); dist[source] = 0; auto was_node_seen = [&](int x){return false;}; auto see_node = [](int x){ return true; }; auto should_follow_arc = [&](int xy){ if(dist(graph.tail(xy)) < dist(graph.head(xy)) - 1){ dist[graph.head(xy)] = dist(graph.tail(xy))+1; return true; }else{ return false; } }; auto on_new_arc = [&](int xy){}; BreadthFirstSearch()(graph, tmp, source, was_node_seen, see_node, should_follow_arc, on_new_arc); } template<class Graph> static void compute_weighted_distance_from(const Graph&graph, TemporaryData&tmp, int source, ArrayIDFunc<int>&dist){ Dijkstra<BitIDFunc>dij(graph.node_count()); dij.clear(); dij.add_source_node(source); while(!dij.is_finished()) dij.settle_next(graph.out_arc, graph.head, graph.arc_weight, [](int,bool,int){}); dist = dij.move_distance_array(); } public: template<class Graph> DistanceAwareCutter(const Graph&graph): cutter(graph), node_dist{ArrayIDFunc<int>{graph.node_count()}, ArrayIDFunc<int>{graph.node_count()}}{} template<class Graph, 
class SearchAlgorithm> void init(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, DistanceType dist_type, SourceTargetPair p, int random_seed){ cutter.init(graph, tmp, search_algo, p); rng.seed(random_seed); switch(dist_type){ case DistanceType::hop_distance: compute_hop_distance_from(graph, tmp, p.source, node_dist[source_side]); compute_hop_distance_from(graph, tmp, p.target, node_dist[target_side]); break; case DistanceType::weighted_distance: compute_weighted_distance_from(graph, tmp, p.source, node_dist[source_side]); compute_weighted_distance_from(graph, tmp, p.target, node_dist[target_side]); break; case DistanceType::no_distance: break; default: assert(false); break; } } CutterStateDump dump_state()const{ return cutter.dump_state(); } template<class Graph, class SearchAlgorithm, class ScorePierceNode> bool advance(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, const ScorePierceNode&score_pierce_node){ auto my_score_pierce_node = [&](int x, int side, bool causes_augmenting_path, int arc_weight){ return score_pierce_node(x, side, causes_augmenting_path, arc_weight, node_dist[side](x), node_dist[1-side](x)); }; return cutter.advance(graph, tmp, search_algo, my_score_pierce_node); } bool is_cut_available()const{ return cutter.is_cut_available(); } template<class Graph, class ScorePierceNode> bool does_next_advance_increase_cut(const Graph&graph, const ScorePierceNode&score_pierce_node){ auto my_score_pierce_node = [&](int x, int side, bool causes_augmenting_path, int arc_weight){ return score_pierce_node(x, side, causes_augmenting_path, arc_weight, node_dist[side](x), node_dist[1-side](x)); }; return cutter.does_next_advance_increase_cut(graph, my_score_pierce_node); } static const int source_side = BasicCutter::source_side; static const int target_side = BasicCutter::target_side; int get_current_cut_side()const{ return cutter.get_current_cut_side(); } int get_current_smaller_cut_side_size()const{ return 
cutter.get_current_smaller_cut_side_size(); } const std::vector<int>&get_current_cut()const{ return cutter.get_current_cut(); } int get_assimilated_node_count()const{ return cutter.get_assimilated_node_count(); } bool is_on_smaller_side(int x)const{ return cutter.is_on_smaller_side(x); } bool is_empty()const{ return node_dist[0].preimage_count() == 0; } private: BasicCutter cutter; ArrayIDFunc<int>node_dist[2]; mt19937 rng; }; class MultiCutter{ public: MultiCutter(){} template<class Graph, class SearchAlgorithm, class ScorePierceNode> void init( const Graph&graph, std::vector<TemporaryData>&tmp, const SearchAlgorithm&search_algo, const ScorePierceNode&score_pierce_node, DistanceType dist_type, const std::vector<SourceTargetPair>&p, int random_seed, bool should_skip_non_maximum_sides = true ){ while(cutter_list.size() > p.size()) cutter_list.pop_back(); // can not use resize because that requires default constructor... while(cutter_list.size() < p.size()) cutter_list.emplace_back(graph); #pragma omp parallel num_threads(tmp.size()) { int thread_id = omp_get_thread_num(); #pragma omp for schedule(dynamic) for(int i=0; i<(int)p.size(); ++i){ auto&x = cutter_list[i]; auto my_score_pierce_node = [&](int x, int side, bool causes_augmenting_path, int arc_weight, int source_dist, int target_dist){ return score_pierce_node(x, side, causes_augmenting_path, arc_weight, source_dist, target_dist, i); }; x.init(graph, tmp[thread_id], search_algo, dist_type, p[i], random_seed+1+i); if(should_skip_non_maximum_sides) while(!x.does_next_advance_increase_cut(graph, my_score_pierce_node)) x.advance(graph, tmp[thread_id], search_algo, my_score_pierce_node); } } int best_cutter_id = -1; int best_cut_size = std::numeric_limits<int>::max(); int best_cutter_weight = 0; for(int i=0; i<(int)p.size(); ++i){ auto&x = cutter_list[i]; if( (int)x.get_current_cut().size() < best_cut_size || ( (int)x.get_current_cut().size() == best_cut_size && x.get_current_smaller_cut_side_size() > 
best_cutter_weight ) ){ best_cutter_id = i; best_cut_size = x.get_current_cut().size(); best_cutter_weight = x.get_current_smaller_cut_side_size(); } } current_cutter_id = best_cutter_id; current_smaller_side_size = cutter_list[current_cutter_id].get_current_smaller_cut_side_size(); } CutterStateDump dump_state()const{ if(cutter_list.size() != 1) throw std::runtime_error("Can only dump the cutter state if a single instance is run"); return cutter_list[0].dump_state(); } template<class Graph, class SearchAlgorithm, class ScorePierceNode> bool advance(const Graph&graph, std::vector<TemporaryData>&tmp, const SearchAlgorithm&search_algo, const ScorePierceNode&score_pierce_node, bool should_skip_non_maximum_sides = true){ if(graph.node_count() /2 == get_current_smaller_cut_side_size()) return false; int current_cut_size = cutter_list[current_cutter_id].get_current_cut().size(); for(;;){ #pragma omp parallel num_threads(tmp.size()) { int thread_id = omp_get_thread_num(); #pragma omp for schedule(dynamic) for(int i=0; i<(int)cutter_list.size(); ++i){ auto x = std::move(cutter_list[i]); auto my_score_pierce_node = [&](int x, int side, bool causes_augmenting_path, int arc_weight, int source_dist, int target_dist){ return score_pierce_node(x, side, causes_augmenting_path, arc_weight, source_dist, target_dist, i); }; if(x.is_cut_available()){ if((int)x.get_current_cut().size() == current_cut_size){ assert(x.does_next_advance_increase_cut(graph, my_score_pierce_node)); if(x.advance(graph, tmp[thread_id], search_algo, my_score_pierce_node)){ assert((int)x.get_current_cut().size() > current_cut_size); while(!x.does_next_advance_increase_cut(graph, my_score_pierce_node)){ if(!x.advance(graph, tmp[thread_id], search_algo, my_score_pierce_node)) break; if(!should_skip_non_maximum_sides) break; } } } } cutter_list[i] = std::move(x); } } int next_cut_size = std::numeric_limits<int>::max(); for(auto&x:cutter_list) if(x.is_cut_available()) min_to(next_cut_size, 
(int)x.get_current_cut().size()); if(next_cut_size == std::numeric_limits<int>::max()) return false; int best_cutter_weight = 0; int best_cutter_id = -1; for(int i=0; i<(int)cutter_list.size(); ++i){ if(cutter_list[i].is_cut_available()){ if( (int)cutter_list[i].get_current_cut().size() == next_cut_size && cutter_list[i].get_current_smaller_cut_side_size() > best_cutter_weight ){ best_cutter_id = i; best_cutter_weight = cutter_list[i].get_current_smaller_cut_side_size(); } } } assert(best_cutter_id != -1); current_cut_size = next_cut_size; if(best_cutter_weight <= current_smaller_side_size) continue; current_cutter_id = best_cutter_id; current_smaller_side_size = cutter_list[current_cutter_id].get_current_smaller_cut_side_size(); return true; } } int get_current_smaller_cut_side_size()const{ return current_smaller_side_size; } bool is_on_smaller_side(int x)const{ return cutter_list[current_cutter_id].is_on_smaller_side(x); } const std::vector<int>&get_current_cut()const{ return cutter_list[current_cutter_id].get_current_cut(); } int get_current_cutter_id()const{ return current_cutter_id; } private: std::vector<DistanceAwareCutter>cutter_list; int current_smaller_side_size; int current_cutter_id; }; struct PierceNodeScore{ static constexpr unsigned hash_modulo = ((1u<<31u)-1u); unsigned hash_factor, hash_offset; PierceNodeScore(Config config): config(config){ std::mt19937 gen; gen.seed(config.random_seed); gen(); hash_factor = gen() % hash_modulo; hash_offset = gen() % hash_modulo; } Config config; int operator()(int x, int side, bool causes_augmenting_path, int arc_weight, int source_dist, int target_dist, int cutter_id)const{ auto random_number = [&]{ if(side == BasicCutter::source_side) return (hash_factor * (unsigned)(x<<1) + hash_offset) % hash_modulo; else return (hash_factor * ((unsigned)(x<<1)+1) + hash_offset) % hash_modulo; }; int score; switch(config.pierce_rating){ case Config::PierceRating::max_target_minus_source_hop_dist: case 
Config::PierceRating::max_target_minus_source_weight_dist: score = target_dist - source_dist; break; case Config::PierceRating::max_target_hop_dist: case Config::PierceRating::max_target_weight_dist: score = target_dist; break; case Config::PierceRating::min_source_hop_dist: case Config::PierceRating::min_source_weight_dist: score = -source_dist; break; case Config::PierceRating::oldest: score = 0; break; case Config::PierceRating::random: score = random_number(); break; case Config::PierceRating::max_arc_weight: score = arc_weight; break; case Config::PierceRating::min_arc_weight: score = -arc_weight; break; case Config::PierceRating::circular_hop: case Config::PierceRating::circular_weight: if(side == BasicCutter::source_side) return -source_dist; else return target_dist; break; default: assert(false); score = 0; } switch(config.avoid_augmenting_path){ case Config::AvoidAugmentingPath::avoid_and_pick_best: if(causes_augmenting_path) score -= 1000000000; break; case Config::AvoidAugmentingPath::do_not_avoid: break; case Config::AvoidAugmentingPath::avoid_and_pick_oldest: if(causes_augmenting_path) score = -1000000000; break; case Config::AvoidAugmentingPath::avoid_and_pick_random: if(causes_augmenting_path) score = random_number() - 1000000000; break; default: assert(false); score = 0; } return score; } }; template<class Graph> class SimpleCutter{ public: SimpleCutter(const Graph&graph, Config config): graph(graph), tmp(config.thread_count, TemporaryData(graph.node_count())), config(config){ } void init(const std::vector<SourceTargetPair>&p, int random_seed){ DistanceType dist_type; if( config.pierce_rating == Config::PierceRating::min_source_hop_dist || config.pierce_rating == Config::PierceRating::max_target_hop_dist || config.pierce_rating == Config::PierceRating::max_target_minus_source_hop_dist || config.pierce_rating == Config::PierceRating::circular_hop ) dist_type = DistanceType::hop_distance; else if( config.pierce_rating == 
Config::PierceRating::min_source_weight_dist || config.pierce_rating == Config::PierceRating::max_target_weight_dist || config.pierce_rating == Config::PierceRating::max_target_minus_source_weight_dist || config.pierce_rating == Config::PierceRating::circular_weight ) dist_type = DistanceType::weighted_distance; else dist_type = DistanceType::no_distance; switch(config.graph_search_algorithm){ case Config::GraphSearchAlgorithm::pseudo_depth_first_search: cutter.init(graph, tmp, PseudoDepthFirstSearch(), PierceNodeScore(config), dist_type, p, random_seed, config.skip_non_maximum_sides == Config::SkipNonMaximumSides::skip); break; case Config::GraphSearchAlgorithm::breadth_first_search: cutter.init(graph, tmp, BreadthFirstSearch(), PierceNodeScore(config), dist_type, p, random_seed, config.skip_non_maximum_sides == Config::SkipNonMaximumSides::skip); break; case Config::GraphSearchAlgorithm::depth_first_search: throw std::runtime_error("depth first search is not yet implemented"); default: assert(false); } } bool advance(){ switch(config.graph_search_algorithm){ case Config::GraphSearchAlgorithm::pseudo_depth_first_search: return cutter.advance(graph, tmp, PseudoDepthFirstSearch(), PierceNodeScore(config), config.skip_non_maximum_sides == Config::SkipNonMaximumSides::skip); case Config::GraphSearchAlgorithm::breadth_first_search: return cutter.advance(graph, tmp, BreadthFirstSearch(), PierceNodeScore(config), config.skip_non_maximum_sides == Config::SkipNonMaximumSides::skip); case Config::GraphSearchAlgorithm::depth_first_search: throw std::runtime_error("depth first search is not yet implemented"); default: assert(false); return false; } } CutterStateDump dump_state()const{ return cutter.dump_state(); } int get_current_smaller_cut_side_size()const{ return cutter.get_current_smaller_cut_side_size(); } bool is_on_smaller_side(int x)const{ return cutter.is_on_smaller_side(x); } const std::vector<int>&get_current_cut()const{ return cutter.get_current_cut(); } int 
get_current_cutter_id()const{ return cutter.get_current_cutter_id(); }
private:
	const Graph&graph;             // not owned; must outlive this SimpleCutter
	std::vector<TemporaryData>tmp; // one scratch buffer per thread (sized by config.thread_count)
	MultiCutter cutter;
	Config config;
};

// Convenience factory; lets the Graph template parameter be deduced.
template<class Graph>
SimpleCutter<Graph> make_simple_cutter(const Graph&graph, Config config){
	return SimpleCutter<Graph>(graph, config);
}

// True if the configured pierce rating uses weighted distances, which are
// computed with Dijkstra and therefore require non-negative arc weights.
// NOTE(review): SimpleCutter::init also maps circular_weight to
// weighted_distance, but circular_weight is not listed here — confirm
// whether that is intentional.
inline bool requires_non_negative_weights(Config config){
	return config.pierce_rating == Config::PierceRating::min_source_weight_dist
		|| config.pierce_rating == Config::PierceRating::max_target_weight_dist
		|| config.pierce_rating == Config::PierceRating::max_target_minus_source_weight_dist;
}

// Draws cutter_count source/target pairs uniformly at random over the node
// IDs [0, node_count), rejecting pairs with source == target.
// NOTE(review): the rejection loop never terminates if node_count < 2 —
// confirm callers guarantee at least two nodes.
std::vector<SourceTargetPair>select_random_source_target_pairs(int node_count, int cutter_count, int seed){
	std::vector<SourceTargetPair>p(cutter_count);
	std::mt19937 rng(seed);
	std::uniform_int_distribution<int> dist(0, node_count-1);
	for(auto&x:p){
		do{
			x.source = dist(rng);
			x.target = dist(rng);
		}while(x.source == x.target);
	}
	return p;
}

} // presumably closes the enclosing namespace (opened before this chunk)
#endif // presumably the include guard (opened before this chunk)
GB_binop.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB // A.*B function (eWiseMult): GB_AemultB // A*D function (colscale): GB_AxD // D*A function (rowscale): GB_DxB // C+=B function (dense accum): GB_Cdense_accumB // C+=b function (dense accum): GB_Cdense_accumb // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum // C=scalar+B GB_bind1st // C=scalar+B' GB_bind1st_tran // C=A+scalar GB_bind2nd // C=A'+scalar GB_bind2nd_tran // C type: GB_ctype // A type: GB_atype // B,b type: GB_btype // BinaryOp: GB_binaryop(cij,aij,bij,i,j) #define GB_ATYPE \ GB_atype #define GB_BTYPE \ GB_btype #define GB_CTYPE \ GB_ctype // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ GB_atype_is_btype // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ GB_ctype_is_atype // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ GB_ctype_is_btype // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GB_geta(aij,Ax,pA) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GB_getb(bij,Bx,pB) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GB_ctype t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ GB_copy_a_to_c(cij,Ax,pA) 
// cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ GB_copy_b_to_c(cij,Bx,pB) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ GB_binaryop(z, x, y, i, j) ; // op is second #define GB_OP_IS_SECOND \ GB_op_is_second // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ GB_op_is_plus_real // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ GB_op_is_minus_real // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ GB_cblas_axpy // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ GB_disable //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ if_is_binop_subset // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B, all three matrices dense.
// NOTE: this file is a code-generator template: names such as
// GB_Cdense_ewise3_accum, GB_btype, GB_DISABLE, and the lower-case
// if_*/endif_* lines are placeholder tokens that the generator replaces
// per operator and type; the file is not compilable as-is.
void GB_Cdense_ewise3_accum
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // all of the work is done by the shared template
    #include "GB_dense_ewise3_accum_template.c"
}

endif_is_binop_subset

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // this operator/type was compiled out
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // per-task slicing of B; presumably produced by GB_ek_slice — confirm
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    if_C_dense_update
    {
        #include "GB_dense_subassign_23_template.c"
    }
    endif_C_dense_update
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b, of type GB_btype
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    if_C_dense_update
    {
        // get the scalar b for C += b, of type GB_btype
        GB_btype bwork = (*((GB_btype *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    endif_C_dense_update
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------ if_binop_is_semiring_multiplier GrB_Info GB_AxD ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_ctype *GB_RESTRICT Cx = (GB_ctype *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } endif_binop_is_semiring_multiplier //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ if_binop_is_semiring_multiplier GrB_Info GB_DxB ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_ctype *GB_RESTRICT Cx = (GB_ctype *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } endif_binop_is_semiring_multiplier //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if 
GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ if_binop_bind1st_is_enabled GrB_Info GB_bind1st ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_ctype *Cx = (GB_ctype *) Cx_output ; GB_atype x = (*((GB_atype *) x_input)) ; GB_btype *Bx = (GB_btype *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if 
(!GBB (Bb, p)) continue ; GB_getb(bij, Bx, p) ; GB_binaryop(Cx [p], x, bij, 0, 0) ; } return (GrB_SUCCESS) ; #endif } endif_binop_bind1st_is_enabled //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ if_binop_bind2nd_is_enabled GrB_Info GB_bind2nd ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GB_ctype *Cx = (GB_ctype *) Cx_output ; GB_atype *Ax = (GB_atype *) Ax_input ; GB_btype y = (*((GB_btype *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GB_geta(aij, Ax, p) ; GB_binaryop(Cx [p], aij, y, 0, 0) ; } return (GrB_SUCCESS) ; #endif } endif_binop_bind2nd_is_enabled //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ if_binop_bind1st_is_enabled // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GB_getb(aij, Ax, pA) ; \ GB_binaryop(Cx [pC], x, aij, 0, 0) ; \ } GrB_Info GB_bind1st_tran ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GB_btype #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_atype x = (*((const GB_atype *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GB_atype } endif_binop_bind1st_is_enabled //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ if_binop_bind2nd_is_enabled // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GB_geta(aij, Ax, pA) ; \ GB_binaryop(Cx [pC], aij, y, 0, 0) ; \ } GrB_Info GB_bind2nd_tran ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_btype y = (*((const GB_btype *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } endif_binop_bind2nd_is_enabled #endif
quicksort.c
#include <stdio.h>
#include <stdlib.h>

#define MAXSIZE 500000000   /* Maximum size of array */
#define MAXWORKERS 12       /* Maximum amount of worker threads */
#define TASK_CUTOFF 1000    /* below this range size, recurse inline instead of spawning a task */

int size = MAXSIZE;
int vector[MAXSIZE];
double start_time, end_time; /* start and end times (currently never set; kept for timing hooks) */
int numWorkers;

/*
 * Qsort: in-place quicksort of the global vector[first..last].
 *
 * Partitions around the middle element, then sorts the two sublists as
 * OpenMP tasks.  Each task works on a disjoint index range, so no
 * synchronization on `vector` is needed.  The `if` clauses keep very
 * small ranges from spawning tasks, which would otherwise drown the
 * runtime in scheduling overhead.
 */
void Qsort(int first, int last)
{
  int pivot, i_pivot, temp, left, right;
  if (first >= last) return;  /* empty or single-element range */
  i_pivot = (first + last) / 2;
  pivot = vector[i_pivot];
  left = first;
  right = last;
  while (left <= right) {
    if (vector[left] > pivot) {
      /* move the large element to the right end */
      temp = vector[left];
      vector[left] = vector[right];
      vector[right] = temp;
      /* the pivot itself may have been moved; keep tracking it */
      if (right == i_pivot) {
        i_pivot = left;
      }
      right--;
    } else {
      left++;
    }
  }
  /* place the pivot in its final place (i.e. swap with right element) */
  temp = vector[right];
  vector[right] = pivot;
  vector[i_pivot] = temp;
  /* sort the two sublists in parallel; tiny ranges run undeferred */
  #pragma omp task if (right - 1 - first > TASK_CUTOFF)
  Qsort(first, (right - 1));
  #pragma omp task if (last - (right + 1) > TASK_CUTOFF)
  Qsort((right + 1), last);
}

/*
 * Usage: quicksort [size [numWorkers]]
 * Fills vector with pseudo-random values, sorts it in parallel, and
 * verifies the result.
 */
int main(int argc, char *argv[])
{
  int i;

  /* determine size */
  size = (argc > 1) ? atoi(argv[1]) : MAXSIZE;
  if (size <= 0 || size > MAXSIZE) size = MAXSIZE;
  numWorkers = (argc > 2) ? atoi(argv[2]) : MAXWORKERS;
  if (numWorkers > MAXWORKERS) numWorkers = MAXWORKERS;
  if (numWorkers < 1) numWorkers = 1;   /* atoi may return 0 or negative */

  /* initialize the vector to be sorted */
  for (i = 0; i < size; i++)
    vector[i] = (int) random() % MAXSIZE;

  /* BUG FIX: numWorkers was parsed but never applied to the team size */
  #pragma omp parallel num_threads(numWorkers)
  {
    #pragma omp single
    Qsort(0, (size - 1));
  }
  /* tasks are guaranteed complete here: the parallel region has an
     implicit barrier that waits for all outstanding tasks */

  for (i = 0; i < size - 1; i++)
    if (vector[i] > vector[i + 1]) {
      printf("The resulting vector is not sorted!\n");
      break;  /* one report is enough; previously printed once per inversion */
    }
  return (0);
}
loopct_r4.c
/*
 * Input: ntabs nchannels padded_size
 * Output: ntabs ntimes -nchannels ; ntimes < padded_size
 *
 * We process a finished tab directly, so no need to build up the full ntabs array.
 *
 * page is indexed as page[(tab*nchannels + channel)*padded_size + time];
 * transposed is indexed as transposed[time*nchannels + col], where the
 * channel order is reversed (col = nchannels - channel - 1) to comply
 * with the header.
 *
 * FIX: the original manual 4-way unroll dereferenced channels
 * channel+1 .. channel+3 unconditionally, reading past the end of
 * `page` whenever nchannels was not a multiple of 4.  The 4-channel
 * grouping is kept, but the last group is clamped to the remaining
 * channel count.  Results are identical for multiples of 4.
 */
void deinterleave(const char *page, char *transposed, const int ntabs,
                  const int nchannels, const int ntimes, const int padded_size) {
    int tab;

    for (tab = 0; tab < ntabs; tab++) {
        int channel;
#pragma omp parallel for
        for (channel = 0; channel < nchannels; channel += 4) {
            /* group of up to 4 channels; clamp at the array end */
            int group = nchannels - channel;
            if (group > 4) group = 4;
            int k;
            for (k = 0; k < group; k++) {
                const char *src = &page[(tab*nchannels + channel + k)*padded_size];
                int time;
                for (time = 0; time < ntimes; time++) {
                    /* reverse freq order to comply with header */
                    transposed[time*nchannels + nchannels - (channel + k) - 1] = src[time];
                }
            }
        }
    }
}
average.c
#include<stdio.h>
#include<omp.h>
#define MAX 5

/*
 * Computes the average of A[0] .. A[MAX-1] (= 1.0 .. 5.0) in parallel
 * and prints it (expected output: 3.000000).
 */
int main()
{
  double ave = 0.0, A[MAX];
  int i;

  for (i = 0; i < MAX; i++) {
    A[i] = i + 1.0;
  }

  /* BUG FIX: 'ave += A[i]' from multiple threads was an unsynchronized
     read-modify-write on a shared variable (a data race that could drop
     contributions).  reduction(+:ave) gives each thread a private
     partial sum and combines them at the end of the loop. */
#pragma omp parallel for reduction(+:ave)
  for (i = 0; i < MAX; i++) {
    ave += A[i];
  }

  ave /= MAX;
  printf("%f\n", ave);
  return 0;
}
GB_binop__islt_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__islt_int16 // A.*B function (eWiseMult): GB_AemultB__islt_int16 // A*D function (colscale): GB_AxD__islt_int16 // D*A function (rowscale): GB_DxB__islt_int16 // C+=B function (dense accum): GB_Cdense_accumB__islt_int16 // C+=b function (dense accum): GB_Cdense_accumb__islt_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_int16 // C=scalar+B GB_bind1st__islt_int16 // C=scalar+B' GB_bind1st_tran__islt_int16 // C=A+scalar GB_bind2nd__islt_int16 // C=A'+scalar GB_bind2nd_tran__islt_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_INT16 || GxB_NO_ISLT_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__islt_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__islt_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__islt_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__islt_int16 ( GrB_Matrix C, const GrB_Matrix A, 
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed alias of the output value array; the template writes into Cx
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__islt_int16
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// The C_to_* arrays map vectors of C to the corresponding vectors of M, A,
// and B; TaskList describes the parallel task decomposition.
GrB_Info GB_AaddB__islt_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__islt_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT
    TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = (x < Bx [p]) for all anz entries, in parallel.
GrB_Info GB_bind1st__islt_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    // the bound first argument x, a scalar
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = (Ax [p] < y) for all anz entries, in parallel.
GrB_Info GB_bind2nd__islt_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    // the bound second argument y, a scalar
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;
\
    Cx [pC] = (x < aij) ;           \
}

// Transpose A and apply z = (x < aij) to each entry, with x bound.
GrB_Info GB_bind1st_tran__islt_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any subsequent use of the transpose template
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (aij < y) ;           \
}

// Transpose A and apply z = (aij < y) to each entry, with y bound.
GrB_Info GB_bind2nd_tran__islt_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes the file-level guard opened before this chunk
#endif
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/magick.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #if defined(MAGICKCORE_LQR_DELEGATE) #include <lqr.h> #endif /* Typedef declarations. 
*/

/*
  A resize filter: a weighting function, optionally windowed by a second
  function, together with precomputed support sizes, scale factors, and
  cubic coefficients.
*/
struct _ResizeFilter
{
  double
    (*filter)(const double,const ResizeFilter *),
    (*window)(const double,const ResizeFilter *),
    support,        /* filter region of support - the filter support limit */
    window_support, /* window support, usually equal to support (expert only) */
    scale,          /* dimension scaling to fit window support (usually 1.0) */
    blur,           /* x-scale (blur-sharpen) */
    coefficient[7]; /* cubic coefficients for BC-cubic filters */

  ResizeWeightingFunctionType
    filterWeightingType,
    windowWeightingType;

  size_t
    signature;
};

/*
  Forward declarations.
*/
static double
  I0(double x),
  BesselOrderOne(double),
  Sinc(const double, const ResizeFilter *),
  SincFast(const double, const ResizeFilter *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F i l t e r   F u n c t i o n s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  These are the various filter and windowing functions that are provided.
%
%  They are internal to this module only.  See AcquireResizeFilterInfo() for
%  details of the access to these functions, via the GetResizeFilterSupport()
%  and GetResizeFilterWeight() API interface.
%
%  The individual filter functions have this format...
%
%     static MagickRealType *FilterName(const double x,const double support)
%
%  A description of each parameter follows:
%
%    o x: the distance from the sampling point generally in the range of 0 to
%      support.  The GetResizeFilterWeight() ensures this a positive value.
%
%    o resize_filter: current filter information.  This allows function to
%      access support, and possibly other pre-calculated information defining
%      the functions.
%
*/

static double Blackman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function:
      0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)

    Refactored by Chantal Racette and Nicolas Robidoux to one trig call and
    five flops.
  */
  const double cosine = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  /* 0.34 + 0.5 c + 0.16 c^2 == 0.42 + 0.5 cos(pi x) + 0.08 cos(2 pi x) */
  return(0.34+cosine*(0.5+cosine*0.16));
}

static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
      (1-x) cos(pi x) + sin(pi x) / pi.

    Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7
    flops, taking advantage of the fact that the support of Bohman is 1.0 (so
    that we know that sin(pi x) >= 0).
  */
  const double cosine = cos((double) (MagickPI*x));
  const double sine=sqrt(1.0-cosine*cosine);
  magick_unreferenced(resize_filter);
  return((1.0-x)*cosine+(1.0/MagickPI)*sine);
}

static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  /*
    A Box filter is a equal weighting function (all weights equal).
    DO NOT LIMIT results by support or resize point sampling will work
    as it requests points beyond its normal 0.0 support size.
  */
  return(1.0);
}

static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Cosine window function:
      cos((pi/2)*x).
  */
  return(cos((double) (MagickPI2*x)));
}

static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.
    Coefficients are determined from B,C values:
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter:
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
  */
  /* P1 is always zero, so the first polynomial skips the linear term */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}

static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
  /* piecewise cubic spline; the lobe count is selected via the support size */
  if (resize_filter->support <= 2.0)
    {
      /*
        2-lobe Spline filter.
      */
      if (x < 1.0)
        return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
      if (x < 2.0)
        return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
      return(0.0);
    }
  if (resize_filter->support <= 3.0)
    {
      /*
        3-lobe Spline filter.
      */
      if (x < 1.0)
        return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
      if (x < 2.0)
        return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
      if (x < 3.0)
        return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
      return(0.0);
    }
  /*
    4-lobe Spline filter.
  */
  if (x < 1.0)
    return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
  if (x < 2.0)
    return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
  if (x < 3.0)
    return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
  if (x < 4.0)
    return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
  return(0.0);
}

static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a sigma = 1/2 (or as user specified)

    Gaussian Formula (1D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))

    Gaussian Formula (2D) ...
        exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
    or for radius
        exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )

    Note that it is only a change from 1-d to radial form is in the
    normalization multiplier which is not needed or used when Gaussian is used
    as a filter.

    The constants are pre-calculated...
        coeff[0]=sigma;
        coeff[1]=1.0/(2.0*sigma^2);
        coeff[2]=1.0/(sqrt(2*PI)*sigma^2);
        exp( -coeff[1]*(x^2)) ) * coeff[2];

    However the multiplier coeff[1] is needed, the others are informative only.

    This separates the gaussian 'sigma' value from the 'blur/support'
    settings allowing for its use in special 'small sigma' gaussians, without
    the filter 'missing' pixels because the support becomes too small.
  */
  return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}

static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function:
      0.5+0.5*cos(pi*x).
  */
  const double cosine = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*cosine);
}

static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Offset cosine window function:
      .54 + .46 cos(pi x).
  */
  const double cosine = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*cosine);
}

static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf

    The original "zoom" program by Paul Heckbert called this "Bessel".  But
    really it is more accurately named "Jinc".
  */
  /* limit of the Jinc function as x approaches zero */
  if (x == 0.0)
    return(0.5*MagickPI);
  return(BesselOrderOne(MagickPI*x)/x);
}

static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser Windowing Function (bessel windowing)

       I0( beta * sqrt( 1-x^2) ) / IO(0)

    Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
    However it is typically defined in terms of Alpha*PI

    The normalization factor (coeff[1]) is not actually needed, but without it
    the filters has a large value at x=0 making it difficult to compare the
    function with other windowing functions.
  */
  return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
    sqrt((double) (1.0-x*x))));
}

static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    value;

  register ssize_t
    i;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc: N is the 'order' of the
    lagrange function and depends on the overall support window size of the
    filter. That is: for a support of 2, it gives a lagrange-4 (piecewise
    cubic function).

    "n" identifies the piece of the piecewise polynomial.

    See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
    Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);
  value=1.0f;
  /* product of (n-i-x)/(n-i) over every piece except the current one */
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);
  return(value);
}

static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}

static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi x)/(pi x).
  */
  if (x != 0.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  /* sinc(0) == 1 by definition (limit of sin(pi x)/(pi x)) */
  return((double) 1.0);
}

static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.

    The Remez algorithm, as implemented in the boost library's minimax
    package, is the key to the construction:
    http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig
    formula.
  */
  if (x > 4.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    /* 8-bit quantum: low-order polynomial approximation is sufficient */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    /* (xx-1)(xx-4)(xx-9)(xx-16) pins the zeros of sinc at x = 1,2,3,4 */
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    /* high-precision quantum: rational (polynomial quotient) approximation */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}

static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.  Also used as a Bartlett Windowing function
    for Sinc().
  */
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Welch parabolic windowing filter.
  */
  if (x < 1.0)
    return(1.0-x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e R e s i z e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.
Choose from % these filters: % % FIR (Finite impulse Response) Filters % Box Triangle Quadratic % Spline Hermite Catrom % Mitchell % % IIR (Infinite impulse Response) Filters % Gaussian Sinc Jinc (Bessel) % % Windowed Sinc/Jinc Filters % Blackman Bohman Lanczos % Hann Hamming Cosine % Kaiser Welch Parzen % Bartlett % % Special Purpose Filters % Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp % Robidoux RobidouxSharp % % The users "-filter" selection is used to lookup the default 'expert' % settings for that filter from a internal table. However any provided % 'expert' settings (see below) may override this selection. % % FIR filters are used as is, and are limited to that filters support window % (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also % simply clipped by its support size (currently 1.5 or approximately 3*sigma % as recommended by many references) % % The special a 'cylindrical' filter flag will promote the default 4-lobed % Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better % suited to this style of image resampling. This typically happens when using % such a filter for images distortions. % % SPECIFIC FILTERS: % % Directly requesting 'Sinc', 'Jinc' function as a filter will force the use % of function without any windowing, or promotion for cylindrical usage. This % is not recommended, except by image processing experts, especially as part % of expert option filter function selection. % % Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is % computed using the traditional sin(pi*x)/(pi*x); it is selected if the user % specifically specifies the use of a Sinc filter. SincFast uses highly % accurate (and fast) polynomial (low Q) and rational (high Q) approximations, % and will be used by default in most cases. % % The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted % to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use). 
% The Sinc version is the most popular windowed filter.
%
%  LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
%  the Lanczos filter, specifically designed for EWA distortion (as a
%  Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
%  (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
%  satisfying the following condition without changing the character of the
%  corresponding EWA filter:
%
%    'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
%    only vertical or horizontal features are preserved when performing 'no-op'
%    with EWA distortion.
%
%  The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
%  filters.  The 'sharp' version uses a blur factor of 0.9549963639785485,
%  again chosen because the resulting EWA filter comes as close as possible to
%  satisfying the above condition.
%
%  Robidoux is another filter tuned for EWA. It is the Keys cubic filter
%  defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
%  Vertical and Horizontal Line Preservation Condition" exactly, and it
%  moderately blurs high frequency 'pixel-hash' patterns under no-op.  It turns
%  out to be close to both Mitchell and Lanczos2Sharp.  For example, its first
%  crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
%  first crossing of Mitchell and Lanczos2Sharp.
%
%  RobidouxSharp is a slightly sharper version of Robidoux, some believe it
%  is too sharp.  It is designed to minimize the maximum possible change in
%  a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
%  conditions.  Amazingly Mitchell falls roughly between Robidoux and
%  RobidouxSharp, though this seems to have been pure coincidence.
%
%  'EXPERT' OPTIONS:
%
%  These artifact "defines" are not recommended for production use without
%  expert knowledge of resampling, filtering, and the effects they have on the
%  resulting resampled (resized or distorted) image.
% % They can be used to override any and all filter default, and it is % recommended you make good use of "filter:verbose" to make sure that the % overall effect of your selection (before and after) is as expected. % % "filter:verbose" controls whether to output the exact results of the % filter selections made, as well as plotting data for graphing the % resulting filter over the filters support range. % % "filter:filter" select the main function associated with this filter % name, as the weighting function of the filter. This can be used to % set a windowing function as a weighting function, for special % purposes, such as graphing. % % If a "filter:window" operation has not been provided, a 'Box' % windowing function will be set to denote that no windowing function is % being used. % % "filter:window" Select this windowing function for the filter. While any % filter could be used as a windowing function, using the 'first lobe' of % that filter over the whole support window, using a non-windowing % function is not advisible. If no weighting filter function is specified % a 'SincFast' filter is used. % % "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a % simpler method of setting filter support size that will correctly % handle the Sinc/Jinc switch for an operators filtering requirements. % Only integers should be given. % % "filter:support" Set the support size for filtering to the size given. % This not recommended for Sinc/Jinc windowed filters (lobes should be % used instead). This will override any 'filter:lobes' option. % % "filter:win-support" Scale windowing function to this size instead. This % causes the windowing (or self-windowing Lagrange filter) to act is if % the support window it much much larger than what is actually supplied % to the calling operator. The filter however is still clipped to the % real support size given, by the support range supplied to the caller. % If unset this will equal the normal filter support size. 
% % "filter:blur" Scale the filter and support window by this amount. A value % of > 1 will generally result in a more blurred image with more ringing % effects, while a value <1 will sharpen the resulting image with more % aliasing effects. % % "filter:sigma" The sigma value to use for the Gaussian filter only. % Defaults to '1/2'. Using a different sigma effectively provides a % method of using the filter as a 'blur' convolution. Particularly when % using it for Distort. % % "filter:b" % "filter:c" Override the preset B,C values for a Cubic filter. % If only one of these are given it is assumes to be a 'Keys' type of % filter such that B+2C=1, where Keys 'alpha' value = C. % % Examples: % % Set a true un-windowed Sinc filter with 10 lobes (very slow): % -define filter:filter=Sinc % -define filter:lobes=8 % % Set an 8 lobe Lanczos (Sinc or Jinc) filter: % -filter Lanczos % -define filter:lobes=8 % % The format of the AcquireResizeFilter method is: % % ResizeFilter *AcquireResizeFilter(const Image *image, % const FilterType filter_type,const MagickBooleanType cylindrical, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filter: the filter type, defining a preset filter, window and support. % The artifact settings listed above will override those selections. % % o blur: blur the filter by this amount, use 1.0 if unknown. Image % artifact "filter:blur" will override this API call usage, including any % internal change (such as for cylindrical usage). % % o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial) % filter (Jinc). % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image, const FilterType filter,const MagickBooleanType cylindrical, ExceptionInfo *exception) { const char *artifact; FilterType filter_type, window_type; double B, C, value; register ResizeFilter *resize_filter; /* Table Mapping given Filter, into Weighting and Windowing functions. A 'Box' windowing function means its a simble non-windowed filter. An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was specifically requested by the user. WARNING: The order of this table must match the order of the FilterType enumeration specified in "resample.h", or the filter names will not match the filter being setup. You can check filter setups with the "filter:verbose" expert setting. */ static struct { FilterType filter, window; } const mapping[SentinelFilter] = { { UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */ { PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */ { BoxFilter, BoxFilter }, /* Box averaging filter */ { TriangleFilter, BoxFilter }, /* Linear interpolation filter */ { HermiteFilter, BoxFilter }, /* Hermite interpolation filter */ { SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */ { SincFastFilter, HammingFilter }, /* Hamming -- '' variation */ { SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */ { GaussianFilter, BoxFilter }, /* Gaussian blur filter */ { QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */ { CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */ { CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */ { MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */ { JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */ { SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */ { SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */ { SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */ { LanczosFilter, 
WelchFilter }, /* Welch -- parabolic (3 lobe) */ { SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */ { SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */ { SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */ { LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */ { LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */ { LanczosSharpFilter, LanczosSharpFilter }, /* | these require */ { Lanczos2Filter, Lanczos2Filter }, /* | special handling */ { Lanczos2SharpFilter, Lanczos2SharpFilter }, { RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */ { RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */ { LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */ { SplineFilter, BoxFilter }, /* Spline Cubic Filter */ { LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */ { CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */ }; /* Table mapping the filter/window from the above table to an actual function. The default support size for that filter as a weighting function, the range to scale with to use that function as a sinc windowing function, (typ 1.0). Note that the filter_type -> function is 1 to 1 except for Sinc(), SincFast(), and CubicBC() functions, which may have multiple filter to function associations. See "filter:verbose" handling below for the function -> filter mapping. */ static struct { double (*function)(const double,const ResizeFilter*), support, /* Default lobes/support size of the weighting filter. */ scale, /* Support when function used as a windowing function Typically equal to the location of the first zero crossing. */ B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. 
*/ ResizeWeightingFunctionType weightingFunctionType; } const filters[SentinelFilter] = { /* .--- support window (if used as a Weighting Function) | .--- first crossing (if used as a Windowing Function) | | .--- B value for Cubic Function | | | .---- C value for Cubic Function | | | | */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */ { Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */ { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */ { CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */ { Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */ { Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */ { Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */ { Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */ { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */ { CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */ { CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */ { Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */ { Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */ { SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */ { Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */ { Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */ { Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */ { Triangle, 
1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */ { Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */ /* Robidoux: Keys cubic close to Lanczos2D sharpened */ { CubicBC, 2.0, 1.1685777620836932, 0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction }, /* RobidouxSharp: Sharper version of Robidoux */ { CubicBC, 2.0, 1.105822933719019, 0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction }, { Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Interger Radius */ { CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */ }; /* The known zero crossings of the Jinc() or more accurately the Jinc(x*PI) function being used as a filter. It is used by the "filter:lobes" expert setting and for 'lobes' for Jinc functions in the previous table. This way users do not have to deal with the highly irrational lobe sizes of the Jinc filter. Values taken from http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp using Jv-function with v=1, then dividing by PI. 
*/ static double jinc_zeros[16] = { 1.2196698912665045, 2.2331305943815286, 3.2383154841662362, 4.2410628637960699, 5.2427643768701817, 6.2439216898644877, 7.2447598687199570, 8.2453949139520427, 9.2458926849494673, 10.246293348754916, 11.246622794877883, 12.246898461138105, 13.247132522181061, 14.247333735806849, 15.247508563037300, 16.247661874700962 }; /* Allocate resize filter. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(UndefinedFilter < filter && filter < SentinelFilter); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) exception; resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter)); (void) memset(resize_filter,0,sizeof(*resize_filter)); /* Defaults for the requested filter. */ filter_type=mapping[filter].filter; window_type=mapping[filter].window; resize_filter->blur=1.0; /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */ if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) && (filter != SincFastFilter)) filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */ /* Expert filter setting override */ artifact=GetImageArtifact(image,"filter:filter"); if (IsStringTrue(artifact) != MagickFalse) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { /* Raw filter request - no window function. */ filter_type=(FilterType) option; window_type=BoxFilter; } /* Filter override with a specific window function. 
*/ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) window_type=(FilterType) option; } } else { /* Window specified, but no filter function? Assume Sinc/Jinc. */ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { filter_type= cylindrical != MagickFalse ? JincFilter : SincFastFilter; window_type=(FilterType) option; } } } /* Assign the real functions to use for the filters selected. */ resize_filter->filter=filters[filter_type].function; resize_filter->support=filters[filter_type].support; resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType; resize_filter->window=filters[window_type].function; resize_filter->windowWeightingType=filters[window_type].weightingFunctionType; resize_filter->scale=filters[window_type].scale; resize_filter->signature=MagickCoreSignature; /* Filter Modifications for orthogonal/cylindrical usage */ if (cylindrical != MagickFalse) switch (filter_type) { case BoxFilter: /* Support for Cylindrical Box should be sqrt(2)/2 */ resize_filter->support=(double) MagickSQ1_2; break; case LanczosFilter: case LanczosSharpFilter: case Lanczos2Filter: case Lanczos2SharpFilter: case LanczosRadiusFilter: resize_filter->filter=filters[JincFilter].function; resize_filter->window=filters[JincFilter].function; resize_filter->scale=filters[JincFilter].scale; /* number of lobes (support window size) remain unchanged */ break; default: break; } /* Global Sharpening (regardless of orthoginal/cylindrical) */ switch (filter_type) { case LanczosSharpFilter: resize_filter->blur *= 0.9812505644269356; break; case Lanczos2SharpFilter: resize_filter->blur *= 0.9549963639785485; break; /* case 
LanczosRadius: blur adjust is done after lobes */ default: break; } /* Expert Option Modifications. */ /* User Gaussian Sigma Override - no support change */ if ((resize_filter->filter == Gaussian) || (resize_filter->window == Gaussian) ) { value=0.5; /* guassian sigma default, half pixel */ artifact=GetImageArtifact(image,"filter:sigma"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); /* Define coefficents for Gaussian */ resize_filter->coefficient[0]=value; /* note sigma too */ resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */ resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value); /* normalization - not actually needed or used! */ if ( value > 0.5 ) resize_filter->support *= 2*value; /* increase support linearly */ } /* User Kaiser Alpha Override - no support change */ if ((resize_filter->filter == Kaiser) || (resize_filter->window == Kaiser) ) { value=6.5; /* default beta value for Kaiser bessel windowing function */ artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */ if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-beta"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-alpha"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL)*MagickPI; /* Define coefficents for Kaiser Windowing Function */ resize_filter->coefficient[0]=value; /* alpha */ resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value)); /* normalization */ } /* Support Overrides */ artifact=GetImageArtifact(image,"filter:lobes"); if (artifact != (const char *) NULL) { ssize_t lobes; lobes=(ssize_t) StringToLong(artifact); if (lobes < 1) lobes=1; resize_filter->support=(double) lobes; } if (resize_filter->filter == Jinc) { /* Convert a Jinc function lobes value to a real support 
value. */ if (resize_filter->support > 16) resize_filter->support=jinc_zeros[15]; /* largest entry in table */ else resize_filter->support=jinc_zeros[((long) resize_filter->support)-1]; /* Blur this filter so support is a integer value (lobes dependant). */ if (filter_type == LanczosRadiusFilter) resize_filter->blur*=floor(resize_filter->support)/ resize_filter->support; } /* Expert blur override. */ artifact=GetImageArtifact(image,"filter:blur"); if (artifact != (const char *) NULL) resize_filter->blur*=StringToDouble(artifact,(char **) NULL); if (resize_filter->blur < MagickEpsilon) resize_filter->blur=(double) MagickEpsilon; /* Expert override of the support setting. */ artifact=GetImageArtifact(image,"filter:support"); if (artifact != (const char *) NULL) resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL)); /* Scale windowing function separately to the support 'clipping' window that calling operator is planning to actually use. (Expert override) */ resize_filter->window_support=resize_filter->support; /* default */ artifact=GetImageArtifact(image,"filter:win-support"); if (artifact != (const char *) NULL) resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL)); /* Adjust window function scaling to match windowing support for weighting function. This avoids a division on every filter call. */ resize_filter->scale*=PerceptibleReciprocal(resize_filter->window_support); /* Set Cubic Spline B,C values, calculate Cubic coefficients. */ B=0.0; C=0.0; if ((resize_filter->filter == CubicBC) || (resize_filter->window == CubicBC) ) { B=filters[filter_type].B; C=filters[filter_type].C; if (filters[window_type].function == CubicBC) { B=filters[window_type].B; C=filters[window_type].C; } artifact=GetImageArtifact(image,"filter:b"); if (artifact != (const char *) NULL) { B=StringToDouble(artifact,(char **) NULL); C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. 
*/ artifact=GetImageArtifact(image,"filter:c"); /* user C override */ if (artifact != (const char *) NULL) C=StringToDouble(artifact,(char **) NULL); } else { artifact=GetImageArtifact(image,"filter:c"); if (artifact != (const char *) NULL) { C=StringToDouble(artifact,(char **) NULL); B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */ } } { const double twoB = B+B; /* Convert B,C values into Cubic Coefficents. See CubicBC(). */ resize_filter->coefficient[0]=1.0-(1.0/3.0)*B; resize_filter->coefficient[1]=-3.0+twoB+C; resize_filter->coefficient[2]=2.0-1.5*B-C; resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C; resize_filter->coefficient[4]=-8.0*C-twoB; resize_filter->coefficient[5]=B+5.0*C; resize_filter->coefficient[6]=(-1.0/6.0)*B-C; } } /* Expert Option Request for verbose details of the resulting filter. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp master { #endif if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse) { double support, x; /* Set the weighting function properly when the weighting function may not exactly match the filter of the same name. EG: a Point filter is really uses a Box weighting function with a different support than is typically used. */ if (resize_filter->filter == Box) filter_type=BoxFilter; if (resize_filter->filter == Sinc) filter_type=SincFilter; if (resize_filter->filter == SincFast) filter_type=SincFastFilter; if (resize_filter->filter == Jinc) filter_type=JincFilter; if (resize_filter->filter == CubicBC) filter_type=CubicFilter; if (resize_filter->window == Box) window_type=BoxFilter; if (resize_filter->window == Sinc) window_type=SincFilter; if (resize_filter->window == SincFast) window_type=SincFastFilter; if (resize_filter->window == Jinc) window_type=JincFilter; if (resize_filter->window == CubicBC) window_type=CubicFilter; /* Report Filter Details. 
*/ support=GetResizeFilterSupport(resize_filter); /* practical_support */ (void) FormatLocaleFile(stdout, "# Resampling Filter (for graphing)\n#\n"); (void) FormatLocaleFile(stdout,"# filter = %s\n", CommandOptionToMnemonic(MagickFilterOptions,filter_type)); (void) FormatLocaleFile(stdout,"# window = %s\n", CommandOptionToMnemonic(MagickFilterOptions,window_type)); (void) FormatLocaleFile(stdout,"# support = %.*g\n", GetMagickPrecision(),(double) resize_filter->support); (void) FormatLocaleFile(stdout,"# window-support = %.*g\n", GetMagickPrecision(),(double) resize_filter->window_support); (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n", GetMagickPrecision(),(double) resize_filter->blur); if ((filter_type == GaussianFilter) || (window_type == GaussianFilter)) (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n", GetMagickPrecision(),(double) resize_filter->coefficient[0]); if ( filter_type == KaiserFilter || window_type == KaiserFilter ) (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n", GetMagickPrecision(),(double) resize_filter->coefficient[0]); (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n", GetMagickPrecision(), (double) support); if ((filter_type == CubicFilter) || (window_type == CubicFilter)) (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n", GetMagickPrecision(),(double) B,GetMagickPrecision(),(double) C); (void) FormatLocaleFile(stdout,"\n"); /* Output values of resulting filter graph -- for graphing filter result. */ for (x=0.0; x <= support; x+=0.01f) (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x, GetMagickPrecision(),(double) GetResizeFilterWeight(resize_filter,x)); /* A final value so gnuplot can graph the 'stop' properly. 
*/ (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support, GetMagickPrecision(),0.0); } /* Output the above once only for each image - remove setting */ (void) DeleteImageArtifact((Image *) image,"filter:verbose"); #if defined(MAGICKCORE_OPENMP_SUPPORT) } #endif return(resize_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveResizeImage() adaptively resize image with pixel resampling. % % This is shortcut function for a fast interpolative resize using mesh % interpolation. It works well for small resizes of less than +/- 50% % of the original image size. For larger resizing on images a full % filtered and slower resize function should be used instead. % % The format of the AdaptiveResizeImage method is: % % Image *AdaptiveResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the resized image. % % o rows: the number of rows in the resized image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveResizeImage(const Image *image, const size_t columns,const size_t rows,ExceptionInfo *exception) { Image *resize_image; resize_image=InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel, exception); return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + B e s s e l O r d e r O n e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BesselOrderOne() computes the Bessel function of x of the first kind of % order 0. This is used to create the Jinc() filter function below. 
%
%  Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%     j1(x) = x*j1(x);
%
%  For x in (8,inf)
%
%     j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%  where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
%     cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%             = 1/sqrt(2) * (sin(x) - cos(x))
%     sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%             = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      double BesselOrderOne(double x)
%
%  A description of each parameter follows:
%
%    o x: double value.
%
*/

/*
  I0(): modified Bessel function of the first kind, order zero, via its
  power-series expansion; terms are accumulated until they fall below
  MagickEpsilon.  Used above to normalize the Kaiser windowing function.
*/
#undef I0
static double I0(double x)
{
  double
    sum,
    t,
    y;

  register ssize_t
    i;

  /*
    Zeroth order Bessel function of the first kind.
  */
  sum=1.0;
  y=x*x/4.0;
  t=y;
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((double) i*i);
  }
  return(sum);
}

/*
  J1(): rational (ratio-of-polynomials) approximation evaluated by Horner's
  rule in x*x.  BesselOrderOne() multiplies the result by x for |x| < 8, so
  this approximates j1(x)/x on that range.
*/
#undef J1
static double J1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

/*
  P1(): rational approximation in (8/x)^2; the p1(x) modulation factor of the
  asymptotic expansion used by BesselOrderOne() for x > 8.
*/
#undef P1
static double P1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

/*
  Q1(): rational approximation in (8/x)^2; the q1(x) modulation factor of the
  asymptotic expansion used by BesselOrderOne() for x > 8.
*/
#undef Q1
static double Q1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

/*
  BesselOrderOne(): Bessel function of the first kind of order one.  Odd
  symmetry is handled by saving the sign of x in p; small arguments use the
  J1() rational approximation, large arguments the asymptotic sin/cos form
  documented in the header comment above.
*/
static double BesselOrderOne(double x)
{
  double
    p,
    q;

  if (x == 0.0)
    return(0.0);
  p=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(p*J1(x));
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin(x)-
    cos(x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin(x)+cos(x))));
  if (p < 0.0)
    q=(-q);
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y R e s i z e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroy the resize filter.
%
%  The format of the DestroyResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* Invalidate the signature before freeing so stale pointers are caught. */
  resize_filter->signature=(~MagickCoreSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() return the current support window size for this
%  filter.  Note that this may have been enlarged by filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o filter: Image filter to use.
%
*/

/* Accessor: expose the filter's coefficient array (sigma/alpha/cubic terms). */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}

/* Accessor: the blur (sharpen/blur scaling) factor of the filter. */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Accessor: the windowing-function scale factor. */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Accessor: the window support (may differ from the weighting support). */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Accessor: which weighting function the filter uses. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Accessor: which weighting function the window uses. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}

/* Practical support: the declared support scaled by the blur factor. */
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r W e i g h t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usually lies between zero and the filters current 'support' and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      double GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const double x)
%
%  A description of each parameter follows:
%
%    o filter: the filter type.
%
%    o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    scale,
    weight,
    x_blur;

  /*
    Windowing function - scale the weighting filter by this amount.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  x_blur=fabs((double) x)/resize_filter->blur;  /* X offset with blur scaling */
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    scale=1.0;  /* Point or Box Filter -- avoid division by zero */
  else
    {
      scale=resize_filter->scale;
      scale=resize_filter->window(x_blur*scale,resize_filter);
    }
  /* Final weight = window(x) * weighting_function(x). */
  weight=scale*resize_filter->filter(x_blur,resize_filter);
  return(weight);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p o l a t i v e R e s i z e I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpolativeResizeImage() resizes an image using the specified
%  interpolation method.
%
%  The format of the InterpolativeResizeImage method is:
%
%      Image *InterpolativeResizeImage(const Image *image,const size_t columns,
%        const size_t rows,const PixelInterpolateMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Identity resize: return a clone without touching pixels. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /* Map each destination pixel center back into source coordinates. */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        /* Skip channels that are undefined in either image. */
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        offset.x=((double) x+0.5)*scale.x-0.5;
        status=InterpolatePixelChannels(image,image_view,resize_image,method,
          offset.x,offset.y,q,exception);
        if (status == MagickFalse)
          break;
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}

#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L i q u i d R e s c a l e I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LiquidRescaleImage() rescales image with seam carving.
%
%  The format of the LiquidRescaleImage method is:
%
%      Image *LiquidRescaleImage(const Image *image,const size_t columns,
%        const size_t rows,const double delta_x,const double rigidity,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the rescaled image.
%
%    o rows: the number of rows in the rescaled image.
%
%    o delta_x: maximum seam transversal step (0 means straight seams).
%
%    o rigidity: introduce a bias for non-straight seams (typically 0).
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag  "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,       /* one carved pixel handed back by the lqr scan */
    *pixels;       /* packed 32-bit float copy of the source pixels */

  Image
    *rescale_image;

  int
    x_offset,
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  register gfloat
    *q;

  ssize_t
    y;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Geometries this small are handed to a conventional resize instead of the
    seam carver.
  */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  /*
    Copy the source pixels into a packed gfloat buffer for liblqr, scaled to
    [0,1] by QuantumScale.
  */
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  /*
    NOTE(review): the init status is overwritten by the resize status and
    neither is checked; the (void) cast below only quiets the unused-variable
    warning.
  */
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /*
    Scan the carved result one pixel at a time and copy it back into the
    rescaled image, rescaling [0,1] floats to the Quantum range.
  */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    register Quantum
      *magick_restrict p;

    register ssize_t
      i;

    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/*
  Stub used when ImageMagick is built without the LQR delegate: raises a
  MissingDelegateError and returns no image.
*/
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g n i f y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() doubles the size of the image with a pixel art scaling
%  algorithm.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  Pixel-art scaling kernels used by MagnifyImage().  Each method receives a
  width x width neighborhood of source pixels in row-major order (for width 3,
  indices 0..8 with 4 the center; Xbr2X uses width 5 with 12 the center) and
  writes magnification^2 result pixels, also row-major.
*/

/*
  Copy one source pixel's channels into one result pixel slot.
*/
static inline void CopyPixels(const Quantum *source,const ssize_t source_offset,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  register ssize_t
    i;

  for (i=0; i < (ssize_t) channels; i++)
    destination[channels*destination_offset+i]=source[source_offset*channels+i];
}

/*
  Store the channel-wise integer mean of source_size source pixels (selected
  by source_offset[]) into one result pixel slot.
*/
static inline void MixPixels(const Quantum *source,const ssize_t *source_offset,
  const size_t source_size,Quantum *destination,
  const ssize_t destination_offset,const size_t channels)
{
  ssize_t
    sum;

  register ssize_t
    i;

  for (i=0; i < (ssize_t) channels; i++)
  {
    register ssize_t
      j;

    sum=0;
    for (j=0; j < (ssize_t) source_size; j++)
      sum+=source[source_offset[j]*channels+i];
    destination[channels*destination_offset+i]=(Quantum) (sum/source_size);
  }
}

/*
  Convenience wrapper: mean of exactly two source pixels.
*/
static inline void Mix2Pixels(const Quantum *source,
  const ssize_t source_offset1,const ssize_t source_offset2,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  const ssize_t
    offsets[2] = { source_offset1, source_offset2 };

  MixPixels(source,offsets,2,destination,destination_offset,channels);
}

/*
  Return 1 when the two pixels agree on every channel, 0 otherwise.
*/
static inline int PixelsEqual(const Quantum *source1,ssize_t offset1,
  const Quantum *source2,ssize_t offset2,const size_t channels)
{
  register ssize_t
    i;

  offset1*=channels;
  offset2*=channels;
  for (i=0; i < (ssize_t) channels; i++)
    if (source1[offset1+i] != source2[offset2+i])
      return(0);
  return(1);
}

/*
  Eagle 2x: seed all four result pixels with the center, then pull each
  result corner toward a source corner when its three adjacent source pixels
  agree.
*/
static inline void Eagle2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  ssize_t
    i;

  (void) source;
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (PixelsEqual(pixels,0,pixels,1,channels) &&
      PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,0,result,0,channels);
  if (PixelsEqual(pixels,1,pixels,2,channels) &&
      PixelsEqual(pixels,2,pixels,5,channels))
    CopyPixels(pixels,2,result,1,channels);
  if (PixelsEqual(pixels,3,pixels,6,channels) &&
      PixelsEqual(pixels,6,pixels,7,channels))
    CopyPixels(pixels,6,result,2,channels);
  if (PixelsEqual(pixels,5,pixels,8,channels) &&
      PixelsEqual(pixels,8,pixels,7,channels))
    CopyPixels(pixels,8,result,3,channels);
}

/*
  Emit one quadrant of the hq2x result according to a blending rule chosen by
  Hq2XTable.  e is the center offset; a, b, d, f, h are neighbor offsets.
  caseA mixes four source pixels, caseB mixes eight; the higher-numbered
  rules choose between two mixes based on the b/d (or b/f, d/h) equality.
*/
static void Hq2XHelper(const unsigned int rule,const Quantum *source,
  Quantum *destination,const ssize_t destination_offset,const size_t channels,
  const ssize_t e,const ssize_t a,const ssize_t b,const ssize_t d,
  const ssize_t f,const ssize_t h)
{
#define caseA(N,A,B,C,D) \
  case N: \
  { \
    const ssize_t \
      offsets[4] = { A, B, C, D }; \
 \
    MixPixels(source,offsets,4,destination,destination_offset,channels);\
    break; \
  }
#define caseB(N,A,B,C,D,E,F,G,H) \
  case N: \
  { \
    const ssize_t \
      offsets[8] = { A, B, C, D, E, F, G, H }; \
 \
    MixPixels(source,offsets,8,destination,destination_offset,channels);\
    break; \
  }

  switch (rule)
  {
    case 0:
    {
      CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    caseA(1,e,e,e,a)
    caseA(2,e,e,e,d)
    caseA(3,e,e,e,b)
    caseA(4,e,e,d,b)
    caseA(5,e,e,a,b)
    caseA(6,e,e,a,d)
    caseB(7,e,e,e,e,e,b,b,d)
    caseB(8,e,e,e,e,e,d,d,b)
    caseB(9,e,e,e,e,e,e,d,b)
    caseB(10,e,e,d,d,d,b,b,b)
    case 11:
    {
      const ssize_t
        offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

      MixPixels(source,offsets,16,destination,destination_offset,channels);
      break;
    }
    case 12:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 13:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 14:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,16,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 15:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 16:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 17:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 18:
    {
      if (PixelsEqual(source,b,source,f,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, b, b, d };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, d };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    default:
    {
      if (PixelsEqual(source,d,source,h,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, d, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
  }
#undef caseA
#undef caseB
}

/*
  Pack the eight 0/1 pattern flags into a table index; pattern[0] becomes the
  most significant bit.
*/
static inline unsigned int Hq2XPatternToNumber(const int *pattern)
{
  ssize_t
    i;

  unsigned int
    result,
    order;

  result=0;
  order=1;
  for (i=7; i >= 0; i--)
  {
    result+=order*pattern[i];
    order*=2;
  }
  return(result);
}

/*
  hq2x: flag each of the 8 neighbors that differs from the center, look up a
  blending rule for each result quadrant, and delegate to Hq2XHelper.  The
  Rotated patterns reuse the same rule table for the other three quadrants.
*/
static inline void Hq2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  static const unsigned int
    Hq2XTable[] =
    {
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
      4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 12, 12, 5,  3,  1, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
      4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 16, 12, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5, 19, 12, 12, 5, 19, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5, 19,  1, 12, 5, 19,  1, 14,
      4, 4, 6,  2, 4, 4, 6, 18, 5,  3, 16, 12, 5, 19,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 13, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 13,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3,  1, 12, 5,  3,  1, 14
    };

  const int
    pattern1[] =
    {
      !PixelsEqual(pixels,4,pixels,8,channels),
      !PixelsEqual(pixels,4,pixels,7,channels),
      !PixelsEqual(pixels,4,pixels,6,channels),
      !PixelsEqual(pixels,4,pixels,5,channels),
      !PixelsEqual(pixels,4,pixels,3,channels),
      !PixelsEqual(pixels,4,pixels,2,channels),
      !PixelsEqual(pixels,4,pixels,1,channels),
      !PixelsEqual(pixels,4,pixels,0,channels)
    };

#define Rotated(p) p[2], p[4], p[7], p[1], p[6], p[0], p[3], p[5]
  const int
    pattern2[] = { Rotated(pattern1) };

  const int
    pattern3[] = { Rotated(pattern2) };

  const int
    pattern4[] = { Rotated(pattern3) };
#undef Rotated

  (void) source;
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern1)],pixels,result,0,
    channels,4,0,1,3,5,7);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern2)],pixels,result,1,
    channels,4,2,5,1,7,3);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern3)],pixels,result,3,
    channels,4,8,7,5,3,1);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern4)],pixels,result,2,
    channels,4,6,3,7,1,5);
}

/*
  Fish 2x: result pixels 0..2 are picked from neighbors by comparing
  intensities; result pixel 3 is interpolated from the 2x2 block {0,1,3,4}
  by a cascade of pairwise-equality cases (Corner and Line pick the mix).
*/
static void Fish2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
#define Corner(A,B,C,D) \
  { \
    if (intensities[B] > intensities[A]) \
      { \
        ssize_t \
          offsets[3] = { B, C, D }; \
 \
        MixPixels(pixels,offsets,3,result,3,channels); \
      } \
    else \
      { \
        ssize_t \
          offsets[3] = { A, B, C }; \
 \
        MixPixels(pixels,offsets,3,result,3,channels); \
      } \
  }
#define Line(A,B,C,D) \
  { \
    if (intensities[C] > intensities[A]) \
      Mix2Pixels(pixels,C,D,result,3,channels); \
    else \
      Mix2Pixels(pixels,A,B,result,3,channels); \
  }

  MagickFloatType
    intensities[9];

  int
    ae,
    bd,
    ab,
    ad,
    be,
    de;

  register ssize_t
    i;

  ssize_t
    offsets[4] = { 0, 1, 3, 4 };

  for (i=0; i < 9; i++)
    intensities[i]=GetPixelIntensity(source,pixels + i*channels);
  CopyPixels(pixels,0,result,0,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[1] ? 0 : 1),result,
    1,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[3] ? 0 : 3),result,
    2,channels);
  /*
    Pairwise equality flags within the 2x2 block (a=0, b=1, d=3, e=4).
  */
  ae=PixelsEqual(pixels,0,pixels,4,channels);
  bd=PixelsEqual(pixels,1,pixels,3,channels);
  ab=PixelsEqual(pixels,0,pixels,1,channels);
  de=PixelsEqual(pixels,3,pixels,4,channels);
  ad=PixelsEqual(pixels,0,pixels,3,channels);
  be=PixelsEqual(pixels,1,pixels,4,channels);
  if (ae && bd && ab)
    {
      CopyPixels(pixels,0,result,3,channels);
      return;
    }
  if (ad && de && !ab)
    {
      Corner(1,0,4,3)
      return;
    }
  if (be && de && !ab)
    {
      Corner(0,1,3,4)
      return;
    }
  if (ad && ab && !be)
    {
      Corner(4,3,1,0)
      return;
    }
  if (ab && be && !ad)
    {
      Corner(3,0,4,1)
      return;
    }
  if (ae && (!bd || intensities[1] > intensities[0]))
    {
      Mix2Pixels(pixels,0,4,result,3,channels);
      return;
    }
  if (bd && (!ae || intensities[0] > intensities[1]))
    {
      Mix2Pixels(pixels,1,3,result,3,channels);
      return;
    }
  if (ab)
    {
      Line(0,1,3,4)
      return;
    }
  if (de)
    {
      Line(3,4,0,1)
      return;
    }
  if (ad)
    {
      Line(0,3,1,4)
      return;
    }
  if (be)
    {
      Line(1,4,0,3)
      return;
    }
  /*
    No structure detected: average the whole 2x2 block.
  */
  MixPixels(pixels,offsets,4,result,3,channels);
#undef Corner
#undef Line
}

/*
  xbr 2x: w_M_N is 0 when source pixels M and N match, 1 otherwise (5x5
  neighborhood, center 12).  For each result corner an edge-detect weight
  inequality decides between blending toward the closer neighbor and a plain
  copy of the center.
*/
static void Xbr2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
#define WeightVar(M,N) const int w_##M##_##N = \
  PixelsEqual(pixels,M,pixels,N,channels) ? 0 : 1;

  WeightVar(12,11)
  WeightVar(12,7)
  WeightVar(12,13)
  WeightVar(12,17)
  WeightVar(12,16)
  WeightVar(12,8)
  WeightVar(6,10)
  WeightVar(6,2)
  WeightVar(11,7)
  WeightVar(11,17)
  WeightVar(11,5)
  WeightVar(7,13)
  WeightVar(7,1)
  WeightVar(12,6)
  WeightVar(12,18)
  WeightVar(8,14)
  WeightVar(8,2)
  WeightVar(13,17)
  WeightVar(13,9)
  WeightVar(7,3)
  WeightVar(16,10)
  WeightVar(16,22)
  WeightVar(17,21)
  WeightVar(11,15)
  WeightVar(18,14)
  WeightVar(18,22)
  WeightVar(17,23)
  WeightVar(17,19)
#undef WeightVar

  if (
    w_12_16 + w_12_8 + w_6_10 + w_6_2 + (4 * w_11_7) <
    w_11_17 + w_11_5 + w_7_13 + w_7_1 + (4 * w_12_6)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_7 ? 11 : 7),12,result,0,
      channels);
  else
    CopyPixels(pixels,12,result,0,channels);
  if (
    w_12_18 + w_12_6 + w_8_14 + w_8_2 + (4 * w_7_13) <
    w_13_17 + w_13_9 + w_11_7 + w_7_3 + (4 * w_12_8)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_7 <= w_12_13 ? 7 : 13),12,result,1,
      channels);
  else
    CopyPixels(pixels,12,result,1,channels);
  if (
    w_12_6 + w_12_18 + w_16_10 + w_16_22 + (4 * w_11_17) <
    w_11_7 + w_11_15 + w_13_17 + w_17_21 + (4 * w_12_16)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_17 ? 11 : 17),12,result,2,
      channels);
  else
    CopyPixels(pixels,12,result,2,channels);
  if (
    w_12_8 + w_12_16 + w_18_14 + w_18_22 + (4 * w_13_17) <
    w_11_17 + w_17_23 + w_17_19 + w_7_13 + (4 * w_12_18)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_13 <= w_12_17 ? 13 : 17),12,result,3,
      channels);
  else
    CopyPixels(pixels,12,result,3,channels);
}

/*
  Scale2X (AdvMAME2x): when the vertical or horizontal neighbors match, the
  center is replicated; otherwise each result corner copies the matching
  edge neighbor or falls back to the center.
*/
static void Scale2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
  if (PixelsEqual(pixels,1,pixels,7,channels) ||
      PixelsEqual(pixels,3,pixels,5,channels))
    {
      register ssize_t
        i;

      for (i=0; i < 4; i++)
        CopyPixels(pixels,4,result,i,channels);
      return;
    }
  if (PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,3,result,0,channels);
  else
    CopyPixels(pixels,4,result,0,channels);
  if (PixelsEqual(pixels,1,pixels,5,channels))
    CopyPixels(pixels,5,result,1,channels);
  else
    CopyPixels(pixels,4,result,1,channels);
  if (PixelsEqual(pixels,3,pixels,7,channels))
    CopyPixels(pixels,3,result,2,channels);
  else
    CopyPixels(pixels,4,result,2,channels);
  if (PixelsEqual(pixels,5,pixels,7,channels))
    CopyPixels(pixels,5,result,3,channels);
  else
    CopyPixels(pixels,4,result,3,channels);
}

/*
  EPX-B 2x: seed all four result pixels with the center; when the edge
  neighbors disagree and the guard condition detects structure, blend each
  result corner from its two adjacent edge neighbors (HelperCond).
*/
static void Epbx2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
#define HelperCond(a,b,c,d,e,f,g) ( \
  PixelsEqual(pixels,a,pixels,b,channels) && ( \
    PixelsEqual(pixels,c,pixels,d,channels) || \
    PixelsEqual(pixels,c,pixels,e,channels) || \
    PixelsEqual(pixels,a,pixels,f,channels) || \
    PixelsEqual(pixels,b,pixels,g,channels) \
    ) \
  )

  register ssize_t
    i;

  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (
    !PixelsEqual(pixels,3,pixels,5,channels) &&
    !PixelsEqual(pixels,1,pixels,7,channels) &&
    (
      PixelsEqual(pixels,4,pixels,3,channels) ||
      PixelsEqual(pixels,4,pixels,7,channels) ||
      PixelsEqual(pixels,4,pixels,5,channels) ||
      PixelsEqual(pixels,4,pixels,1,channels) ||
      (
        (
          !PixelsEqual(pixels,0,pixels,8,channels) ||
          PixelsEqual(pixels,4,pixels,6,channels) ||
          PixelsEqual(pixels,3,pixels,2,channels)
        ) &&
        (
          !PixelsEqual(pixels,6,pixels,2,channels) ||
          PixelsEqual(pixels,4,pixels,0,channels) ||
          PixelsEqual(pixels,4,pixels,8,channels)
        )
      )
    )
  )
    {
      if (HelperCond(1,3,4,0,8,2,6))
        Mix2Pixels(pixels,1,3,result,0,channels);
      if (HelperCond(5,1,4,2,6,8,0))
        Mix2Pixels(pixels,5,1,result,1,channels);
      if (HelperCond(3,7,4,6,2,0,8))
        Mix2Pixels(pixels,3,7,result,2,channels);
      if (HelperCond(7,5,4,8,0,6,2))
        Mix2Pixels(pixels,7,5,result,3,channels);
    }
#undef HelperCond
}

/*
  Eagle 3x: corners replicate a source corner when its three adjacent source
  pixels agree; edge midpoints blend two agreeing corners; the center stays.
*/
static inline void Eagle3X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  ssize_t
    corner_tl,
    corner_tr,
    corner_bl,
    corner_br;

  corner_tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  corner_tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  corner_bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  corner_br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  CopyPixels(pixels,(ssize_t) (corner_tl ? 0 : 4),result,0,channels);
  if (corner_tl && corner_tr)
    Mix2Pixels(pixels,0,2,result,1,channels);
  else
    CopyPixels(pixels,4,result,1,channels);
  CopyPixels(pixels,(ssize_t) (corner_tr ? 1 : 4),result,2,channels);
  if (corner_tl && corner_bl)
    Mix2Pixels(pixels,0,6,result,3,channels);
  else
    CopyPixels(pixels,4,result,3,channels);
  CopyPixels(pixels,4,result,4,channels);
  if (corner_tr && corner_br)
    Mix2Pixels(pixels,2,8,result,5,channels);
  else
    CopyPixels(pixels,4,result,5,channels);
  CopyPixels(pixels,(ssize_t) (corner_bl ? 3 : 4),result,6,channels);
  if (corner_bl && corner_br)
    Mix2Pixels(pixels,6,8,result,7,channels);
  else
    CopyPixels(pixels,4,result,7,channels);
  CopyPixels(pixels,(ssize_t) (corner_br ? 5 : 4),result,8,channels);
}

/*
  Eagle 3xB: like Eagle3X but edge midpoints always keep the center pixel;
  only the four corners may be replaced.
*/
static inline void Eagle3XB(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  ssize_t
    corner_tl,
    corner_tr,
    corner_bl,
    corner_br;

  corner_tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  corner_tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  corner_bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  corner_br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  CopyPixels(pixels,(ssize_t) (corner_tl ? 0 : 4),result,0,channels);
  CopyPixels(pixels,4,result,1,channels);
  CopyPixels(pixels,(ssize_t) (corner_tr ? 1 : 4),result,2,channels);
  CopyPixels(pixels,4,result,3,channels);
  CopyPixels(pixels,4,result,4,channels);
  CopyPixels(pixels,4,result,5,channels);
  CopyPixels(pixels,(ssize_t) (corner_bl ? 3 : 4),result,6,channels);
  CopyPixels(pixels,4,result,7,channels);
  CopyPixels(pixels,(ssize_t) (corner_br ? 5 : 4),result,8,channels);
}

/*
  Scale3X (AdvMAME3x): when the vertical and horizontal neighbors differ,
  each of the nine result pixels copies the neighbor selected by the Scale3X
  rules; otherwise the center is replicated everywhere.
*/
static inline void Scale3X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  if (!PixelsEqual(pixels,1,pixels,7,channels) &&
      !PixelsEqual(pixels,3,pixels,5,channels))
    {
      if (PixelsEqual(pixels,3,pixels,1,channels))
        CopyPixels(pixels,3,result,0,channels);
      else
        CopyPixels(pixels,4,result,0,channels);
      if (
        (
          PixelsEqual(pixels,3,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,2,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,0,channels)
        )
      )
        CopyPixels(pixels,1,result,1,channels);
      else
        CopyPixels(pixels,4,result,1,channels);
      if (PixelsEqual(pixels,5,pixels,1,channels))
        CopyPixels(pixels,5,result,2,channels);
      else
        CopyPixels(pixels,4,result,2,channels);
      if (
        (
          PixelsEqual(pixels,3,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,6,channels)
        ) ||
        (
          PixelsEqual(pixels,3,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,0,channels)
        )
      )
        CopyPixels(pixels,3,result,3,channels);
      else
        CopyPixels(pixels,4,result,3,channels);
      CopyPixels(pixels,4,result,4,channels);
      if (
        (
          PixelsEqual(pixels,5,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,8,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,2,channels)
        )
      )
        CopyPixels(pixels,5,result,5,channels);
      else
        CopyPixels(pixels,4,result,5,channels);
      if (PixelsEqual(pixels,3,pixels,7,channels))
        CopyPixels(pixels,3,result,6,channels);
      else
        CopyPixels(pixels,4,result,6,channels);
      if (
        (
          PixelsEqual(pixels,3,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,8,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,6,channels)
        )
      )
        CopyPixels(pixels,7,result,7,channels);
      else
        CopyPixels(pixels,4,result,7,channels);
      if (PixelsEqual(pixels,5,pixels,7,channels))
        CopyPixels(pixels,5,result,8,channels);
      else
        CopyPixels(pixels,4,result,8,channels);
    }
  else
    {
      register ssize_t
        i;

      for (i=0; i < 9; i++)
        CopyPixels(pixels,4,result,i,channels);
    }
}

MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag  "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  const char
    *option;

  Image
    *source_image,
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    rectangle;

  ssize_t
    y;

  unsigned char
    magnification,  /* 2 or 3: linear scale factor of the chosen method */
    width;          /* 3 or 5: side of the source neighborhood */

  void
    (*scaling_method)(const Image *,const Quantum *,Quantum *,size_t);

  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Pick the scaling kernel from the "magnify:method" image option;
    "scale2x" is the default.
  */
  option=GetImageOption(image->image_info,"magnify:method");
  if (option == (char *) NULL)
    option="scale2x";
  scaling_method=Scale2X;
  magnification=1;
  width=1;
  switch (*option)
  {
    case 'e':
    {
      if (LocaleCompare(option,"eagle2x") == 0)
        {
          scaling_method=Eagle2X;
          magnification=2;
          width=3;
          break;
        }
      if (LocaleCompare(option,"eagle3x") == 0)
        {
          scaling_method=Eagle3X;
          magnification=3;
          width=3;
          break;
        }
      if (LocaleCompare(option,"eagle3xb") == 0)
        {
          scaling_method=Eagle3XB;
          magnification=3;
          width=3;
          break;
        }
      if (LocaleCompare(option,"epbx2x") == 0)
        {
          scaling_method=Epbx2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 'f':
    {
      if (LocaleCompare(option,"fish2x") == 0)
        {
          scaling_method=Fish2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 'h':
    {
      if (LocaleCompare(option,"hq2x") == 0)
        {
          scaling_method=Hq2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 's':
    {
      if (LocaleCompare(option,"scale2x") == 0)
        {
          scaling_method=Scale2X;
          magnification=2;
          width=3;
          break;
        }
      if (LocaleCompare(option,"scale3x") == 0)
        {
          scaling_method=Scale3X;
          magnification=3;
          width=3;
          break;
        }
      break;
    }
    case 'x':
    {
      if (LocaleCompare(option,"xbr2x") == 0)
        {
          scaling_method=Xbr2X;
          magnification=2;
          width=5;
        }
      break;
    }
    default:
      break;
  }
  /*
    Make a working copy of the source image and convert it to RGB colorspace.
  */
  source_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (source_image == (Image *) NULL)
    return((Image *) NULL);
  offset.x=0;
  offset.y=0;
  rectangle.x=0;
  rectangle.y=0;
  rectangle.width=image->columns;
  rectangle.height=image->rows;
  (void) CopyImagePixels(source_image,image,&rectangle,&offset,exception);
  (void) SetImageColorspace(source_image,RGBColorspace,exception);
  magnify_image=CloneImage(source_image,magnification*source_image->columns,
    magnification*source_image->rows,MagickTrue,exception);
  if (magnify_image == (Image *) NULL)
    {
      source_image=DestroyImage(source_image);
      return((Image *) NULL);
    }
  /*
    Magnify the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(source_image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,magnify_image,source_image->rows,1)
#endif
  for (y=0; y < (ssize_t) source_image->rows; y++)
  {
    Quantum
      r[128]; /* to hold result pixels; NOTE(review): a kernel writes
        magnification^2*channels Quantums (up to 9*channels) -- confirm 128
        is sufficient for the maximum channel count */

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(magnify_view,0,magnification*y,
      magnify_image->columns,magnification,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) source_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      size_t
        channels;

      register ssize_t
        i;

      ssize_t
        j;

      /*
        Fetch the width x width neighborhood centered on (x,y); the virtual
        cache view supplies out-of-bounds pixels at the borders.
      */
      p=GetCacheViewVirtualPixels(image_view,x-width/2,y-width/2,width,width,
        exception);
      channels=GetPixelChannels(source_image);
      scaling_method(source_image,p,r,channels);
      /*
        Copy the result pixels into the final image.
      */
      for (j=0; j < (ssize_t) magnification; j++)
        for (i=0; i < (ssize_t) (channels*magnification); i++)
          q[j*channels*magnify_image->columns+i]=r[j*magnification*channels+i];
      q+=magnification*GetPixelChannels(magnify_image);
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MagnifyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  source_image=DestroyImage(source_image);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M i n i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MinifyImage() is a convenience method that scales an image proportionally
%  to half its size.
%
%  The format of the MinifyImage method is:
%
%      Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *minify_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Half-size resize with a spline filter; integer division truncates odd
    dimensions.
  */
  minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception);
  return(minify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResampleImage() resize image in terms of its pixel size, so that when
%  displayed at the given resolution it will be the same size in terms of
%  real world units as the original image at the original resolution.
%
%  The format of the ResampleImage method is:
%
%      Image *ResampleImage(Image *image,const double x_resolution,
%        const double y_resolution,const FilterType filter,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be resized to fit the given resolution.
%
%    o x_resolution: the new image x resolution.
%
%    o y_resolution: the new image y resolution.
%
%    o filter: Image filter to use.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    A zero stored resolution is treated as the 72 DPI default; +0.5 rounds
    to the nearest pixel count.
  */
  width=(size_t) (x_resolution*image->columns/(image->resolution.x == 0.0 ?
    72.0 : image->resolution.x)+0.5);
  height=(size_t) (y_resolution*image->rows/(image->resolution.y == 0.0 ?
    72.0 : image->resolution.y)+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s i z e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeImage() scales an image to the desired dimensions, using the given
%  filter (see AcquireFilterInfo()).
%
%  If an undefined filter is given the filter defaults to Mitchell for a
%  colormapped image, an image with a matte channel, or if the image is
%  enlarged.  Otherwise the filter defaults to a Lanczos.
%
%  ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
%  The format of the ResizeImage method is:
%
%      Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
%        const FilterType filter,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o filter: Image filter to use.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  One filter tap: the weight applied to a single source pixel (column or row
  index in `pixel`) when accumulating one destination pixel.
*/
typedef struct _ContributionInfo
{
  double
    weight;

  ssize_t
    pixel;
} ContributionInfo;

/*
  Release the per-thread contribution arrays and the set itself; always
  returns NULL for assignment back to the caller's pointer.
*/
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  register ssize_t
    i;

  assert(contribution != (ContributionInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[i]);
  contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
  return(contribution);
}

/*
  Allocate one aligned array of `count` contributions per worker thread so
  the OpenMP loops below can index scratch space by thread id without
  synchronization.  Returns NULL (after cleanup) on allocation failure.
*/
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  register ssize_t
    i;

  ContributionInfo
    **contribution;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  (void) memset(contribution,0,number_threads*sizeof(*contribution));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    contribution[i]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[i] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}

/*
  Resize horizontally: each destination column is a weighted sum of source
  columns under the filter kernel, processed one column per loop iteration
  (parallelized over columns).  progress/span drive the caller's progress
  monitor across both filter passes.
*/
static MagickBooleanType HorizontalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double x_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,   /* center of the filter window in source coordinates */
      density;  /* sum of weights, used to normalize the kernel */

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Compute the source-column window [start,stop) and the filter weight of
      each column in it.
    */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      Read the contributing source columns for every row at once and queue
      the single destination column.
    */
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Channel is copy-only (or masked out): take the source pixel
              nearest the window center instead of filtering.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}

/*
  Resize vertically: the row-direction counterpart of HorizontalFilter.
*/
static MagickBooleanType VerticalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double y_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
*/ support=(double) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,resize_image,resize_image->rows,1) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { const int id = GetOpenMPThreadId(); double bisect, density; register const Quantum *magick_restrict p; register ContributionInfo *magick_restrict contribution; register Quantum *magick_restrict q; register ssize_t x; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(double) (y+0.5)/y_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows); density=0.0; contribution=contributions[id]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((double) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if (n == 0) continue; if ((density != 0.0) && (density != 1.0)) { register ssize_t i; /* Normalize. 
*/ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel, image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1), exception); q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) resize_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait resize_traits, traits; register ssize_t j; ssize_t k; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); resize_traits=GetPixelChannelTraits(resize_image,channel); if ((traits == UndefinedPixelTrait) || (resize_traits == UndefinedPixelTrait)) continue; if (((resize_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2))) { j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-1.0)+0.5); k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)* image->columns+x); SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i], q); continue; } pixel=0.0; if ((resize_traits & BlendPixelTrait) == 0) { /* No alpha blending. 
*/ for (j=0; j < n; j++) { k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[j].weight; pixel+=alpha*p[k*GetPixelChannels(image)+i]; } SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q); continue; } gamma=0.0; for (j=0; j < n; j++) { k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k* GetPixelChannels(image)); pixel+=alpha*p[k*GetPixelChannels(image)+i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(resize_image); } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif (*progress)++; proceed=SetImageProgress(image,ResizeImageTag,*progress,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } MagickExport Image *ResizeImage(const Image *image,const size_t columns, const size_t rows,const FilterType filter,ExceptionInfo *exception) { double x_factor, y_factor; FilterType filter_type; Image *filter_image, *resize_image; MagickOffsetType offset; MagickSizeType span; MagickStatusType status; ResizeFilter *resize_filter; /* Acquire resize image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows) && (filter == UndefinedFilter)) return(CloneImage(image,0,0,MagickTrue,exception)); /* Acquire resize filter. */ x_factor=(double) columns/(double) image->columns; y_factor=(double) rows/(double) image->rows; filter_type=LanczosFilter; if (filter != UndefinedFilter) filter_type=filter; else if ((x_factor == 1.0) && (y_factor == 1.0)) filter_type=PointFilter; else if ((image->storage_class == PseudoClass) || (image->alpha_trait != UndefinedPixelTrait) || ((x_factor*y_factor) > 1.0)) filter_type=MitchellFilter; resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) resize_image=AccelerateResizeImage(image,columns,rows,resize_filter, exception); if (resize_image != (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } #endif resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } if (x_factor > y_factor) filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception); else filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception); if (filter_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(DestroyImage(resize_image)); } /* Resize image. 
*/ offset=0; if (x_factor > y_factor) { span=(MagickSizeType) (filter_image->columns+rows); status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span, &offset,exception); status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor, span,&offset,exception); } else { span=(MagickSizeType) (filter_image->rows+columns); status=VerticalFilter(resize_filter,image,filter_image,y_factor,span, &offset,exception); status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor, span,&offset,exception); } /* Free resources. */ filter_image=DestroyImage(filter_image); resize_filter=DestroyResizeFilter(resize_filter); if (status == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } resize_image->type=image->type; return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SampleImage() scales an image to the desired dimensions with pixel % sampling. Unlike other scaling methods, this method does not introduce % any additional color into the scaled image. % % The format of the SampleImage method is: % % Image *SampleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the sampled image. % % o rows: the number of rows in the sampled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SampleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define SampleImageTag "Sample/Image" CacheView *image_view, *sample_view; Image *sample_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t x1; ssize_t *x_offset, y; PointInfo sample_offset; /* Initialize sampled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Set the sampling offset, default is in the mid-point of sample regions. */ sample_offset.x=sample_offset.y=0.5-MagickEpsilon; { const char *value; value=GetImageArtifact(image,"sample:offset"); if (value != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; (void) ParseGeometry(value,&geometry_info); flags=ParseGeometry(value,&geometry_info); sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon; if ((flags & SigmaValue) != 0) sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon; } } /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x1=0; x1 < (ssize_t) sample_image->columns; x1++) x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/ sample_image->columns); /* Sample each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sample_view=AcquireAuthenticCacheView(sample_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,sample_image,sample_image->rows,1) #endif for (y=0; y < (ssize_t) sample_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t y_offset; if (status == MagickFalse) continue; y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/ sample_image->rows); p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1, exception); q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } /* Sample each column. */ for (x=0; x < (ssize_t) sample_image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(sample_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++) { PixelChannel channel; PixelTrait image_traits, traits; channel=GetPixelChannelChannel(sample_image,i); traits=GetPixelChannelTraits(sample_image,channel); image_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (image_traits == UndefinedPixelTrait)) continue; SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels( image)+i],q); } q+=GetPixelChannels(sample_image); } if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) 
RelinquishMagickMemory(x_offset); sample_image->type=image->type; if (status == MagickFalse) sample_image=DestroyImage(sample_image); return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ScaleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define ScaleImageTag "Scale/Image" CacheView *image_view, *scale_view; double alpha, pixel[CompositePixelChannel], *scale_scanline, *scanline, *x_vector, *y_vector; Image *scale_image; MagickBooleanType next_column, next_row, proceed, status; PixelTrait scale_traits; PointInfo scale, span; register ssize_t i; ssize_t n, number_rows, y; /* Initialize scaled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); scale_image=CloneImage(image,columns,rows,MagickTrue,exception); if (scale_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse) { scale_image=DestroyImage(scale_image); return((Image *) NULL); } /* Allocate memory. */ x_vector=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*x_vector)); scanline=x_vector; if (image->rows != scale_image->rows) scanline=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*scanline)); scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns, MaxPixelChannels*sizeof(*scale_scanline)); y_vector=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*y_vector)); if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) || (x_vector == (double *) NULL) || (y_vector == (double *) NULL)) { if ((image->rows != scale_image->rows) && (scanline != (double *) NULL)) scanline=(double *) RelinquishMagickMemory(scanline); if (scale_scanline != (double *) NULL) scale_scanline=(double *) RelinquishMagickMemory(scale_scanline); if (x_vector != (double *) NULL) x_vector=(double *) RelinquishMagickMemory(x_vector); if (y_vector != (double *) NULL) y_vector=(double *) RelinquishMagickMemory(y_vector); scale_image=DestroyImage(scale_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Scale image. 
*/ number_rows=0; next_row=MagickTrue; span.y=1.0; scale.y=(double) scale_image->rows/(double) image->rows; (void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns* sizeof(*y_vector)); n=0; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); scale_view=AcquireAuthenticCacheView(scale_image,exception); for (y=0; y < (ssize_t) scale_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) break; q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } alpha=1.0; if (scale_image->rows == image->rows) { /* Read a new scanline. */ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } } else { /* Scale Y direction. */ while (scale.y < span.y) { if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } number_rows++; } for (x=0; x < (ssize_t) image->columns; x++) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) y_vector[x*GetPixelChannels(image)+i]+=scale.y* x_vector[x*GetPixelChannels(image)+i]; span.y-=scale.y; scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } number_rows++; next_row=MagickFalse; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y* x_vector[x*GetPixelChannels(image)+i]; scanline[x*GetPixelChannels(image)+i]=pixel[i]; y_vector[x*GetPixelChannels(image)+i]=0.0; } } scale.y-=span.y; if (scale.y <= 0) { scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } span.y=1.0; } if (scale_image->columns == image->columns) { /* Transfer scanline to scaled image. 
*/ for (x=0; x < (ssize_t) scale_image->columns; x++) { if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(scale_image); continue; } if (image->alpha_trait != UndefinedPixelTrait) { alpha=QuantumScale*scanline[x*GetPixelChannels(image)+ GetPixelChannelOffset(image,AlphaPixelChannel)]; alpha=PerceptibleReciprocal(alpha); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); scale_traits=GetPixelChannelTraits(scale_image,channel); if ((traits == UndefinedPixelTrait) || (scale_traits == UndefinedPixelTrait)) continue; if ((traits & BlendPixelTrait) == 0) { SetPixelChannel(scale_image,channel,ClampToQuantum( scanline[x*GetPixelChannels(image)+i]),q); continue; } SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[ x*GetPixelChannels(image)+i]),q); } q+=GetPixelChannels(scale_image); } } else { ssize_t t; /* Scale X direction. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; next_column=MagickFalse; span.x=1.0; t=0; for (x=0; x < (ssize_t) image->columns; x++) { scale.x=(double) scale_image->columns/(double) image->columns; while (scale.x >= span.x) { if (next_column != MagickFalse) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; t++; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i]; scale_scanline[t*GetPixelChannels(image)+i]=pixel[i]; } scale.x-=span.x; span.x=1.0; next_column=MagickTrue; } if (scale.x > 0) { if (next_column != MagickFalse) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; next_column=MagickFalse; t++; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) 
pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i]; span.x-=scale.x; } } if (span.x > 0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i]; } if ((next_column == MagickFalse) && (t < (ssize_t) scale_image->columns)) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) scale_scanline[t*GetPixelChannels(image)+i]=pixel[i]; /* Transfer scanline to scaled image. */ for (x=0; x < (ssize_t) scale_image->columns; x++) { if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(scale_image); continue; } if (image->alpha_trait != UndefinedPixelTrait) { alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+ GetPixelChannelOffset(image,AlphaPixelChannel)]; alpha=PerceptibleReciprocal(alpha); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); scale_traits=GetPixelChannelTraits(scale_image,channel); if ((traits == UndefinedPixelTrait) || (scale_traits == UndefinedPixelTrait)) continue; if ((traits & BlendPixelTrait) == 0) { SetPixelChannel(scale_image,channel,ClampToQuantum( scale_scanline[x*GetPixelChannels(image)+i]),q); continue; } SetPixelChannel(scale_image,channel,ClampToQuantum(alpha* scale_scanline[x*GetPixelChannels(image)+i]),q); } q+=GetPixelChannels(scale_image); } } if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse) { status=MagickFalse; break; } proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } scale_view=DestroyCacheView(scale_view); image_view=DestroyCacheView(image_view); /* Free allocated memory. 
*/ y_vector=(double *) RelinquishMagickMemory(y_vector); scale_scanline=(double *) RelinquishMagickMemory(scale_scanline); if (scale_image->rows != image->rows) scanline=(double *) RelinquishMagickMemory(scanline); x_vector=(double *) RelinquishMagickMemory(x_vector); scale_image->type=image->type; if (status == MagickFalse) scale_image=DestroyImage(scale_image); return(scale_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h u m b n a i l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThumbnailImage() changes the size of an image to the given dimensions and % removes any associated profiles. The goal is to produce small low cost % thumbnail images suited for display on the Web. % % The format of the ThumbnailImage method is: % % Image *ThumbnailImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    filename[MagickPathExtent],
    value[MagickPathExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  double
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    For large reductions, pre-shrink with cheap pixel sampling (SampleFactor
    oversampled) before the filtered resize; small targets go straight to
    ResizeImage().
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Set freedesktop.org-style thumbnail properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  GetPathComponent(image->magick_filename,TailPath,filename);
  (void) CopyMagickString(value,filename,MagickPathExtent);
  /*
    Only read attributes.st_mtime when GetPathAttributes() succeeded; the
    previous unconditional FormatLocaleString() call read an uninitialized
    struct stat on failure (and its result was immediately overwritten by
    FormatMagickSize() below anyway).
  */
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    (void) FormatImageProperty(thumbnail_image,"Thumb::MTime","%.20g",(double)
      attributes.st_mtime);
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  (void) SetImageProperty(thumbnail_image,"software",MagickAuthoritativeURL,
    exception);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Width","%.20g",
    (double) image->magick_columns);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Height","%.20g",
    (double) image->magick_rows);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Document::Pages","%.20g",
    (double) GetImageListLength(image));
  return(thumbnail_image);
}
GB_binop__pow_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_fc64 // A.*B function (eWiseMult): GB_AemultB__pow_fc64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__pow_fc64 // C+=b function (dense accum): GB_Cdense_accumb__pow_fc64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_fc64 // C=scalar+B GB_bind1st__pow_fc64 // C=scalar+B' GB_bind1st_tran__pow_fc64 // C=A+scalar GB_bind2nd__pow_fc64 // C=A'+scalar GB_bind2nd_tran__pow_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_cpow (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_cpow (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_FC64 || GxB_NO_POW_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pow_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pow_fc64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pow_fc64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pow_fc64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pow_fc64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct 
*GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__pow_fc64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t bij = Bx [p] ; Cx [p] = GB_cpow (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__pow_fc64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; Cx [p] = GB_cpow (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef 
GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_cpow (x, aij) ; \ } GrB_Info GB_bind1st_tran__pow_fc64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_cpow (aij, y) ; \ } GrB_Info GB_bind2nd_tran__pow_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
opencl_krb5pa-sha1_fmt_plug.c
/* * Kerberos 5 "PA ENC TIMESTAMP" by magnum & Dhiru * * Pcap file -> input file: * 1. tshark -r capture.pcapng -T pdml > ~/capture.pdml * 2. krbng2john.py ~/capture.pdml > krb5.in * 3. Run john on krb5.in * * http://www.ietf.org/rfc/rfc4757.txt * http://www.securiteam.com/windowsntfocus/5BP0H0A6KM.html * * Input format is 'user:$krb5pa$etype$user$realm$salt$timestamp+checksum' * * NOTE: Checksum implies last 12 bytes of PA_ENC_TIMESTAMP value in AS-REQ * packet. * * Default Salt: realm + user * * AES-256 encryption & decryption of AS-REQ timestamp in Kerberos v5 * See the following RFC for more details about the crypto & algorithms used: * * RFC3961 - Encryption and Checksum Specifications for Kerberos 5 * RFC3962 - Advanced Encryption Standard (AES) Encryption for Kerberos 5 * * march 09 / kevin devine <wyse101 0x40 gmail.com> * * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. 
* * This software is Copyright (c) 2012 Dhiru Kholia (dhiru at openwall.com) and * released under same terms as above */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_krb5pa_sha1; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_krb5pa_sha1); #else #include <errno.h> #include <string.h> #include <stdlib.h> #include <ctype.h> #include "arch.h" #include "misc.h" #include "formats.h" #include "options.h" #include "common.h" #include "unicode.h" #include "config.h" #include "aes.h" #include "krb5_common.h" #include "common-opencl.h" #define OUTLEN 32 #include "opencl_pbkdf2_hmac_sha1.h" #include "hmac_sha.h" #include "loader.h" #define FORMAT_LABEL "krb5pa-sha1-opencl" #define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 17/18" /* aes-cts-hmac-sha1-96 */ #define FORMAT_TAG "$krb5pa$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1001 #define BINARY_SIZE 12 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 1 #define MAX_SALTLEN 52 #define MAX_REALMLEN MAX_SALTLEN #define MAX_USERLEN MAX_SALTLEN #define TIMESTAMP_SIZE 44 #define CHECKSUM_SIZE BINARY_SIZE #define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 /* This handles all sizes */ #define GETPOS(i, index) (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width) /* This is faster but can't handle size 3 */ //#define GETPOS(i, index) (((index) & (ocl_v_width - 1)) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width) static struct fmt_tests tests[] = { {"$krb5pa$18$user1$EXAMPLE.COM$$2a0e68168d1eac344da458599c3a2b33ff326a061449fcbc242b212504e484d45903c6a16e2d593912f56c93883bf697b325193d62a8be9c", "openwall"}, 
{"$krb5pa$18$user1$EXAMPLE.COM$$a3918bd0381107feedec8db0022bdf3ac56e534ed54d13c62a7013a47713cfc31ef4e7e572f912fa4164f76b335e588bf29c2d17b11c5caa", "openwall"}, {"$krb5pa$18$l33t$EXAMPLE.COM$$98f732b309a1d7ef2355a974842a32894d911e97150f5d57f248e1c2632fbd3735c5f156532ccae0341e6a2d779ca83a06021fe57dafa464", "openwall"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$$64dfeee04be2b2e0423814e0df4d0f960885aca4efffe6cb5694c4d34690406071c4968abd2c153ee42d258c5e09a41269bbcd7799f478d3", "password@123"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$AD.EXAMPLE.COMaduser$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"}, /* etype 17 hash obtained using MiTM etype downgrade attack */ {"$krb5pa$17$user1$EXAMPLE.COM$$c5461873dc13665771b98ba80be53939e906d90ae1ba79cf2e21f0395e50ee56379fbef4d0298cfccfd6cf8f907329120048fd05e8ae5df4", "openwall"}, {NULL}, }; static cl_mem mem_in, mem_out, mem_salt, mem_state, pinned_in, pinned_out; static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final; static struct fmt_main *self; static struct custom_salt { int type; int etype; unsigned char realm[64]; unsigned char user[64]; unsigned char salt[64]; /* realm + user */ unsigned char ct[TIMESTAMP_SIZE]; } *cur_salt; static unsigned char constant[16]; static unsigned char ke_input[16]; static unsigned char ki_input[16]; static size_t key_buf_size; static unsigned int *inbuffer; static pbkdf2_salt currentsalt; static pbkdf2_out *output; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static int new_keys; #define ITERATIONS (4096 - 1) #define HASH_LOOPS 105 // Must be made from factors 3, 3, 5, 7, 13 #define STEP 0 #define SEED 128 static const char * warn[] = { "P xfer: ", ", init: ", ", loop: ", ", inter: ", ", final: ", ", res xfer: " }; static int 
split_events[] = { 2, -1, -1 }; //This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl_autotune.h" #include "memdbg.h" /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { size_t s; s = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init); s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop)); s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final)); return s; } #if 0 struct fmt_main *me; #endif static void create_clobj(size_t gws, struct fmt_main *self) { gws *= ocl_v_width; key_buf_size = 64 * gws; // Allocate memory pinned_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, key_buf_size, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating pinned in"); mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating mem in"); inbuffer = clEnqueueMapBuffer(queue[gpu_id], pinned_in, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, key_buf_size, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory"); mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating mem_state"); mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(pbkdf2_salt), &currentsalt, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating mem setting"); pinned_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(pbkdf2_out) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating pinned out"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating mem out"); output = clEnqueueMapBuffer(queue[gpu_id], pinned_out, CL_TRUE, CL_MAP_READ, 0, sizeof(pbkdf2_out) * gws, 0, NULL, NULL, 
&ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory"); HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument"); HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument"); HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument"); HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument"); HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument"); crypt_out = mem_alloc(sizeof(*crypt_out) * gws); } static void release_clobj(void) { if (crypt_out) { HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_in, inbuffer, 0, NULL, NULL), "Error Unmapping mem in"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_out, output, 0, NULL, NULL), "Error Unmapping mem in"); HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings"); HANDLE_CLERROR(clReleaseMemObject(pinned_in), "Release pinned_in"); HANDLE_CLERROR(clReleaseMemObject(pinned_out), "Release pinned_out"); HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release pinned_in"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem_out"); HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem_salt"); HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state"); MEM_FREE(crypt_out); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release Kernel"); HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release Kernel"); 
HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release Kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct fmt_main *_self) { unsigned char usage[5]; static char valgo[sizeof(ALGORITHM_NAME) + 8] = ""; self = _self; opencl_prepare_dev(gpu_id); /* VLIW5 does better with just 2x vectors due to GPR pressure */ if (!options.v_width && amd_vliw5(device_info[gpu_id])) ocl_v_width = 2; else ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int)); if (ocl_v_width > 1) { /* Run vectorized kernel */ snprintf(valgo, sizeof(valgo), ALGORITHM_NAME " %ux", ocl_v_width); self->params.algorithm_name = valgo; } // generate 128 bits from 40 bits of "kerberos" string nfold(8 * 8, (unsigned char*)"kerberos", 128, constant); memset(usage,0,sizeof(usage)); usage[3] = 0x01; // key number in big-endian format usage[4] = 0xAA; // used to derive Ke nfold(sizeof(usage) * 8, usage, sizeof(ke_input) * 8, ke_input); memset(usage,0,sizeof(usage)); usage[3] = 0x01; // key number in big-endian format usage[4] = 0x55; // used to derive Ki nfold(sizeof(usage) * 8, usage, sizeof(ki_input) * 8, ki_input); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[128]; snprintf(build_opts, sizeof(build_opts), "-DHASH_LOOPS=%u -DITERATIONS=%u -DOUTLEN=%u " "-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u", HASH_LOOPS, ITERATIONS, OUTLEN, PLAINTEXT_LENGTH, ocl_v_width); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts); pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel"); crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel"); pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel"); //Initialize openCL tuning (library) for this format. 
opencl_init_auto_setup(SEED, 2 * HASH_LOOPS, split_events, warn, 2, self, create_clobj, release_clobj, ocl_v_width * sizeof(pbkdf2_state), 0, db); //Auto tune execution from shared/included code. autotune_run(self, 4 * ITERATIONS + 4, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 5000000000ULL)); } } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *data = ciphertext; int type, saltlen = 0; // tag is mandatory if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; data += FORMAT_TAG_LEN; // etype field, 17 or 18 p = strchr(data, '$'); if (!p || p - data != 2) return 0; type = atoi(data); if (type < 17 || type > 18) return 0; data = p + 1; // user field p = strchr(data, '$'); if (!p || p - data > MAX_USERLEN) return 0; saltlen += p - data; data = p + 1; // realm field p = strchr(data, '$'); if (!p || p - data > MAX_REALMLEN) return 0; saltlen += p - data; data = p + 1; // salt field p = strchr(data, '$'); if (!p) return 0; // if salt is empty, realm.user is used instead if (p - data) saltlen = p - data; data = p + 1; // We support a max. total salt length of 52. // We could opt to emit a warning if rejected here. 
if (saltlen > MAX_SALTLEN) { static int warned = 0; if (!ldr_in_pot) if (!warned++) fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL); return 0; } // 56 bytes (112 hex chars) encrypted timestamp + checksum if (strlen(data) != 2 * (TIMESTAMP_SIZE + CHECKSUM_SIZE) || strspn(data, HEXCHARS_all) != strlen(data)) return 0; return 1; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; p = strtokm(ctcopy, "$"); cs.etype = atoi(p); p = strtokm(NULL, "$"); if (p[-1] == '$') cs.user[0] = 0; else { strcpy((char*)cs.user, p); p = strtokm(NULL, "$"); } if (p[-1] == '$') cs.realm[0] = 0; else { strcpy((char*)cs.realm, p); p = strtokm(NULL, "$"); } if (p[-1] == '$') { strcpy((char*)cs.salt, (char*)cs.realm); strcat((char*)cs.salt, (char*)cs.user); } else { strcpy((char*)cs.salt, p); p = strtokm(NULL, "$"); } for (i = 0; i < TIMESTAMP_SIZE; i++) cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void clear_keys(void) { memset(inbuffer, 0, key_buf_size); } static void set_key(char *key, int index) { int i; int length = strlen(key); for (i = 0; i < length; i++) ((char*)inbuffer)[GETPOS(i, index)] = key[i]; new_keys = 1; } static char* get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; int i = 0; while (i < PLAINTEXT_LENGTH && (ret[i] = ((char*)inbuffer)[GETPOS(i, index)])) i++; ret[i] = 0; return ret; } static char *split(char *ciphertext, int index, struct fmt_main *pFmt) { static char out[TOTAL_LENGTH + 1]; char in[TOTAL_LENGTH + 1]; char salt[MAX_SALTLEN + 1]; char *data; char *e, *u, *r, *s, *tc; strnzcpy(in, ciphertext, sizeof(in)); tc = strrchr(in, '$'); *tc++ = 0; s = strrchr(in, '$'); *s++ = 0; r = strrchr(in, '$'); *r++ = 0; u = strrchr(in, '$'); *u++ = 0; e = in + 8; /* Default salt is 
user.realm */ if (!*s) { snprintf(salt, sizeof(salt), "%s%s", r, u); s = salt; } snprintf(out, sizeof(out), "%s%s$%s$%s$%s$%s", FORMAT_TAG, e, u, r, s, tc); data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1; strlwr(data); return out; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1 + TIMESTAMP_SIZE * 2; /* skip to checksum field */ for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; currentsalt.length = strlen((char*)cur_salt->salt); currentsalt.iterations = ITERATIONS; memcpy(currentsalt.salt, cur_salt->salt, currentsalt.length); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL), "Copy setting to gpu"); } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int i; int key_size; size_t scalar_gws; size_t *lws = local_work_size ? 
&local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size); scalar_gws = global_work_size * ocl_v_width; if (cur_salt->etype == 17) key_size = 16; else key_size = 32; // Copy data to gpu if (ocl_autotune_running || new_keys) { BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); new_keys = 0; } // Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run initial kernel"); for (i = 0; i < (ocl_autotune_running ? 1 : ITERATIONS / HASH_LOOPS); i++) { BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel"); BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel"); opencl_process_event(); } BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run intermediate kernel"); for (i = 0; i < (ocl_autotune_running ? 
1 : ITERATIONS / HASH_LOOPS); i++) { BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, NULL), "Run loop kernel (2nd pass)"); BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel"); opencl_process_event(); } BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[4]), "Run final kernel (SHA1)"); BENCH_CLERROR(clFinish(queue[gpu_id]), "Failed running final kernel"); // Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, multi_profilingEvent[5]), "Copy result back"); if (!ocl_autotune_running) { #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < count; i++) { unsigned char base_key[32]; unsigned char Ke[32]; unsigned char plaintext[TIMESTAMP_SIZE]; // pbkdf2((const unsigned char*)saved_key[i], len, (unsigned char *)cur_salt->salt,strlen((char*)cur_salt->salt), 4096, (unsigned int*)tkey); dk(base_key, (unsigned char*)output[i].dk, key_size, constant, 32); dk(Ke, base_key, key_size, ke_input, 32); // Decrypt the AS-REQ timestamp encrypted with 256-bit AES. krb_decrypt(cur_salt->ct, TIMESTAMP_SIZE, plaintext, Ke, key_size); // Check a couple bytes from known plain (YYYYMMDDHHMMSSZ) and // bail out if we are out of luck. 
if (plaintext[22] == '2' && plaintext[23] == '0' && plaintext[36] == 'Z') { unsigned char Ki[32]; unsigned char checksum[20]; dk(Ki, base_key, key_size, ki_input, 32); // derive checksum of plaintext (only 96 bits used out of 160) hmac_sha1(Ki, key_size, plaintext, TIMESTAMP_SIZE, checksum, 20); memcpy(crypt_out[i], checksum, BINARY_SIZE); } else { memset(crypt_out[i], 0, BINARY_SIZE); } } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_opencl_krb5pa_sha1 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP, { NULL }, { FORMAT_TAG }, tests }, { init, done, reset, fmt_default_prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
collision.c
/** * @file collision.c * @brief Collision search routine. * @author Hanno Rein <hanno@hanno-rein.de> * * @details A collision is defined as an overlap between two particles. This * is only an approximation and works only if the timestep is small * enough. More precisely, dt << v / Rp, where v is the typical velocity * and Rp the radius of a particle. Furthermore, particles must be * approaching each other at the time when they overlap. * * * @section LICENSE * Copyright (c) 2011 Hanno Rein, Shangfei Liu * * This file is part of rebound. * * rebound is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * rebound is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with rebound. If not, see <http://www.gnu.org/licenses/>. * */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <time.h> #include "particle.h" #include "collision.h" #include "rebound.h" #include "boundary.h" #include "tree.h" #ifdef MPI #include "communication_mpi.h" #endif // MPI #define MIN(a, b) ((a) > (b) ? (b) : (a)) ///< Returns the minimum of a and b #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) ///< Returns the maximum of a and b static void reb_tree_get_nearest_neighbour_in_cell(struct reb_simulation* const r, int* collisions_N, struct reb_ghostbox gb, struct reb_ghostbox gbunmod, int ri, double p1_r, double* nearest_r2, struct reb_collision* collision_nearest, struct reb_treecell* c); void reb_collision_search(struct reb_simulation* const r){ const int N = r->N; int collisions_N = 0; const struct reb_particle* const particles = r->particles; switch (r->collision){ case REB_COLLISION_NONE: break; case REB_COLLISION_DIRECT: { // Loop over ghost boxes, but only the inner most ring. int nghostxcol = (r->nghostx>1?1:r->nghostx); int nghostycol = (r->nghosty>1?1:r->nghosty); int nghostzcol = (r->nghostz>1?1:r->nghostz); for (int gbx=-nghostxcol; gbx<=nghostxcol; gbx++){ for (int gby=-nghostycol; gby<=nghostycol; gby++){ for (int gbz=-nghostzcol; gbz<=nghostzcol; gbz++){ // Loop over all particles for (int i=0;i<N;i++){ struct reb_particle p1 = particles[i]; struct reb_ghostbox gborig = reb_boundary_get_ghostbox(r, gbx,gby,gbz); struct reb_ghostbox gb = gborig; // Precalculate shifted position gb.shiftx += p1.x; gb.shifty += p1.y; gb.shiftz += p1.z; gb.shiftvx += p1.vx; gb.shiftvy += p1.vy; gb.shiftvz += p1.vz; // Loop over all particles again for (int j=0;j<N;j++){ // Do not collide particle with itself. if (i==j) continue; struct reb_particle p2 = particles[j]; double dx = gb.shiftx - p2.x; double dy = gb.shifty - p2.y; double dz = gb.shiftz - p2.z; double sr = p1.r + p2.r; double r2 = dx*dx+dy*dy+dz*dz; // Check if particles are overlapping if (r2>sr*sr) continue; double dvx = gb.shiftvx - p2.vx; double dvy = gb.shiftvy - p2.vy; double dvz = gb.shiftvz - p2.vz; // Check if particles are approaching each other if (dvx*dx + dvy*dy + dvz*dz >0) continue; // Add particles to collision array. if (r->collisions_allocatedN<=collisions_N){ // Allocate memory if there is no space in array. // Doing it in chunks of 32 to avoid having to do it too often. 
r->collisions_allocatedN += 32; r->collisions = realloc(r->collisions,sizeof(struct reb_collision)*r->collisions_allocatedN); } r->collisions[collisions_N].p1 = i; r->collisions[collisions_N].p2 = j; r->collisions[collisions_N].gb = gborig; collisions_N++; } } } } } } break; case REB_COLLISION_MERCURIUS: { struct reb_ghostbox gborig = reb_boundary_get_ghostbox(r, 0,0,0); if (r->ri_mercurius.mode==1){ // encounters // Loop over all particles for (int i=0;i<N;i++){ struct reb_particle p1 = particles[i]; // Loop over all particles again for (int j=0;j<N;j++){ // Do not collide particle with itself. if (i==j) continue; struct reb_particle p2 = particles[j]; double dx = p1.x - p2.x; double dy = p1.y - p2.y; double dz = p1.z - p2.z; double sr = p1.r + p2.r; double r2 = dx*dx+dy*dy+dz*dz; // Check if particles are overlapping if (r2>sr*sr) continue; double dvx = p1.vx - p2.vx; double dvy = p1.vy - p2.vy; double dvz = p1.vz - p2.vz; // Check if particles are approaching each other if (dvx*dx + dvy*dy + dvz*dz >0) continue; // Add particles to collision array. if (r->collisions_allocatedN<=collisions_N){ // Allocate memory if there is no space in array. // Doing it in chunks of 32 to avoid having to do it too often. r->collisions_allocatedN += 32; r->collisions = realloc(r->collisions,sizeof(struct reb_collision)*r->collisions_allocatedN); } r->collisions[collisions_N].p1 = i; r->collisions[collisions_N].p2 = j; r->collisions[collisions_N].gb = gborig; collisions_N++; } } }else{ // Only checking for collisions with star struct reb_particle p1 = particles[0]; double dt = r->dt; // Loop over all particles again for (int j=1;j<N;j++){ // Simplified version of MERCURIUS collision prediction // This only takes the current positions as volicities. // Can be simplified. 
struct reb_particle p2 = particles[j]; const double dxn = p1.x - p2.x; const double dyn = p1.y - p2.y; const double dzn = p1.z - p2.z; const double dvxn = p1.vx - p2.vx; const double dvyn = p1.vy - p2.vy; const double dvzn = p1.vz - p2.vz; const double rn = (dxn*dxn + dyn*dyn + dzn*dzn); const double dxo = p1.x - p2.x - dt*dvxn; const double dyo = p1.y - p2.y - dt*dvyn; const double dzo = p1.z - p2.z - dt*dvzn; const double dvxo = dvxn; const double dvyo = dvyn; const double dvzo = dvzn; const double ro = (dxo*dxo + dyo*dyo + dzo*dzo); const double drndt = (dxn*dvxn+dyn*dvyn+dzn*dvzn)*2.; const double drodt = (dxo*dvxo+dyo*dvyo+dzo*dvzo)*2.; const double a = 6.*(ro-rn)+3.*dt*(drodt+drndt); const double b = 6.*(rn-ro)-2.*dt*(2.*drodt+drndt); const double c = dt*drodt; double rmin = MIN(rn,ro); const double s = b*b-4.*a*c; const double sr = sqrt(s); const double tmin1 = (-b + sr)/(2.*a); const double tmin2 = (-b - sr)/(2.*a); if (tmin1>0. && tmin1<1.){ const double rmin1 = (1.-tmin1)*(1.-tmin1)*(1.+2.*tmin1)*ro + tmin1*tmin1*(3.-2.*tmin1)*rn + tmin1*(1.-tmin1)*(1.-tmin1)*dt*drodt - tmin1*tmin1*(1.-tmin1)*dt*drndt; rmin = MIN(MAX(rmin1,0.),rmin); } if (tmin2>0. && tmin2<1.){ const double rmin2 = (1.-tmin2)*(1.-tmin2)*(1.+2.*tmin2)*ro + tmin2*tmin2*(3.-2.*tmin2)*rn + tmin2*(1.-tmin2)*(1.-tmin2)*dt*drodt - tmin2*tmin2*(1.-tmin2)*dt*drndt; rmin = MIN(MAX(rmin2,0.),rmin); } const double spr = p1.r + p2.r; // Check if particles are overlapping if (rmin>spr*spr) continue; // Add particles to collision array. if (r->collisions_allocatedN<=collisions_N){ // Allocate memory if there is no space in array. // Doing it in chunks of 32 to avoid having to do it too often. 
r->collisions_allocatedN += 32; r->collisions = realloc(r->collisions,sizeof(struct reb_collision)*r->collisions_allocatedN); } r->collisions[collisions_N].p1 = 0; r->collisions[collisions_N].p2 = j; r->collisions[collisions_N].gb = gborig; collisions_N++; r->ri_mercurius.recalculate_rhill_this_timestep = 1; } } } break; case REB_COLLISION_TREE: { // Update and simplify tree. // Prepare particles for distribution to other nodes. reb_tree_update(r); #ifdef MPI // Distribute particles and add newly received particles to tree. reb_communication_mpi_distribute_particles(r); // Prepare essential tree (and particles close to the boundary needed for collisions) for distribution to other nodes. reb_tree_prepare_essential_tree_for_collisions(r); // Transfer essential tree and particles needed for collisions. reb_communication_mpi_distribute_essential_tree_for_collisions(r); #endif // MPI // Loop over ghost boxes, but only the inner most ring. int nghostxcol = (r->nghostx>1?1:r->nghostx); int nghostycol = (r->nghosty>1?1:r->nghosty); int nghostzcol = (r->nghostz>1?1:r->nghostz); const struct reb_particle* const particles = r->particles; const int N = r->N; // Loop over all particles #pragma omp parallel for schedule(guided) for (int i=0;i<N;i++){ struct reb_particle p1 = particles[i]; struct reb_collision collision_nearest; collision_nearest.p1 = i; collision_nearest.p2 = -1; double p1_r = p1.r; double nearest_r2 = r->boxsize_max*r->boxsize_max/4.; // Loop over ghost boxes. for (int gbx=-nghostxcol; gbx<=nghostxcol; gbx++){ for (int gby=-nghostycol; gby<=nghostycol; gby++){ for (int gbz=-nghostzcol; gbz<=nghostzcol; gbz++){ // Calculated shifted position (for speedup). struct reb_ghostbox gb = reb_boundary_get_ghostbox(r, gbx,gby,gbz); struct reb_ghostbox gbunmod = gb; gb.shiftx += p1.x; gb.shifty += p1.y; gb.shiftz += p1.z; gb.shiftvx += p1.vx; gb.shiftvy += p1.vy; gb.shiftvz += p1.vz; // Loop over all root boxes. 
for (int ri=0;ri<r->root_n;ri++){ struct reb_treecell* rootcell = r->tree_root[ri]; if (rootcell!=NULL){ reb_tree_get_nearest_neighbour_in_cell(r, &collisions_N, gb, gbunmod,ri,p1_r,&nearest_r2,&collision_nearest,rootcell); } } } } } // Continue if no collision was found if (collision_nearest.p2==-1) continue; } } break; default: reb_exit("Collision routine not implemented."); } // randomize for (int i=0;i<collisions_N;i++){ int new = rand()%collisions_N; struct reb_collision c1 = r->collisions[i]; r->collisions[i] = r->collisions[new]; r->collisions[new] = c1; } // Loop over all collisions previously found in reb_collision_search(). int (*resolve) (struct reb_simulation* const r, struct reb_collision c) = r->collision_resolve; if (resolve==NULL){ // Default is hard sphere resolve = reb_collision_resolve_hardsphere; } for (int i=0;i<collisions_N;i++){ struct reb_collision c = r->collisions[i]; if (c.p1 != -1 && c.p2 != -1){ // Resolve collision int outcome = resolve(r, c); // Remove particles if (outcome & 1){ // Remove p1 if (c.p2==r->N-1 && !(r->tree_root)){ // Particles swapped c.p2 = c.p1; } reb_remove(r,c.p1,r->collision_resolve_keep_sorted); // Check for pair for (int j=i+1;j<collisions_N;j++){ struct reb_collision cp = r->collisions[j]; if (cp.p1==c.p1 || cp.p2==c.p1){ r->collisions[j].p1 = -1; r->collisions[j].p2 = -1; // Will be skipped. } if (cp.p1==r->N){ r->collisions[j].p1 = c.p1; } if (cp.p2==r->N){ r->collisions[j].p2 = c.p1; } } } if (outcome & 2){ // Remove p2 reb_remove(r,c.p2,r->collision_resolve_keep_sorted); // Check for pair for (int j=i+1;j<collisions_N;j++){ struct reb_collision cp = r->collisions[j]; if (cp.p1==c.p2 || cp.p2==c.p2){ r->collisions[j].p1 = -1; r->collisions[j].p2 = -1; // Will be skipped. } if (cp.p1==r->N){ r->collisions[j].p1 = c.p2; } if (cp.p2==r->N){ r->collisions[j].p2 = c.p2; } } } } } } /** * @brief Workaround for python setters. 
**/
void reb_set_collision_resolve(struct reb_simulation* r, int (*resolve) (struct reb_simulation* const r, struct reb_collision c)){
    r->collision_resolve = resolve;
}

/**
 * @brief Find the nearest neighbour in a cell or its daughters.
 * @details The function only returns a positive result if the particles
 *          are overlapping. Thus, the name nearest neighbour is not
 *          exactly true.
 * @param r REBOUND simulation to work on.
 * @param collisions_N Pointer to current number of collisions.
 * @param gb (Shifted) position and velocity of the particle.
 * @param gbunmod Ghostbox unmodified.
 * @param ri Index of the root box currently being searched in.
 * @param p1_r Radius of the particle (this is not in gb).
 * @param nearest_r2 Pointer to the nearest neighbour found so far.
 * @param collision_nearest Pointer to the nearest collision found so far.
 * @param c Pointer to the cell currently being searched in.
 */
static void reb_tree_get_nearest_neighbour_in_cell(struct reb_simulation* const r, int* collisions_N, struct reb_ghostbox gb, struct reb_ghostbox gbunmod, int ri, double p1_r, double* nearest_r2, struct reb_collision* collision_nearest, struct reb_treecell* c){
    const struct reb_particle* const particles = r->particles;
    if (c->pt>=0){
        // c is a leaf node
        int condition = 1;
#ifdef MPI
        int isloc = 1 ;
        isloc = reb_communication_mpi_rootbox_is_local(r, ri);
        if (isloc==1){
#endif // MPI
            /**
             * If this is a local cell, make sure particle is not colliding with itself.
             * If this is a remote cell, the particle number might be the same, even for
             * different particles.
             * TODO: This can probably be written in a cleaner way.
             */
            condition = (c->pt != collision_nearest->p1);
#ifdef MPI
        }
#endif // MPI
        if (condition){
            struct reb_particle p2;
#ifdef MPI
            if (isloc==1){
#endif // MPI
                p2 = particles[c->pt];
#ifdef MPI
            }else{
                // Particle lives on a remote node; fetch it from the receive buffer.
                int root_n_per_node = r->root_n/r->mpi_num;
                int proc_id = ri/root_n_per_node;
                p2 = r->particles_recv[proc_id][c->pt];
            }
#endif // MPI
            double dx = gb.shiftx - p2.x;
            double dy = gb.shifty - p2.y;
            double dz = gb.shiftz - p2.z;
            double r2 = dx*dx+dy*dy+dz*dz;
            // A closer neighbour has already been found
            //if (r2 > *nearest_r2) return;
            double rp = p1_r+p2.r;
            // reb_particles are not overlapping
            if (r2 > rp*rp) return;
            double dvx = gb.shiftvx - p2.vx;
            double dvy = gb.shiftvy - p2.vy;
            double dvz = gb.shiftvz - p2.vz;
            // reb_particles are not approaching each other
            if (dvx*dx + dvy*dy + dvz*dz >0) return;
            // Found a new nearest neighbour. Save it for later.
            *nearest_r2 = r2;
            collision_nearest->ri = ri;
            collision_nearest->p2 = c->pt;
            collision_nearest->gb = gbunmod;
            // Save collision in collisions array.
            // Critical section: collisions_N and the collisions array are shared
            // across the OpenMP threads of the tree search.
#pragma omp critical
            {
                if (r->collisions_allocatedN<=(*collisions_N)){
                    // Grow in chunks of 32 to avoid frequent reallocation.
                    r->collisions_allocatedN += 32;
                    r->collisions = realloc(r->collisions,sizeof(struct reb_collision)*r->collisions_allocatedN);
                }
                r->collisions[(*collisions_N)] = *collision_nearest;
                (*collisions_N)++;
            }
        }
    }else{
        // c is not a leaf node
        double dx = gb.shiftx - c->x;
        double dy = gb.shifty - c->y;
        double dz = gb.shiftz - c->z;
        double r2 = dx*dx + dy*dy + dz*dz;
        // 0.866... = sqrt(3)/2, i.e. half the space diagonal of the cubic cell.
        double rp = p1_r + r->max_radius[1] + 0.86602540378443*c->w;
        // Check if we need to decent into daughter cells
        if (r2 < rp*rp ){
            for (int o=0;o<8;o++){
                struct reb_treecell* d = c->oct[o];
                if (d!=NULL){
                    reb_tree_get_nearest_neighbour_in_cell(r, collisions_N, gb,gbunmod,ri,p1_r,nearest_r2,collision_nearest,d);
                }
            }
        }
    }
}

/* Hard-sphere collision resolver: reflects the normal component of the
 * relative velocity, scaled by the coefficient of restitution.
 * Returns 0 (neither particle is removed). */
int reb_collision_resolve_hardsphere(struct reb_simulation* const r, struct reb_collision c){
    struct reb_particle* const particles = r->particles;
    struct reb_particle p1 = particles[c.p1];
    struct reb_particle p2;
#ifdef MPI
    int isloc = reb_communication_mpi_rootbox_is_local(r, c.ri);
    if (isloc==1){
#endif // MPI
        p2 = particles[c.p2];
#ifdef MPI
    }else{
        int root_n_per_node = r->root_n/r->mpi_num;
        int proc_id = c.ri/root_n_per_node;
        p2 = r->particles_recv[proc_id][c.p2];
    }
#endif // MPI
    //  if (p1.lastcollision==t || p2.lastcollision==t) return;
    struct reb_ghostbox gb = c.gb;
    double x21 = p1.x + gb.shiftx - p2.x;
    double y21 = p1.y + gb.shifty - p2.y;
    double z21 = p1.z + gb.shiftz - p2.z;
    double rp = p1.r+p2.r;
    double oldvyouter;
    if (x21>0){
        oldvyouter = p1.vy;
    }else{
        oldvyouter = p2.vy;
    }
    // Re-check overlap and approach with the ghost shift applied.
    if (rp*rp < x21*x21 + y21*y21 + z21*z21) return 0;
    double vx21 = p1.vx + gb.shiftvx - p2.vx;
    double vy21 = p1.vy + gb.shiftvy - p2.vy;
    double vz21 = p1.vz + gb.shiftvz - p2.vz;
    if (vx21*x21 + vy21*y21 + vz21*z21 >0) return 0; // not approaching
    // Bring the two balls in the xy plane.
// NOTE: this could probably be an atan (which is faster than atan2)
    // Rotate into a frame where the separation vector lies on the positive x axis.
    double theta = atan2(z21,y21);
    double stheta = sin(theta);
    double ctheta = cos(theta);
    double vy21n = ctheta * vy21 + stheta * vz21;
    double y21n = ctheta * y21 + stheta * z21;
    // Bring the two balls onto the positive x axis.
    double phi = atan2(y21n,x21);
    double cphi = cos(phi);
    double sphi = sin(phi);
    double vx21nn = cphi * vx21 + sphi * vy21n;
    // Coefficient of restitution
    double eps= 1; // perfect bouncing by default
    if (r->coefficient_of_restitution){
        eps = r->coefficient_of_restitution(r, vx21nn);
    }
    double dvx2 = -(1.0+eps)*vx21nn;
    // Enforce a minimum separation velocity so particles do not sink into each other.
    double minr = (p1.r>p2.r)?p2.r:p1.r;
    double maxr = (p1.r<p2.r)?p2.r:p1.r;
    double mindv= minr*r->minimum_collision_velocity;
    double _r = sqrt(x21*x21 + y21*y21 + z21*z21);
    mindv *= 1.-(_r - maxr)/minr;
    if (mindv>maxr*r->minimum_collision_velocity)mindv = maxr*r->minimum_collision_velocity;
    if (dvx2<mindv) dvx2 = mindv;
    // Now we are rotating backwards
    double dvx2n = cphi * dvx2;
    double dvy2n = sphi * dvx2;
    double dvy2nn = ctheta * dvy2n;
    double dvz2nn = stheta * dvy2n;
    // Applying the changes to the particles.
#ifdef MPI
    if (isloc==1){
#endif // MPI
        // Momentum-conserving split of the velocity change between the two particles.
        const double p2pf = p1.m/(p1.m+p2.m);
        particles[c.p2].vx -= p2pf*dvx2n;
        particles[c.p2].vy -= p2pf*dvy2nn;
        particles[c.p2].vz -= p2pf*dvz2nn;
        particles[c.p2].lastcollision = r->t;
#ifdef MPI
    }
#endif // MPI
    const double p1pf = p2.m/(p1.m+p2.m);
    particles[c.p1].vx += p1pf*dvx2n;
    particles[c.p1].vy += p1pf*dvy2nn;
    particles[c.p1].vz += p1pf*dvz2nn;
    particles[c.p1].lastcollision = r->t;
    // Return y-momentum change
    if (x21>0){
        r->collisions_plog += -fabs(x21)*(oldvyouter-particles[c.p1].vy) * p1.m;
        r->collisions_Nlog ++;
    }else{
        r->collisions_plog += -fabs(x21)*(oldvyouter-particles[c.p2].vy) * p2.m;
        r->collisions_Nlog ++;
    }
    return 0;
}

/* Merge collision resolver: combines the two colliding particles into one,
 * conserving mass, volume and momentum. Return value tells the caller which
 * particle to remove (1 = p1, 2 = p2). */
int reb_collision_resolve_merge(struct reb_simulation* const r, struct reb_collision c){
    // Skip if either particle already collided in this timestep.
    if (r->particles[c.p1].lastcollision==r->t || r->particles[c.p2].lastcollision==r->t) return 0;
    // Every collision will cause two callbacks (with p1/p2 interchanged).
    // Always remove particle with larger index and merge into lower index particle.
    // This will keep N_active meaningful even after mergers.
int swap = 0; int i = c.p1; int j = c.p2; //want j to be removed particle if (j<i){ swap = 1; i = c.p2; j = c.p1; } struct reb_particle* pi = &(r->particles[i]); struct reb_particle* pj = &(r->particles[j]); double invmass = 1.0/(pi->m + pj->m); //Scale out energy from collision - initial energy double Ei=0, Ef=0; if(r->track_energy_offset){ { double vx = pi->vx; double vy = pi->vy; double vz = pi->vz; Ei += 0.5*pi->m*(vx*vx + vy*vy + vz*vz); } { double vx = pj->vx; double vy = pj->vy; double vz = pj->vz; Ei += 0.5*pj->m*(vx*vx + vy*vy + vz*vz); } { double x = pi->x - pj->x; double y = pi->y - pj->y; double z = pi->z - pj->z; double _r = sqrt(x*x + y*y + z*z); Ei += - r->G*pi->m*pj->m/_r; } } // Merge by conserving mass, volume and momentum pi->vx = (pi->vx*pi->m + pj->vx*pj->m)*invmass; pi->vy = (pi->vy*pi->m + pj->vy*pj->m)*invmass; pi->vz = (pi->vz*pi->m + pj->vz*pj->m)*invmass; pi->x = (pi->x*pi->m + pj->x*pj->m)*invmass; pi->y = (pi->y*pi->m + pj->y*pj->m)*invmass; pi->z = (pi->z*pi->m + pj->z*pj->m)*invmass; pi->m = pi->m + pj->m; pi->r = pow(pow(pi->r,3.)+pow(pj->r,3.),1./3.); pi->lastcollision = r->t; // Keeping track of energy offst if(r->track_energy_offset){ { double vx = pi->vx; double vy = pi->vy; double vz = pi->vz; Ef += 0.5*pi->m*(vx*vx + vy*vy + vz*vz); } r->energy_offset += Ei - Ef; } // If hermes calculate energy offset in global - hasn't been removed from global yet if (r->ri_hermes.global){ if(r->ri_hermes.global->ri_hermes.mini_active){ r->ri_hermes.global->ri_hermes.collision_this_global_dt = 1; } } return swap?1:2; // Remove particle p2 from simulation }
/* ======== Begin second concatenated source file: update.c (GROMACS) ======== */
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 1991-2000, University of Groningen, The Netherlands. * Copyright (c) 2001-2004, The GROMACS development team, * check out http://www.gromacs.org for more information. * Copyright (c) 2012,2013, by the GROMACS development team, led by * David van der Spoel, Berk Hess, Erik Lindahl, and including many * others, as listed in the AUTHORS file in the top-level source * directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. 
*/ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdio.h> #include <math.h> #include "types/commrec.h" #include "sysstuff.h" #include "smalloc.h" #include "typedefs.h" #include "nrnb.h" #include "physics.h" #include "macros.h" #include "vec.h" #include "main.h" #include "confio.h" #include "update.h" #include "gmx_random.h" #include "futil.h" #include "mshift.h" #include "tgroup.h" #include "force.h" #include "names.h" #include "txtdump.h" #include "mdrun.h" #include "copyrite.h" #include "constr.h" #include "edsam.h" #include "pull.h" #include "disre.h" #include "orires.h" #include "gmx_wallcycle.h" #include "gmx_omp_nthreads.h" #include "gmx_omp.h" /*For debugging, start at v(-dt/2) for velolcity verlet -- uncomment next line */ /*#define STARTFROMDT2*/ typedef struct { double gdt; double eph; double emh; double em; double b; double c; double d; } gmx_sd_const_t; typedef struct { real V; real X; real Yv; real Yx; } gmx_sd_sigma_t; typedef struct { /* The random state for ngaussrand threads. * Normal thermostats need just 1 random number generator, * but SD and BD with OpenMP parallelization need 1 for each thread. 
*/
    int        ngaussrand;  /* == #OpenMP update threads for SD/BD, 1 otherwise */
    gmx_rng_t *gaussrand;
    /* BD stuff */
    real *bd_rf;
    /* SD stuff */
    gmx_sd_const_t *sdc;
    gmx_sd_sigma_t *sdsig;
    rvec           *sd_V;
    int             sd_V_nalloc;
    /* andersen temperature control stuff */
    gmx_bool *randomize_group;
    real     *boltzfac;
} gmx_stochd_t;

typedef struct gmx_update
{
    gmx_stochd_t *sd;
    /* xprime for constraint algorithms */
    rvec *xp;
    int   xp_nalloc;
    /* variable size arrays for andersen */
    gmx_bool *randatom;
    int      *randatom_list;
    gmx_bool  randatom_list_init;
    /* Variables for the deform algorithm */
    gmx_large_int_t deformref_step;
    matrix          deformref_box;
} t_gmx_update;

/* Leap-frog position/velocity update for [start,nrend), optionally with
 * Nose-Hoover (bNH) or Parrinello-Rahman (bPR) coupling, Berendsen/v-rescale
 * scaling (tcstat[].lambda), frozen dimensions and acceleration groups.
 * New positions go into xprime; v is updated in place. */
static void do_update_md(int start, int nrend, double dt,
                         t_grp_tcstat *tcstat,
                         double nh_vxi[],
                         gmx_bool bNEMD, t_grp_acc *gstat, rvec accel[],
                         ivec nFreeze[],
                         real invmass[],
                         unsigned short ptype[], unsigned short cFREEZE[],
                         unsigned short cACC[], unsigned short cTC[],
                         rvec x[], rvec xprime[], rvec v[],
                         rvec f[], matrix M,
                         gmx_bool bNH, gmx_bool bPR)
{
    double imass, w_dt;
    int    gf = 0, ga = 0, gt = 0;
    rvec   vrel;
    real   vn, vv, va, vb, vnrel;
    real   lg, vxi = 0, u;
    int    n, d;

    if (bNH || bPR)
    {
        /* Update with coupling to extended ensembles, used for
         * Nose-Hoover and Parrinello-Rahman coupling
         * Nose-Hoover uses the reversible leap-frog integrator from
         * Holian et al. Phys Rev E 52(3) : 2338, 1995
         */
        for (n = start; n < nrend; n++)
        {
            imass = invmass[n];
            if (cFREEZE)
            {
                gf = cFREEZE[n];
            }
            if (cACC)
            {
                ga = cACC[n];
            }
            if (cTC)
            {
                gt = cTC[n];
            }
            lg = tcstat[gt].lambda;
            if (bNH)
            {
                vxi = nh_vxi[gt];
            }
            rvec_sub(v[n], gstat[ga].u, vrel);

            for (d = 0; d < DIM; d++)
            {
                if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
                {
                    vnrel = (lg*vrel[d] + dt*(imass*f[n][d] - 0.5*vxi*vrel[d] - iprod(M[d], vrel)))/(1 + 0.5*vxi*dt);
                    /* do not scale the mean velocities u */
                    vn           = gstat[ga].u[d] + accel[ga][d]*dt + vnrel;
                    v[n][d]      = vn;
                    xprime[n][d] = x[n][d]+vn*dt;
                }
                else
                {
                    /* Virtual sites, shells and frozen dimensions do not move. */
                    v[n][d]      = 0.0;
                    xprime[n][d] = x[n][d];
                }
            }
        }
    }
    else if (cFREEZE != NULL ||
             nFreeze[0][XX] || nFreeze[0][YY] || nFreeze[0][ZZ] ||
             bNEMD)
    {
        /* Update with Berendsen/v-rescale coupling and freeze or NEMD */
        for (n = start; n < nrend; n++)
        {
            w_dt = invmass[n]*dt;
            if (cFREEZE)
            {
                gf = cFREEZE[n];
            }
            if (cACC)
            {
                ga = cACC[n];
            }
            if (cTC)
            {
                gt = cTC[n];
            }
            lg = tcstat[gt].lambda;

            for (d = 0; d < DIM; d++)
            {
                vn = v[n][d];
                if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
                {
                    vv = lg*vn + f[n][d]*w_dt;
                    /* do not scale the mean velocities u */
                    u  = gstat[ga].u[d];
                    va = vv + accel[ga][d]*dt;
                    vb = va + (1.0-lg)*u;
                    v[n][d]      = vb;
                    xprime[n][d] = x[n][d]+vb*dt;
                }
                else
                {
                    v[n][d]      = 0.0;
                    xprime[n][d] = x[n][d];
                }
            }
        }
    }
    else
    {
        /* Plain update with Berendsen/v-rescale coupling */
        for (n = start; n < nrend; n++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
            {
                w_dt = invmass[n]*dt;
                if (cTC)
                {
                    gt = cTC[n];
                }
                lg = tcstat[gt].lambda;

                for (d = 0; d < DIM; d++)
                {
                    vn           = lg*v[n][d] + f[n][d]*w_dt;
                    v[n][d]      = vn;
                    xprime[n][d] = x[n][d] + vn*dt;
                }
            }
            else
            {
                for (d = 0; d < DIM; d++)
                {
                    v[n][d]      = 0.0;
                    xprime[n][d] = x[n][d];
                }
            }
        }
    }
}

/* Velocity half-step of the velocity-Verlet integrator, optionally with
 * extended-ensemble (MTTK) box-coupling factors (bExtended). */
static void do_update_vv_vel(int start, int nrend, double dt,
                             t_grp_tcstat *tcstat, t_grp_acc *gstat,
                             rvec accel[], ivec nFreeze[], real invmass[],
                             unsigned short ptype[], unsigned short cFREEZE[],
                             unsigned short cACC[], rvec v[], rvec f[],
                             gmx_bool
bExtended, real veta, real alpha)
{
    double imass, w_dt;
    int    gf = 0, ga = 0;
    rvec   vrel;
    real   u, vn, vv, va, vb, vnrel;
    int    n, d;
    double g, mv1, mv2;

    if (bExtended)
    {
        /* Barostat (MTTK) coupling factors for the velocity update. */
        g   = 0.25*dt*veta*alpha;
        mv1 = exp(-g);
        mv2 = series_sinhx(g);
    }
    else
    {
        mv1 = 1.0;
        mv2 = 1.0;
    }
    for (n = start; n < nrend; n++)
    {
        w_dt = invmass[n]*dt;
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }
        if (cACC)
        {
            ga = cACC[n];
        }
        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                v[n][d] = mv1*(mv1*v[n][d] + 0.5*(w_dt*mv2*f[n][d]))+0.5*accel[ga][d]*dt;
            }
            else
            {
                v[n][d] = 0.0;
            }
        }
    }
} /* do_update_vv_vel */

/* Position full step of the velocity-Verlet integrator, optionally with
 * extended-ensemble (MTTK) box-coupling factors (bExtended). */
static void do_update_vv_pos(int start, int nrend, double dt,
                             t_grp_tcstat *tcstat, t_grp_acc *gstat,
                             rvec accel[], ivec nFreeze[], real invmass[],
                             unsigned short ptype[], unsigned short cFREEZE[],
                             rvec x[], rvec xprime[], rvec v[],
                             rvec f[], gmx_bool bExtended, real veta,
                             real alpha)
{
    double imass, w_dt;
    int    gf = 0;
    int    n, d;
    double g, mr1, mr2;

    /* Would it make more sense if Parrinello-Rahman was put here? */
    if (bExtended)
    {
        g   = 0.5*dt*veta;
        mr1 = exp(g);
        mr2 = series_sinhx(g);
    }
    else
    {
        mr1 = 1.0;
        mr2 = 1.0;
    }

    for (n = start; n < nrend; n++)
    {
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }

        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                xprime[n][d] = mr1*(mr1*x[n][d]+mr2*dt*v[n][d]);
            }
            else
            {
                xprime[n][d] = x[n][d];
            }
        }
    }
} /* do_update_vv_pos */

/* Leap-frog update with a cosine-shaped acceleration profile along x
 * (a function of z), used for viscosity calculations. */
static void do_update_visc(int start, int nrend, double dt,
                           t_grp_tcstat *tcstat,
                           double nh_vxi[],
                           real invmass[],
                           unsigned short ptype[], unsigned short cTC[],
                           rvec x[], rvec xprime[], rvec v[],
                           rvec f[], matrix M, matrix box, real cos_accel, real vcos,
                           gmx_bool bNH, gmx_bool bPR)
{
    double imass, w_dt;
    int    gt = 0;
    real   vn, vc;
    real   lg, vxi = 0, vv;
    real   fac, cosz;
    rvec   vrel;
    int    n, d;

    /* Wave vector of the cosine profile: one full period over the box height. */
    fac = 2*M_PI/(box[ZZ][ZZ]);

    if (bNH || bPR)
    {
        /* Update with coupling to extended ensembles, used for
         * Nose-Hoover and Parrinello-Rahman coupling
         */
        for (n = start; n < nrend; n++)
        {
            imass = invmass[n];
            if (cTC)
            {
                gt = cTC[n];
            }
            lg   = tcstat[gt].lambda;
            cosz = cos(fac*x[n][ZZ]);

            copy_rvec(v[n], vrel);

            vc        = cosz*vcos;
            vrel[XX] -= vc;
            if (bNH)
            {
                vxi = nh_vxi[gt];
            }
            for (d = 0; d < DIM; d++)
            {
                vn = v[n][d];

                if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
                {
                    vn = (lg*vrel[d] + dt*(imass*f[n][d] - 0.5*vxi*vrel[d] - iprod(M[d], vrel)))/(1 + 0.5*vxi*dt);
                    if (d == XX)
                    {
                        vn += vc + dt*cosz*cos_accel;
                    }
                    v[n][d]      = vn;
                    xprime[n][d] = x[n][d]+vn*dt;
                }
                else
                {
                    xprime[n][d] = x[n][d];
                }
            }
        }
    }
    else
    {
        /* Classic version of update, used with berendsen coupling */
        for (n = start; n < nrend; n++)
        {
            w_dt = invmass[n]*dt;
            if (cTC)
            {
                gt = cTC[n];
            }
            lg   = tcstat[gt].lambda;
            cosz = cos(fac*x[n][ZZ]);

            for (d = 0; d < DIM; d++)
            {
                vn = v[n][d];

                if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
                {
                    if (d == XX)
                    {
                        vc = cosz*vcos;
                        /* Do not scale the cosine velocity profile */
                        vv  = vc + lg*(vn - vc + f[n][d]*w_dt);
                        /* Add the cosine acceleration profile */
                        vv += dt*cosz*cos_accel;
                    }
                    else
                    {
                        vv = lg*(vn + f[n][d]*w_dt);
                    }
                    v[n][d]      = vv;
                    xprime[n][d] = x[n][d]+vv*dt;
                }
                else
                {
                    v[n][d]      = 0.0;
                    xprime[n][d] = x[n][d];
                }
            }
        }
    }
}

/* Allocates and initializes sd->gaussrand[i] for i=1, i<sd->ngaussrand,
 * Using seeds generated from sd->gaussrand[0].
 */
static void init_multiple_gaussrand(gmx_stochd_t *sd)
{
    int           ngr, i;
    unsigned int *seed;

    ngr = sd->ngaussrand;
    snew(seed, ngr);

    /* Draw the seeds serially from generator 0 for reproducibility. */
    for (i = 1; i < ngr; i++)
    {
        seed[i] = gmx_rng_uniform_uint32(sd->gaussrand[0]);
    }

    if (ngr != gmx_omp_nthreads_get(emntUpdate))
    {
        gmx_incons("The number of Gaussian number generators should be equal to gmx_omp_nthreads_get(emntUpdate)");
    }

#pragma omp parallel num_threads(gmx_omp_nthreads_get(emntUpdate))
    {
        int th;

        th = gmx_omp_get_thread_num();
        if (th > 0)
        {
            /* Initialize on each thread to get memory allocated thread-local */
            sd->gaussrand[th] = gmx_rng_init(seed[th]);
        }
    }

    sfree(seed);
}

/* Allocates and fills the stochastic-dynamics work data for the
 * integrator/thermostat combination selected in ir. */
static gmx_stochd_t *init_stochd(FILE *fplog, t_inputrec *ir, int nthreads)
{
    gmx_stochd_t   *sd;
    gmx_sd_const_t *sdc;
    int             ngtc, n, th;
    real            y;

    snew(sd, 1);

    /* Initiate random number generator for langevin type dynamics,
     * for BD, SD or velocity rescaling temperature coupling.
     */
    if (ir->eI == eiBD || EI_SD(ir->eI))
    {
        sd->ngaussrand = nthreads;
    }
    else
    {
        sd->ngaussrand = 1;
    }
    snew(sd->gaussrand, sd->ngaussrand);

    /* Initialize the first random generator */
    sd->gaussrand[0] = gmx_rng_init(ir->ld_seed);

    if (sd->ngaussrand > 1)
    {
        /* Initialize the rest of the random number generators,
         * using the first one to generate seeds.
*/
        init_multiple_gaussrand(sd);
    }

    ngtc = ir->opts.ngtc;

    if (ir->eI == eiBD)
    {
        snew(sd->bd_rf, ngtc);
    }
    else if (EI_SD(ir->eI))
    {
        snew(sd->sdc, ngtc);
        snew(sd->sdsig, ngtc);

        sdc = sd->sdc;
        for (n = 0; n < ngtc; n++)
        {
            if (ir->opts.tau_t[n] > 0)
            {
                /* gdt = dt/tau: friction per step for this temperature group. */
                sdc[n].gdt = ir->delta_t/ir->opts.tau_t[n];
                sdc[n].eph = exp(sdc[n].gdt/2);
                sdc[n].emh = exp(-sdc[n].gdt/2);
                sdc[n].em  = exp(-sdc[n].gdt);
            }
            else
            {
                /* No friction and noise on this group */
                sdc[n].gdt = 0;
                sdc[n].eph = 1;
                sdc[n].emh = 1;
                sdc[n].em  = 1;
            }
            if (sdc[n].gdt >= 0.05)
            {
                sdc[n].b = sdc[n].gdt*(sdc[n].eph*sdc[n].eph - 1) - 4*(sdc[n].eph - 1)*(sdc[n].eph - 1);
                sdc[n].c = sdc[n].gdt - 3 + 4*sdc[n].emh - sdc[n].em;
                sdc[n].d = 2 - sdc[n].eph - sdc[n].emh;
            }
            else
            {
                y = sdc[n].gdt/2;
                /* Seventh order expansions for small y */
                sdc[n].b = y*y*y*y*(1/3.0+y*(1/3.0+y*(17/90.0+y*7/9.0)));
                sdc[n].c = y*y*y*(2/3.0+y*(-1/2.0+y*(7/30.0+y*(-1/12.0+y*31/1260.0))));
                sdc[n].d = y*y*(-1+y*y*(-1/12.0-y*y/360.0));
            }
            if (debug)
            {
                fprintf(debug, "SD const tc-grp %d: b %g c %g d %g\n",
                        n, sdc[n].b, sdc[n].c, sdc[n].d);
            }
        }
    }
    else if (ETC_ANDERSEN(ir->etc))
    {
        int        ngtc;
        t_grpopts *opts;
        real       reft;

        opts = &ir->opts;
        ngtc = opts->ngtc;

        snew(sd->randomize_group, ngtc);
        snew(sd->boltzfac, ngtc);

        /* for now, assume that all groups, if randomized, are randomized at the same rate, i.e. tau_t is the same. */
        /* since constraint groups don't necessarily match up with temperature groups! This is checked in readir.c */

        for (n = 0; n < ngtc; n++)
        {
            reft = max(0.0, opts->ref_t[n]);
            if ((opts->tau_t[n] > 0) && (reft > 0)) /* tau_t or ref_t = 0 means that no randomization is done */
            {
                sd->randomize_group[n] = TRUE;
                sd->boltzfac[n]        = BOLTZ*opts->ref_t[n];
            }
            else
            {
                sd->randomize_group[n] = FALSE;
            }
        }
    }
    return sd;
}

void get_stochd_state(gmx_update_t upd, t_state *state)
{
    /* Note that we only get the state of the first random generator,
     * even if there are multiple. This avoids repetition.
     */
    gmx_rng_get_state(upd->sd->gaussrand[0], state->ld_rng, state->ld_rngi);
}

void set_stochd_state(gmx_update_t upd, t_state *state)
{
    gmx_stochd_t *sd;
    int           i;

    sd = upd->sd;

    gmx_rng_set_state(sd->gaussrand[0], state->ld_rng, state->ld_rngi[0]);

    if (sd->ngaussrand > 1)
    {
        /* We only end up here with SD or BD with OpenMP.
         * Destroy and reinitialize the rest of the random number generators,
         * using seeds generated from the first one.
         * Although this doesn't recover the previous state,
         * it at least avoids repetition, which is most important.
         * Exactly restoring states with all MPI+OpenMP setups is difficult
         * and as the integrator is random to start with, doesn't gain us much.
         */
        for (i = 1; i < sd->ngaussrand; i++)
        {
            gmx_rng_destroy(sd->gaussrand[i]);
        }

        init_multiple_gaussrand(sd);
    }
}

/* Creates the update data structure; allocates stochastic work data only
 * for integrators/thermostats that need random numbers. */
gmx_update_t init_update(FILE *fplog, t_inputrec *ir)
{
    t_gmx_update *upd;

    snew(upd, 1);

    if (ir->eI == eiBD || EI_SD(ir->eI) || ir->etc == etcVRESCALE || ETC_ANDERSEN(ir->etc))
    {
        upd->sd = init_stochd(fplog, ir, gmx_omp_nthreads_get(emntUpdate));
    }

    upd->xp                 = NULL;
    upd->xp_nalloc          = 0;
    upd->randatom           = NULL;
    upd->randatom_list      = NULL;
    upd->randatom_list_init = FALSE; /* we have not yet cleared the data structure at this point */

    return upd;
}

/* Stochastic-dynamics (SD) update, one-Gaussian ("sd1") variant:
 * friction, noise and force applied in a single pass. */
static void do_update_sd1(gmx_stochd_t *sd,
                          gmx_rng_t gaussrand,
                          int start, int nrend, double dt,
                          rvec accel[], ivec nFreeze[],
                          real invmass[], unsigned short ptype[],
                          unsigned short cFREEZE[], unsigned short cACC[],
                          unsigned short cTC[],
                          rvec x[], rvec xprime[], rvec v[], rvec f[],
                          rvec sd_X[],
                          int ngtc, real tau_t[], real ref_t[])
{
    gmx_sd_const_t *sdc;
    gmx_sd_sigma_t *sig;
    real            kT;
    int             gf = 0, ga = 0, gt = 0;
    real            ism, sd_V;
    int             n, d;

    sdc = sd->sdc;
    sig = sd->sdsig;

    for (n = 0; n < ngtc; n++)
    {
        kT = BOLTZ*ref_t[n];
        /* The mass is accounted for later, since this differs per atom */
        sig[n].V = sqrt(kT*(1 - sdc[n].em*sdc[n].em));
    }

    for (n = start; n < nrend; n++)
    {
        ism = sqrt(invmass[n]);
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }
        if (cACC)
        {
            ga = cACC[n];
        }
        if (cTC)
        {
            gt = cTC[n];
        }

        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                sd_V = ism*sig[gt].V*gmx_rng_gaussian_table(gaussrand);

                v[n][d] = v[n][d]*sdc[gt].em + (invmass[n]*f[n][d] + accel[ga][d])*tau_t[gt]*(1 - sdc[gt].em) + sd_V;

                xprime[n][d] = x[n][d] + v[n][d]*dt;
            }
            else
            {
                v[n][d]      = 0.0;
                xprime[n][d] = x[n][d];
            }
        }
    }
}

/* Ensure the shared sd_V work array can hold at least nrend elements. */
static void check_sd2_work_data_allocation(gmx_stochd_t *sd, int nrend)
{
    if (nrend > sd->sd_V_nalloc)
    {
        sd->sd_V_nalloc = over_alloc_dd(nrend);
        srenew(sd->sd_V, sd->sd_V_nalloc);
    }
}

/* Precompute per-temperature-group noise amplitudes for the sd2 update. */
static void do_update_sd2_Tconsts(gmx_stochd_t *sd,
                                  int ngtc,
                                  const real tau_t[],
                                  const real ref_t[])
{
    /* This is separated from the update below, because it is single threaded */
    gmx_sd_const_t *sdc;
    gmx_sd_sigma_t *sig;
    int             gt;
    real            kT;

    sdc = sd->sdc;
    sig = sd->sdsig;

    for (gt = 0; gt < ngtc; gt++)
    {
        kT = BOLTZ*ref_t[gt];
        /* The mass is accounted for later, since this differs per atom */
        sig[gt].V  = sqrt(kT*(1-sdc[gt].em));
        sig[gt].X  = sqrt(kT*sqr(tau_t[gt])*sdc[gt].c);
        sig[gt].Yv = sqrt(kT*sdc[gt].b/sdc[gt].c);
        sig[gt].Yx = sqrt(kT*sqr(tau_t[gt])*sdc[gt].b/(1-sdc[gt].em));
    }
}

/* Stochastic-dynamics (SD) update, two-Gaussian ("sd2") variant.
 * Called twice per step: bFirstHalf==TRUE before constraints,
 * bFirstHalf==FALSE afterwards. */
static void do_update_sd2(gmx_stochd_t *sd,
                          gmx_rng_t gaussrand,
                          gmx_bool bInitStep,
                          int start, int nrend,
                          rvec accel[], ivec nFreeze[],
                          real invmass[], unsigned short ptype[],
                          unsigned short cFREEZE[], unsigned short cACC[],
                          unsigned short cTC[],
                          rvec x[], rvec xprime[], rvec v[], rvec f[],
                          rvec sd_X[],
                          const real tau_t[],
                          gmx_bool bFirstHalf)
{
    gmx_sd_const_t *sdc;
    gmx_sd_sigma_t *sig;
    /* The random part of the velocity update, generated in the first
     * half of the update, needs to be remembered for the second half.
*/
    rvec *sd_V;
    real  kT;
    int   gf = 0, ga = 0, gt = 0;
    real  vn = 0, Vmh, Xmh;
    real  ism;
    int   n, d;

    sdc  = sd->sdc;
    sig  = sd->sdsig;
    sd_V = sd->sd_V;

    for (n = start; n < nrend; n++)
    {
        ism = sqrt(invmass[n]);
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }
        if (cACC)
        {
            ga = cACC[n];
        }
        if (cTC)
        {
            gt = cTC[n];
        }

        for (d = 0; d < DIM; d++)
        {
            if (bFirstHalf)
            {
                vn = v[n][d];
            }
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                if (bFirstHalf)
                {
                    if (bInitStep)
                    {
                        /* No previous position noise exists yet; draw it now. */
                        sd_X[n][d] = ism*sig[gt].X*gmx_rng_gaussian_table(gaussrand);
                    }
                    Vmh = sd_X[n][d]*sdc[gt].d/(tau_t[gt]*sdc[gt].c) + ism*sig[gt].Yv*gmx_rng_gaussian_table(gaussrand);
                    sd_V[n][d] = ism*sig[gt].V*gmx_rng_gaussian_table(gaussrand);

                    v[n][d] = vn*sdc[gt].em + (invmass[n]*f[n][d] + accel[ga][d])*tau_t[gt]*(1 - sdc[gt].em) + sd_V[n][d] - sdc[gt].em*Vmh;

                    xprime[n][d] = x[n][d] + v[n][d]*tau_t[gt]*(sdc[gt].eph - sdc[gt].emh);
                }
                else
                {
                    /* Correct the velocities for the constraints.
                     * This operation introduces some inaccuracy,
                     * since the velocity is determined from differences in coordinates.
                     */
                    v[n][d] = (xprime[n][d] - x[n][d])/(tau_t[gt]*(sdc[gt].eph - sdc[gt].emh));

                    Xmh = sd_V[n][d]*tau_t[gt]*sdc[gt].d/(sdc[gt].em-1) + ism*sig[gt].Yx*gmx_rng_gaussian_table(gaussrand);
                    sd_X[n][d] = ism*sig[gt].X*gmx_rng_gaussian_table(gaussrand);

                    xprime[n][d] += sd_X[n][d] - Xmh;
                }
            }
            else
            {
                if (bFirstHalf)
                {
                    v[n][d]      = 0.0;
                    xprime[n][d] = x[n][d];
                }
            }
        }
    }
}

/* Precompute per-temperature-group random-force amplitudes for BD. */
static void do_update_bd_Tconsts(double dt, real friction_coefficient,
                                 int ngtc, const real ref_t[],
                                 real *rf)
{
    /* This is separated from the update below, because it is single threaded */
    int gt;

    if (friction_coefficient != 0)
    {
        for (gt = 0; gt < ngtc; gt++)
        {
            rf[gt] = sqrt(2.0*BOLTZ*ref_t[gt]/(friction_coefficient*dt));
        }
    }
    else
    {
        for (gt = 0; gt < ngtc; gt++)
        {
            rf[gt] = sqrt(2.0*BOLTZ*ref_t[gt]);
        }
    }
}

/* Brownian-dynamics position update (overdamped Langevin). */
static void do_update_bd(int start, int nrend, double dt,
                         ivec nFreeze[],
                         real invmass[], unsigned short ptype[],
                         unsigned short cFREEZE[], unsigned short cTC[],
                         rvec x[], rvec xprime[], rvec v[],
                         rvec f[], real friction_coefficient,
                         real *rf, gmx_rng_t gaussrand)
{
    /* note -- these appear to be full step velocities . . .
     */
    int  gf = 0, gt = 0;
    real vn;
    real invfr = 0;
    int  n, d;

    if (friction_coefficient != 0)
    {
        invfr = 1.0/friction_coefficient;
    }

    for (n = start; (n < nrend); n++)
    {
        /* Per-atom freeze and T-coupling group lookups (0 when absent). */
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }
        if (cTC)
        {
            gt = cTC[n];
        }
        for (d = 0; (d < DIM); d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                if (friction_coefficient != 0)
                {
                    vn = invfr*f[n][d] + rf[gt]*gmx_rng_gaussian_table(gaussrand);
                }
                else
                {
                    /* NOTE: invmass = 2/(mass*friction_constant*dt) */
                    vn = 0.5*invmass[n]*f[n][d]*dt + sqrt(0.5*invmass[n])*rf[gt]*gmx_rng_gaussian_table(gaussrand);
                }

                v[n][d]      = vn;
                xprime[n][d] = x[n][d]+vn*dt;
            }
            else
            {
                /* Virtual sites, shells and frozen dimensions stay put. */
                v[n][d]      = 0.0;
                xprime[n][d] = x[n][d];
            }
        }
    }
}

/* Debug helper: dump coordinates, constrained coordinates, velocities and
 * forces to fp.  Compiles to a no-op unless DEBUG is defined. */
static void dump_it_all(FILE *fp, const char *title,
                        int natoms, rvec x[], rvec xp[], rvec v[], rvec f[])
{
#ifdef DEBUG
    if (fp)
    {
        fprintf(fp, "%s\n", title);
        pr_rvecs(fp, 0, "x", x, natoms);
        pr_rvecs(fp, 0, "xp", xp, natoms);
        pr_rvecs(fp, 0, "v", v, natoms);
        pr_rvecs(fp, 0, "f", f, natoms);
    }
#endif
}

/* Accumulate the partial kinetic-energy tensors per temperature-coupling
 * group (OpenMP parallel over home atoms), for the case without cosine
 * acceleration. */
static void calc_ke_part_normal(rvec v[], t_grpopts *opts, t_mdatoms *md,
                                gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel,
                                gmx_bool bSaveEkinOld)
{
    int           g;
    t_grp_tcstat *tcstat  = ekind->tcstat;
    t_grp_acc    *grpstat = ekind->grpstat;
    int           nthread, thread;

    /* three main: VV with AveVel, vv with AveEkin, leap with AveEkin.
       Leap with AveVel is also an option, but not supported now.
       Additionally, if we are doing iterations.
       bEkinAveVel: If TRUE, we sum into ekin, if FALSE, into ekinh.
       bSavEkinOld: If TRUE (in the case of iteration = bIterate is TRUE),
       we don't copy over the ekinh_old.  If FALSE, we overrwrite it.
     */

    /* group velocities are calculated in update_ekindata and
     * accumulated in acumulate_groups.
     * Now the partial global and groups ekin.
     */
    for (g = 0; (g < opts->ngtc); g++)
    {
        if (!bSaveEkinOld)
        {
            copy_mat(tcstat[g].ekinh, tcstat[g].ekinh_old);
        }
        if (bEkinAveVel)
        {
            clear_mat(tcstat[g].ekinf);
        }
        else
        {
            clear_mat(tcstat[g].ekinh);
        }
        if (bEkinAveVel)
        {
            tcstat[g].ekinscalef_nhc = 1.0; /* need to clear this -- logic is complicated! */
        }
    }
    ekind->dekindl_old = ekind->dekindl;

    nthread = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nthread) schedule(static)
    for (thread = 0; thread < nthread; thread++)
    {
        int     start_t, end_t, n;
        int     ga, gt;
        rvec    v_corrt;
        real    hm;
        int     d, m;
        matrix *ekin_sum;
        real   *dekindl_sum;

        /* Static partition of the home atoms over the update threads;
         * each thread accumulates into its own work arrays. */
        start_t = md->start + ((thread+0)*md->homenr)/nthread;
        end_t   = md->start + ((thread+1)*md->homenr)/nthread;

        ekin_sum    = ekind->ekin_work[thread];
        dekindl_sum = ekind->dekindl_work[thread];

        for (gt = 0; gt < opts->ngtc; gt++)
        {
            clear_mat(ekin_sum[gt]);
        }
        *dekindl_sum = 0.0;

        ga = 0;
        gt = 0;
        for (n = start_t; n < end_t; n++)
        {
            if (md->cACC)
            {
                ga = md->cACC[n];
            }
            if (md->cTC)
            {
                gt = md->cTC[n];
            }
            hm = 0.5*md->massT[n];

            /* Subtract the acceleration-group velocity before squaring. */
            for (d = 0; (d < DIM); d++)
            {
                v_corrt[d] = v[n][d] - grpstat[ga].u[d];
            }
            for (d = 0; (d < DIM); d++)
            {
                for (m = 0; (m < DIM); m++)
                {
                    /* if we're computing a full step velocity, v_corrt[d] has v(t).
                       Otherwise, v(t+dt/2) */
                    ekin_sum[gt][m][d] += hm*v_corrt[m]*v_corrt[d];
                }
            }
            if (md->nMassPerturbed && md->bPerturbed[n])
            {
                /* Free-energy perturbation contribution dEkin/dlambda. */
                *dekindl_sum += 0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
            }
        }
    }

    /* Serial reduction of the per-thread partial sums. */
    ekind->dekindl = 0;
    for (thread = 0; thread < nthread; thread++)
    {
        for (g = 0; g < opts->ngtc; g++)
        {
            if (bEkinAveVel)
            {
                m_add(tcstat[g].ekinf, ekind->ekin_work[thread][g],
                      tcstat[g].ekinf);
            }
            else
            {
                m_add(tcstat[g].ekinh, ekind->ekin_work[thread][g],
                      tcstat[g].ekinh);
            }
        }
        ekind->dekindl += *ekind->dekindl_work[thread];
    }

    inc_nrnb(nrnb, eNR_EKIN, md->homenr);
}

/* As calc_ke_part_normal, but for cosine-accelerated (viscosity
 * calculation) runs: the cosine velocity profile is subtracted before
 * accumulating, and its amplitude mvcos is accumulated as well. */
static void calc_ke_part_visc(matrix box, rvec x[], rvec v[],
                              t_grpopts *opts, t_mdatoms *md,
                              gmx_ekindata_t *ekind,
                              t_nrnb *nrnb, gmx_bool bEkinAveVel, gmx_bool bSaveEkinOld)
{
    int           start = md->start, homenr = md->homenr;
    int           g, d, n, m, gt = 0;
    rvec          v_corrt;
    real          hm;
    t_grp_tcstat *tcstat = ekind->tcstat;
    t_cos_acc    *cosacc = &(ekind->cosacc);
    real          dekindl;
    real          fac, cosz;
    double        mvcos;

    for (g = 0; g < opts->ngtc; g++)
    {
        copy_mat(ekind->tcstat[g].ekinh, ekind->tcstat[g].ekinh_old);
        clear_mat(ekind->tcstat[g].ekinh);
    }
    ekind->dekindl_old = ekind->dekindl;

    fac     = 2*M_PI/box[ZZ][ZZ];
    mvcos   = 0;
    dekindl = 0;
    for (n = start; n < start+homenr; n++)
    {
        if (md->cTC)
        {
            gt = md->cTC[n];
        }
        hm = 0.5*md->massT[n];

        /* Note that the times of x and v differ by half a step */
        /* MRS -- would have to be changed for VV */
        cosz = cos(fac*x[n][ZZ]);
        /* Calculate the amplitude of the new velocity profile */
        mvcos += 2*cosz*md->massT[n]*v[n][XX];

        copy_rvec(v[n], v_corrt);
        /* Subtract the profile for the kinetic energy */
        v_corrt[XX] -= cosz*cosacc->vcos;
        for (d = 0; (d < DIM); d++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* if we're computing a full step velocity, v_corrt[d] has v(t).
                   Otherwise, v(t+dt/2) */
                if (bEkinAveVel)
                {
                    tcstat[gt].ekinf[m][d] += hm*v_corrt[m]*v_corrt[d];
                }
                else
                {
                    tcstat[gt].ekinh[m][d] += hm*v_corrt[m]*v_corrt[d];
                }
            }
        }
        if (md->nPerturbed && md->bPerturbed[n])
        {
            /* Free-energy perturbation contribution dEkin/dlambda. */
            dekindl += 0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
        }
    }
    ekind->dekindl = dekindl;
    cosacc->mvcos  = mvcos;

    inc_nrnb(nrnb, eNR_EKIN, homenr);
}

/* Dispatch kinetic-energy accumulation to the normal or the viscosity
 * (cosine-acceleration) variant. */
void calc_ke_part(t_state *state, t_grpopts *opts, t_mdatoms *md,
                  gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel, gmx_bool bSaveEkinOld)
{
    if (ekind->cosacc.cos_accel == 0)
    {
        calc_ke_part_normal(state->v, opts, md, ekind, nrnb, bEkinAveVel, bSaveEkinOld);
    }
    else
    {
        calc_ke_part_visc(state->box, state->x, state->v, opts, md, ekind, nrnb, bEkinAveVel, bSaveEkinOld);
    }
}

/* Allocate and zero-initialize the checkpointable kinetic-energy state,
 * sized by the number of temperature-coupling groups. */
extern void init_ekinstate(ekinstate_t *ekinstate, const t_inputrec *ir)
{
    ekinstate->ekin_n = ir->opts.ngtc;
    snew(ekinstate->ekinh, ekinstate->ekin_n);
    snew(ekinstate->ekinf, ekinstate->ekin_n);
    snew(ekinstate->ekinh_old, ekinstate->ekin_n);
    snew(ekinstate->ekinscalef_nhc, ekinstate->ekin_n);
    snew(ekinstate->ekinscaleh_nhc, ekinstate->ekin_n);
    snew(ekinstate->vscale_nhc, ekinstate->ekin_n);
    ekinstate->dekindl = 0;
    ekinstate->mvcos   = 0;
}

/* Copy the current kinetic-energy data into the checkpoint state. */
void update_ekinstate(ekinstate_t *ekinstate, gmx_ekindata_t *ekind)
{
    int i;

    for (i = 0; i < ekinstate->ekin_n; i++)
    {
        copy_mat(ekind->tcstat[i].ekinh, ekinstate->ekinh[i]);
        copy_mat(ekind->tcstat[i].ekinf, ekinstate->ekinf[i]);
        copy_mat(ekind->tcstat[i].ekinh_old, ekinstate->ekinh_old[i]);
        ekinstate->ekinscalef_nhc[i] = ekind->tcstat[i].ekinscalef_nhc;
        ekinstate->ekinscaleh_nhc[i] = ekind->tcstat[i].ekinscaleh_nhc;
        ekinstate->vscale_nhc[i]     = ekind->tcstat[i].vscale_nhc;
    }

    copy_mat(ekind->ekin, ekinstate->ekin_total);
    ekinstate->dekindl = ekind->dekindl;
    ekinstate->mvcos   = ekind->cosacc.mvcos;
}

/* Restore the kinetic-energy state from a checkpoint on the master rank
 * and broadcast it to the other ranks when running in parallel. */
void restore_ekinstate_from_state(t_commrec *cr,
                                  gmx_ekindata_t *ekind, ekinstate_t *ekinstate)
{
    int i, n;

    if (MASTER(cr))
    {
        for (i = 0; i < ekinstate->ekin_n; i++)
        {
            copy_mat(ekinstate->ekinh[i],
                     ekind->tcstat[i].ekinh);
            copy_mat(ekinstate->ekinf[i], ekind->tcstat[i].ekinf);
            copy_mat(ekinstate->ekinh_old[i], ekind->tcstat[i].ekinh_old);
            ekind->tcstat[i].ekinscalef_nhc = ekinstate->ekinscalef_nhc[i];
            ekind->tcstat[i].ekinscaleh_nhc = ekinstate->ekinscaleh_nhc[i];
            ekind->tcstat[i].vscale_nhc     = ekinstate->vscale_nhc[i];
        }

        copy_mat(ekinstate->ekin_total, ekind->ekin);

        ekind->dekindl      = ekinstate->dekindl;
        ekind->cosacc.mvcos = ekinstate->mvcos;
        n                   = ekinstate->ekin_n;
    }

    if (PAR(cr))
    {
        /* Broadcast the state restored on the master to all ranks. */
        gmx_bcast(sizeof(n), &n, cr);
        for (i = 0; i < n; i++)
        {
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh[0][0]),
                      ekind->tcstat[i].ekinh[0], cr);
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinf[0][0]),
                      ekind->tcstat[i].ekinf[0], cr);
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh_old[0][0]),
                      ekind->tcstat[i].ekinh_old[0], cr);

            gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc),
                      &(ekind->tcstat[i].ekinscalef_nhc), cr);
            gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc),
                      &(ekind->tcstat[i].ekinscaleh_nhc), cr);
            gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc),
                      &(ekind->tcstat[i].vscale_nhc), cr);
        }
        gmx_bcast(DIM*DIM*sizeof(ekind->ekin[0][0]),
                  ekind->ekin[0], cr);

        gmx_bcast(sizeof(ekind->dekindl), &ekind->dekindl, cr);
        gmx_bcast(sizeof(ekind->cosacc.mvcos), &ekind->cosacc.mvcos, cr);
    }
}

/* Record the reference step and box used for continuous box deformation. */
void set_deform_reference_box(gmx_update_t upd, gmx_large_int_t step, matrix box)
{
    upd->deformref_step = step;
    copy_mat(box, upd->deformref_box);
}

/* Deform the box linearly in time from the reference box and scale the
 * coordinates (and the accumulated scaling matrix) accordingly. */
static void deform(gmx_update_t upd,
                   int start, int homenr, rvec x[], matrix box, matrix *scale_tot,
                   const t_inputrec *ir, gmx_large_int_t step)
{
    matrix bnew, invbox, mu;
    real   elapsed_time;
    int    i, j;

    elapsed_time = (step + 1 - upd->deformref_step)*ir->delta_t;
    copy_mat(box, bnew);
    for (i = 0; i < DIM; i++)
    {
        for (j = 0; j < DIM; j++)
        {
            if (ir->deform[i][j] != 0)
            {
                bnew[i][j] = upd->deformref_box[i][j] + elapsed_time*ir->deform[i][j];
            }
        }
    }
    /* We correct the off-diagonal elements,
     * which can grow indefinitely during shearing,
     * so the shifts do not get messed
up. */
    for (i = 1; i < DIM; i++)
    {
        for (j = i-1; j >= 0; j--)
        {
            /* Fold each off-diagonal element back to within half the
             * corresponding diagonal box vector. */
            while (bnew[i][j] - box[i][j] > 0.5*bnew[j][j])
            {
                rvec_dec(bnew[i], bnew[j]);
            }
            while (bnew[i][j] - box[i][j] < -0.5*bnew[j][j])
            {
                rvec_inc(bnew[i], bnew[j]);
            }
        }
    }
    m_inv_ur0(box, invbox);
    copy_mat(bnew, box);
    mmul_ur0(box, invbox, mu);

    /* Apply the (upper-triangular) scaling matrix mu to the coordinates. */
    for (i = start; i < start+homenr; i++)
    {
        x[i][XX] = mu[XX][XX]*x[i][XX]+mu[YY][XX]*x[i][YY]+mu[ZZ][XX]*x[i][ZZ];
        x[i][YY] = mu[YY][YY]*x[i][YY]+mu[ZZ][YY]*x[i][ZZ];
        x[i][ZZ] = mu[ZZ][ZZ]*x[i][ZZ];
    }
    if (*scale_tot)
    {
        /* The transposes of the scaling matrices are stored,
         * so we need to do matrix multiplication in the inverse order.
         */
        mmul_ur0(*scale_tot, mu, *scale_tot);
    }
}

/* Combine the short-range forces f with nstcalclr-1 times the long-range
 * forces f_lr (result stored back into f_lr) for twin-range time
 * stepping; the LR part is constrained separately first when needed. */
static void combine_forces(int nstcalclr,
                           gmx_constr_t constr,
                           t_inputrec *ir, t_mdatoms *md, t_idef *idef,
                           t_commrec *cr,
                           gmx_large_int_t step,
                           t_state *state, gmx_bool bMolPBC,
                           int start, int nrend,
                           rvec f[], rvec f_lr[],
                           t_nrnb *nrnb)
{
    int i, d, nm1;

    /* f contains the short-range forces + the long range forces
     * which are stored separately in f_lr. */

    if (constr != NULL && !(ir->eConstrAlg == econtSHAKE && ir->epc == epcNO))
    {
        /* We need to constrain the LR forces separately,
         * because due to the different pre-factor for the SR and LR
         * forces in the update algorithm, we can not determine
         * the constraint force for the coordinate constraining.
         * Constrain only the additional LR part of the force.
         */
        /* MRS -- need to make sure this works with trotter integration -- the
           constraint calls may not be right.*/
        constrain(NULL, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, md,
                  state->x, f_lr, f_lr, bMolPBC, state->box, state->lambda[efptBONDED], NULL,
                  NULL, NULL, nrnb, econqForce, ir->epc == epcMTTK, state->veta, state->veta);
    }

    /* Add nstcalclr-1 times the LR force to the sum of both forces
     * and store the result in forces_lr.
     */
    nm1 = nstcalclr - 1;
    for (i = start; i < nrend; i++)
    {
        for (d = 0; d < DIM; d++)
        {
            f_lr[i][d] = f[i][d] + nm1*f_lr[i][d];
        }
    }
}

/* Apply temperature coupling (Berendsen, Nose-Hoover or v-rescale) when
 * this is a coupling step; otherwise reset the scaling factors to 1. */
void update_tcouple(FILE             *fplog,
                    gmx_large_int_t   step,
                    t_inputrec       *inputrec,
                    t_state          *state,
                    gmx_ekindata_t   *ekind,
                    gmx_wallcycle_t   wcycle,
                    gmx_update_t      upd,
                    t_extmass        *MassQ,
                    t_mdatoms        *md)
{
    gmx_bool bTCouple = FALSE;
    real     dttc;
    int      i, start, end, homenr, offset;

    /* if using vv with trotter decomposition methods, we do this elsewhere in the code */
    if (inputrec->etc != etcNO &&
        !(IR_NVT_TROTTER(inputrec) || IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec)))
    {
        /* We should only couple after a step where energies were determined (for leapfrog versions)
           or the step energies are determined, for velocity verlet versions */

        if (EI_VV(inputrec->eI))
        {
            offset = 0;
        }
        else
        {
            offset = 1;
        }
        bTCouple = (inputrec->nsttcouple == 1 ||
                    do_per_step(step+inputrec->nsttcouple-offset,
                                inputrec->nsttcouple));
    }

    if (bTCouple)
    {
        dttc = inputrec->nsttcouple*inputrec->delta_t;

        switch (inputrec->etc)
        {
            case etcNO:
                break;
            case etcBERENDSEN:
                berendsen_tcoupl(inputrec, ekind, dttc);
                break;
            case etcNOSEHOOVER:
                nosehoover_tcoupl(&(inputrec->opts), ekind, dttc,
                                  state->nosehoover_xi, state->nosehoover_vxi, MassQ);
                break;
            case etcVRESCALE:
                vrescale_tcoupl(inputrec, ekind, dttc,
                                state->therm_integral, upd->sd->gaussrand[0]);
                break;
        }
        /* rescale in place here */
        if (EI_VV(inputrec->eI))
        {
            rescale_velocities(ekind, md, md->start, md->start+md->homenr, state->v);
        }
    }
    else
    {
        /* Set the T scaling lambda to 1 to have no scaling */
        for (i = 0; (i < inputrec->opts.ngtc); i++)
        {
            ekind->tcstat[i].lambda = 1.0;
        }
    }
}

/* Compute the pressure-coupling scaling matrix pcoupl_mu (and M for
 * Parrinello-Rahman) when this is a pressure-coupling step; otherwise
 * mu stays the identity and M zero. */
void update_pcouple(FILE             *fplog,
                    gmx_large_int_t   step,
                    t_inputrec       *inputrec,
                    t_state          *state,
                    matrix            pcoupl_mu,
                    matrix            M,
                    gmx_wallcycle_t   wcycle,
                    gmx_update_t      upd,
                    gmx_bool          bInitStep)
{
    gmx_bool bPCouple = FALSE;
    real     dtpc     = 0;
    int      i;

    /* if using Trotter pressure, we do this in coupling.c, so we leave it false.
     */
    if (inputrec->epc != epcNO && (!(IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec))))
    {
        /* We should only couple after a step where energies were determined */
        bPCouple = (inputrec->nstpcouple == 1 ||
                    do_per_step(step+inputrec->nstpcouple-1,
                                inputrec->nstpcouple));
    }

    /* Initialize mu to the identity matrix. */
    clear_mat(pcoupl_mu);
    for (i = 0; i < DIM; i++)
    {
        pcoupl_mu[i][i] = 1.0;
    }

    clear_mat(M);

    if (bPCouple)
    {
        dtpc = inputrec->nstpcouple*inputrec->delta_t;

        switch (inputrec->epc)
        {
            /* We can always pcoupl, even if we did not sum the energies
             * the previous step, since state->pres_prev is only updated
             * when the energies have been summed.
             */
            case (epcNO):
                break;
            case (epcBERENDSEN):
                if (!bInitStep)
                {
                    berendsen_pcoupl(fplog, step, inputrec, dtpc, state->pres_prev, state->box,
                                     pcoupl_mu);
                }
                break;
            case (epcPARRINELLORAHMAN):
                parrinellorahman_pcoupl(fplog, step, inputrec, dtpc, state->pres_prev,
                                        state->box, state->box_rel, state->boxv,
                                        M, pcoupl_mu, bInitStep);
                break;
            default:
                break;
        }
    }
}

/* Return the xprime work array, reallocating when the state has grown. */
static rvec *get_xprime(const t_state *state, gmx_update_t upd)
{
    if (state->nalloc > upd->xp_nalloc)
    {
        upd->xp_nalloc = state->nalloc;
        srenew(upd->xp, upd->xp_nalloc);
    }

    return upd->xp;
}

/* Apply constraints to the updated coordinates/velocities; for SD2 also
 * perform the second half of the stochastic update (with a second
 * constraint pass), then copy xprime back into state->x, unshifting via
 * the graph when one is present. */
void update_constraints(FILE             *fplog,
                        gmx_large_int_t   step,
                        real             *dvdlambda, /* the contribution to be added to the bonded interactions */
                        t_inputrec       *inputrec,  /* input record and box stuff */
                        gmx_ekindata_t   *ekind,
                        t_mdatoms        *md,
                        t_state          *state,
                        gmx_bool          bMolPBC,
                        t_graph          *graph,
                        rvec              force[],   /* forces on home particles */
                        t_idef           *idef,
                        tensor            vir_part,
                        tensor            vir,       /* tensors for virial and ekin, needed for computing */
                        t_commrec        *cr,
                        t_nrnb           *nrnb,
                        gmx_wallcycle_t   wcycle,
                        gmx_update_t      upd,
                        gmx_constr_t      constr,
                        gmx_bool          bInitStep,
                        gmx_bool          bFirstHalf,
                        gmx_bool          bCalcVir,
                        real              vetanew)
{
    gmx_bool  bExtended, bLastStep, bLog = FALSE, bEner = FALSE, bDoConstr = FALSE;
    double    dt;
    real      dt_1;
    int       start, homenr, nrend, i, n, m, g, d;
    tensor    vir_con;
    rvec     *vbuf, *xprime = NULL;
    int       nth, th;

    if (constr)
    {
        bDoConstr = TRUE;
    }
    if (bFirstHalf &&
        !EI_VV(inputrec->eI))
    {
        /* Leap-frog style integrators constrain only in the second half. */
        bDoConstr = FALSE;
    }

    /* for now, SD update is here -- though it really seems like it should be
       reformulated as a velocity verlet method, since it has two parts */

    start  = md->start;
    homenr = md->homenr;
    nrend  = start+homenr;

    dt   = inputrec->delta_t;
    dt_1 = 1.0/dt;

    /*
     *  Steps (7C, 8C)
     *  APPLY CONSTRAINTS:
     *  BLOCK SHAKE
     *
     * When doing PR pressure coupling we have to constrain the
     * bonds in each iteration. If we are only using Nose-Hoover tcoupling
     * it is enough to do this once though, since the relative velocities
     * after this will be normal to the bond vector
     */

    if (bDoConstr)
    {
        /* clear out constraints before applying */
        clear_mat(vir_part);

        xprime = get_xprime(state, upd);

        bLastStep = (step == inputrec->init_step+inputrec->nsteps);
        bLog      = (do_per_step(step, inputrec->nstlog) || bLastStep || (step < 0));
        bEner     = (do_per_step(step, inputrec->nstenergy) || bLastStep);
        /* Constrain the coordinates xprime */
        wallcycle_start(wcycle, ewcCONSTR);
        if (EI_VV(inputrec->eI) && bFirstHalf)
        {
            /* In the first VV half we constrain the velocities. */
            constrain(NULL, bLog, bEner, constr, idef,
                      inputrec, ekind, cr, step, 1, md,
                      state->x, state->v, state->v,
                      bMolPBC, state->box,
                      state->lambda[efptBONDED], dvdlambda,
                      NULL, bCalcVir ? &vir_con : NULL, nrnb, econqVeloc,
                      inputrec->epc == epcMTTK, state->veta, vetanew);
        }
        else
        {
            constrain(NULL, bLog, bEner, constr, idef,
                      inputrec, ekind, cr, step, 1, md,
                      state->x, xprime, NULL,
                      bMolPBC, state->box,
                      state->lambda[efptBONDED], dvdlambda,
                      state->v, bCalcVir ? &vir_con : NULL, nrnb, econqCoord,
                      inputrec->epc == epcMTTK, state->veta, state->veta);
        }
        wallcycle_stop(wcycle, ewcCONSTR);

        where();

        dump_it_all(fplog, "After Shake",
                    state->natoms, state->x, xprime, state->v, force);

        if (bCalcVir)
        {
            if (inputrec->eI == eiSD2)
            {
                /* A correction factor eph is needed for the SD constraint force */
                /* Here we can, unfortunately, not have proper corrections
                 * for different friction constants, so we use the first one.
                 */
                for (i = 0; i < DIM; i++)
                {
                    for (m = 0; m < DIM; m++)
                    {
                        vir_part[i][m] += upd->sd->sdc[0].eph*vir_con[i][m];
                    }
                }
            }
            else
            {
                m_add(vir_part, vir_con, vir_part);
            }
            if (debug)
            {
                pr_rvecs(debug, 0, "constraint virial", vir_part, DIM);
            }
        }
    }

    where();
    if ((inputrec->eI == eiSD2) && !(bFirstHalf))
    {
        xprime = get_xprime(state, upd);

        nth = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nth) schedule(static)
        for (th = 0; th < nth; th++)
        {
            int start_th, end_th;

            /* Static partition of the home atoms over the update threads. */
            start_th = start + ((nrend-start)* th   )/nth;
            end_th   = start + ((nrend-start)*(th+1))/nth;

            /* The second part of the SD integration */
            do_update_sd2(upd->sd, upd->sd->gaussrand[th],
                          FALSE, start_th, end_th,
                          inputrec->opts.acc, inputrec->opts.nFreeze,
                          md->invmass, md->ptype,
                          md->cFREEZE, md->cACC, md->cTC,
                          state->x, xprime, state->v, force, state->sd_X,
                          inputrec->opts.tau_t,
                          FALSE);
        }
        inc_nrnb(nrnb, eNR_UPDATE, homenr);

        if (bDoConstr)
        {
            /* Constrain the coordinates xprime */
            wallcycle_start(wcycle, ewcCONSTR);
            constrain(NULL, bLog, bEner, constr, idef,
                      inputrec, NULL, cr, step, 1, md,
                      state->x, xprime, NULL,
                      bMolPBC, state->box,
                      state->lambda[efptBONDED], dvdlambda,
                      NULL, NULL, nrnb, econqCoord, FALSE, 0, 0);
            wallcycle_stop(wcycle, ewcCONSTR);
        }
    }

    /* We must always unshift after updating coordinates; if we did not shake
       x was shifted in do_force */

    if (!(bFirstHalf)) /* in the first half of vv, no shift.
                        */
    {
        if (graph && (graph->nnodes > 0))
        {
            /* Molecules were kept whole via shift vectors; undo that. */
            unshift_x(graph, state->box, state->x, upd->xp);
            if (TRICLINIC(state->box))
            {
                inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
            }
            else
            {
                inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
            }
        }
        else
        {
#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntUpdate)) schedule(static)
            for (i = start; i < nrend; i++)
            {
                copy_rvec(upd->xp[i], state->x[i]);
            }
        }

        dump_it_all(fplog, "After unshift",
                    state->natoms, state->x, upd->xp, state->v, force);
    }
    /* ############# END the update of velocities and positions ######### */
}

/* Update the box according to the active pressure-coupling scheme and
 * scale the coordinates where the scheme requires it. */
void update_box(FILE             *fplog,
                gmx_large_int_t   step,
                t_inputrec       *inputrec,  /* input record and box stuff */
                t_mdatoms        *md,
                t_state          *state,
                t_graph          *graph,
                rvec              force[],   /* forces on home particles */
                matrix           *scale_tot,
                matrix            pcoupl_mu,
                t_nrnb           *nrnb,
                gmx_wallcycle_t   wcycle,
                gmx_update_t      upd,
                gmx_bool          bInitStep,
                gmx_bool          bFirstHalf)
{
    gmx_bool bExtended, bLastStep, bLog = FALSE, bEner = FALSE;
    double   dt;
    real     dt_1;
    int      start, homenr, nrend, i, n, m, g;
    tensor   vir_con;

    start  = md->start;
    homenr = md->homenr;
    nrend  = start+homenr;

    bExtended =
        (inputrec->etc == etcNOSEHOOVER) ||
        (inputrec->epc == epcPARRINELLORAHMAN) ||
        (inputrec->epc == epcMTTK);

    dt = inputrec->delta_t;

    where();

    /* now update boxes */
    switch (inputrec->epc)
    {
        case (epcNO):
            break;
        case (epcBERENDSEN):
            berendsen_pscale(inputrec, pcoupl_mu, state->box, state->box_rel,
                             start, homenr, state->x, md->cFREEZE, nrnb);
            break;
        case (epcPARRINELLORAHMAN):
            /* The box velocities were updated in do_pr_pcoupl in the update
             * iteration, but we dont change the box vectors until we get here
             * since we need to be able to shift/unshift above.
             */
            for (i = 0; i < DIM; i++)
            {
                for (m = 0; m <= i; m++)
                {
                    state->box[i][m] += dt*state->boxv[i][m];
                }
            }
            preserve_box_shape(inputrec, state->box_rel, state->box);

            /* Scale the coordinates */
            for (n = start; (n < start+homenr); n++)
            {
                tmvmul_ur0(pcoupl_mu, state->x[n], state->x[n]);
            }
            break;
        case (epcMTTK):
            switch (inputrec->epct)
            {
                case (epctISOTROPIC):
                    /* DIM * eta = ln V.  so DIM*eta_new = DIM*eta_old + DIM*dt*veta =>
                       ln V_new = ln V_old + 3*dt*veta => V_new = V_old*exp(3*dt*veta) =>
                       Side length scales as exp(veta*dt) */
                    msmul(state->box, exp(state->veta*dt), state->box);

                    /* Relate veta to boxv.  veta = d(eta)/dT = (1/DIM)*1/V dV/dT.
                       o If we assume isotropic scaling, and box length scaling
                       factor L, then V = L^DIM (det(M)).  So dV/dt = DIM
                       L^(DIM-1) dL/dt det(M), and veta = (1/L) dL/dt.  The
                       determinant of B is L^DIM det(M), and the determinant
                       of dB/dt is (dL/dT)^DIM det (M).  veta will be
                       (det(dB/dT)/det(B))^(1/3).  Then since M =
                       B_new*(vol_new)^(1/3), dB/dT_new = (veta_new)*B(new). */
                    msmul(state->box, state->veta, state->boxv);
                    break;
                default:
                    break;
            }
            break;
        default:
            break;
    }

    if ((!(IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec))) && scale_tot)
    {
        /* The transposes of the scaling matrices are stored,
         * therefore we need to reverse the order in the multiplication.
         */
        mmul_ur0(*scale_tot, pcoupl_mu, *scale_tot);
    }

    if (DEFORM(*inputrec))
    {
        deform(upd, start, homenr, state->x, state->box, scale_tot,
               inputrec, step);
    }
    where();
    dump_it_all(fplog, "After update",
                state->natoms, state->x, upd->xp, state->v, force);
}

/* Integrate the coordinates (and, except for VV, the velocities) one
 * step with the integrator selected in inputrec, OpenMP parallel over
 * the home atoms; results go into upd->xp (xprime). */
void update_coords(FILE             *fplog,
                   gmx_large_int_t   step,
                   t_inputrec       *inputrec,  /* input record and box stuff */
                   t_mdatoms        *md,
                   t_state          *state,
                   gmx_bool          bMolPBC,
                   rvec             *f,         /* forces on home particles */
                   gmx_bool          bDoLR,
                   rvec             *f_lr,
                   t_fcdata         *fcd,
                   gmx_ekindata_t   *ekind,
                   matrix            M,
                   gmx_wallcycle_t   wcycle,
                   gmx_update_t      upd,
                   gmx_bool          bInitStep,
                   int               UpdatePart,
                   t_commrec        *cr, /* these shouldn't be here -- need to think about it */
                   t_nrnb           *nrnb,
                   gmx_constr_t      constr,
                   t_idef           *idef)
{
    gmx_bool          bNH, bPR, bLastStep, bLog = FALSE, bEner = FALSE;
    double            dt, alpha;
    real             *imass, *imassin;
    rvec             *force;
    real              dt_1;
    int               start, homenr, nrend, i, j, d, n, m, g;
    int               blen0, blen1, iatom, jatom, nshake, nsettle, nconstr, nexpand;
    int              *icom = NULL;
    tensor            vir_con;
    rvec             *vcom, *xcom, *vall, *xall, *xin, *vin, *forcein, *fall, *xpall, *xprimein, *xprime;
    int               nth, th;

    /* Running the velocity half does nothing except for velocity verlet */
    if ((UpdatePart == etrtVELOCITY1 || UpdatePart == etrtVELOCITY2) &&
        !EI_VV(inputrec->eI))
    {
        gmx_incons("update_coords called for velocity without VV integrator");
    }

    start  = md->start;
    homenr = md->homenr;
    nrend  = start+homenr;

    xprime = get_xprime(state, upd);

    dt   = inputrec->delta_t;
    dt_1 = 1.0/dt;

    /* We need to update the NMR restraint history when time averaging is used */
    if (state->flags & (1<<estDISRE_RM3TAV))
    {
        update_disres_history(fcd, &state->hist);
    }
    if (state->flags & (1<<estORIRE_DTAV))
    {
        update_orires_history(fcd, &state->hist);
    }

    bNH = inputrec->etc == etcNOSEHOOVER;
    bPR = ((inputrec->epc == epcPARRINELLORAHMAN) || (inputrec->epc == epcMTTK));

    if (bDoLR && inputrec->nstcalclr > 1 && !EI_VV(inputrec->eI)) /* get this working with VV? */
    {
        /* Store the total force + nstcalclr-1 times the LR force
         * in forces_lr, so it can be used in a normal update algorithm
         * to produce twin time stepping.
         */
        /* is this correct in the new construction? MRS */
        combine_forces(inputrec->nstcalclr, constr, inputrec, md, idef, cr,
                       step, state, bMolPBC,
                       start, nrend, f, f_lr, nrnb);
        force = f_lr;
    }
    else
    {
        force = f;
    }

    /* ############# START The update of velocities and positions ######### */
    where();
    dump_it_all(fplog, "Before update",
                state->natoms, state->x, xprime, state->v, force);

    if (inputrec->eI == eiSD2)
    {
        check_sd2_work_data_allocation(upd->sd, nrend);

        do_update_sd2_Tconsts(upd->sd,
                              inputrec->opts.ngtc,
                              inputrec->opts.tau_t,
                              inputrec->opts.ref_t);
    }
    if (inputrec->eI == eiBD)
    {
        do_update_bd_Tconsts(dt, inputrec->bd_fric,
                             inputrec->opts.ngtc, inputrec->opts.ref_t,
                             upd->sd->bd_rf);
    }

    nth = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nth) schedule(static) private(alpha)
    for (th = 0; th < nth; th++)
    {
        int start_th, end_th;

        /* Static partition of the home atoms over the update threads. */
        start_th = start + ((nrend-start)* th   )/nth;
        end_th   = start + ((nrend-start)*(th+1))/nth;

        switch (inputrec->eI)
        {
            case (eiMD):
                if (ekind->cosacc.cos_accel == 0)
                {
                    do_update_md(start_th, end_th, dt,
                                 ekind->tcstat, state->nosehoover_vxi,
                                 ekind->bNEMD, ekind->grpstat, inputrec->opts.acc,
                                 inputrec->opts.nFreeze,
                                 md->invmass, md->ptype,
                                 md->cFREEZE, md->cACC, md->cTC,
                                 state->x, xprime, state->v, force, M,
                                 bNH, bPR);
                }
                else
                {
                    do_update_visc(start_th, end_th, dt,
                                   ekind->tcstat, state->nosehoover_vxi,
                                   md->invmass, md->ptype,
                                   md->cTC, state->x, xprime, state->v, force, M,
                                   state->box,
                                   ekind->cosacc.cos_accel,
                                   ekind->cosacc.vcos,
                                   bNH, bPR);
                }
                break;
            case (eiSD1):
                do_update_sd1(upd->sd, upd->sd->gaussrand[th],
                              start_th, end_th, dt,
                              inputrec->opts.acc, inputrec->opts.nFreeze,
                              md->invmass, md->ptype,
                              md->cFREEZE, md->cACC, md->cTC,
                              state->x, xprime, state->v, force, state->sd_X,
                              inputrec->opts.ngtc, inputrec->opts.tau_t, inputrec->opts.ref_t);
                break;
            case (eiSD2):
                /* The SD update
                   is done in 2 parts, because an extra constraint step
                 * is needed */
                do_update_sd2(upd->sd, upd->sd->gaussrand[th],
                              bInitStep, start_th, end_th,
                              inputrec->opts.acc, inputrec->opts.nFreeze,
                              md->invmass, md->ptype,
                              md->cFREEZE, md->cACC, md->cTC,
                              state->x, xprime, state->v, force, state->sd_X,
                              inputrec->opts.tau_t,
                              TRUE);
                break;
            case (eiBD):
                do_update_bd(start_th, end_th, dt,
                             inputrec->opts.nFreeze, md->invmass, md->ptype,
                             md->cFREEZE, md->cTC,
                             state->x, xprime, state->v, force,
                             inputrec->bd_fric,
                             upd->sd->bd_rf, upd->sd->gaussrand[th]);
                break;
            case (eiVV):
            case (eiVVAK):
                alpha = 1.0 + DIM/((double)inputrec->opts.nrdf[0]); /* assuming barostat coupled to group 0. */
                switch (UpdatePart)
                {
                    case etrtVELOCITY1:
                    case etrtVELOCITY2:
                        do_update_vv_vel(start_th, end_th, dt,
                                         ekind->tcstat, ekind->grpstat,
                                         inputrec->opts.acc, inputrec->opts.nFreeze,
                                         md->invmass, md->ptype,
                                         md->cFREEZE, md->cACC,
                                         state->v, force,
                                         (bNH || bPR), state->veta, alpha);
                        break;
                    case etrtPOSITION:
                        do_update_vv_pos(start_th, end_th, dt,
                                         ekind->tcstat, ekind->grpstat,
                                         inputrec->opts.acc, inputrec->opts.nFreeze,
                                         md->invmass, md->ptype, md->cFREEZE,
                                         state->x, xprime, state->v, force,
                                         (bNH || bPR), state->veta, alpha);
                        break;
                }
                break;
            default:
                gmx_fatal(FARGS, "Don't know how to update coordinates");
                break;
        }
    }
}

/* Debugging routine: correct a kinetic-energy tensor that was computed
 * without subtracting the center-of-mass velocity vcm (see the
 * derivation in the comment below). */
void correct_ekin(FILE *log, int start, int end, rvec v[], rvec vcm, real mass[],
                  real tmass, tensor ekin)
{
    /*
     * This is a debugging routine. It should not be called for production code
     *
     * The kinetic energy should calculated according to:
     *   Ekin = 1/2 m (v-vcm)^2
     * However the correction is not always applied, since vcm may not be
     * known in time and we compute
     *   Ekin' = 1/2 m v^2 instead
     * This can be corrected afterwards by computing
     *   Ekin = Ekin' + 1/2 m ( -2 v vcm + vcm^2)
     * or in hsorthand:
     *   Ekin = Ekin' - m v vcm + 1/2 m vcm^2
     */
    int    i, j, k;
    real   m, tm;
    rvec   hvcm, mv;
    tensor dekin;

    /* Local particles */
    clear_rvec(mv);

    /* Processor dependent part.
     */
    tm = 0;
    for (i = start; (i < end); i++)
    {
        /* Accumulate the local mass and momentum. */
        m   = mass[i];
        tm += m;
        for (j = 0; (j < DIM); j++)
        {
            mv[j] += m*v[i][j];
        }
    }
    /* Shortcut */
    svmul(1/tmass, vcm, vcm);
    svmul(0.5, vcm, hvcm);
    clear_mat(dekin);
    for (j = 0; (j < DIM); j++)
    {
        for (k = 0; (k < DIM); k++)
        {
            dekin[j][k] += vcm[k]*(tm*hvcm[j]-mv[j]);
        }
    }
    pr_rvecs(log, 0, "dekin", dekin, DIM);
    pr_rvecs(log, 0, " ekin", ekin, DIM);
    fprintf(log, "dekin = %g, ekin = %g vcm = (%8.4f %8.4f %8.4f)\n",
            trace(dekin), trace(ekin), vcm[XX], vcm[YY], vcm[ZZ]);
    fprintf(log, "mv = (%8.4f %8.4f %8.4f)\n",
            mv[XX], mv[YY], mv[ZZ]);
}

/* With Andersen thermostatting, randomize velocities when appropriate:
 * every step for per-particle Andersen, or every tau_t/dt steps for
 * massive Andersen.  Returns TRUE when randomization was performed. */
extern gmx_bool update_randomize_velocities(t_inputrec *ir, gmx_large_int_t step, t_mdatoms *md, t_state *state, gmx_update_t upd, t_idef *idef, gmx_constr_t constr)
{

    int  i;
    real rate = (ir->delta_t)/ir->opts.tau_t[0];
    /* proceed with andersen if 1) it's fixed probability per
       particle andersen or 2) it's massive andersen and it's tau_t/dt */
    if ((ir->etc == etcANDERSEN) || do_per_step(step, (int)(1.0/rate)))
    {
        srenew(upd->randatom, state->nalloc);
        srenew(upd->randatom_list, state->nalloc);
        if (upd->randatom_list_init == FALSE)
        {
            /* One-time initialization of the randomization bookkeeping. */
            for (i = 0; i < state->nalloc; i++)
            {
                upd->randatom[i]      = FALSE;
                upd->randatom_list[i] = 0;
            }
            upd->randatom_list_init = TRUE;
        }
        andersen_tcoupl(ir, md, state, upd->sd->gaussrand[0], rate,
                        (ir->etc == etcANDERSEN) ? idef : NULL,
                        constr ? get_nblocks(constr) : 0,
                        constr ? get_sblock(constr) : NULL,
                        upd->randatom, upd->randatom_list,
                        upd->sd->randomize_group, upd->sd->boltzfac);
        return TRUE;
    }
    return FALSE;
}
omp_dynamic_shared_memory.c
// RUN: %libomptarget-compile-nvptx64-nvidia-cuda -fopenmp-target-new-runtime // RUN: env LIBOMPTARGET_SHARED_MEMORY_SIZE=256 \ // RUN: %libomptarget-run-nvptx64-nvidia-cuda | %fcheck-nvptx64-nvidia-cuda // REQUIRES: nvptx64-nvidia-cuda #include <omp.h> #include <stdio.h> void *llvm_omp_get_dynamic_shared(); int main() { int x; #pragma omp target parallel map(from : x) { int *buf = llvm_omp_get_dynamic_shared() + 252; #pragma omp barrier if (omp_get_thread_num() == 0) *buf = 1; #pragma omp barrier if (omp_get_thread_num() == 1) x = *buf; } // CHECK: PASS if (x == 1 && llvm_omp_get_dynamic_shared() == NULL) printf("PASS\n"); }
convolution.h
/* Implicitly dealiased convolution routines.
   Copyright (C) 2010-2015
   John C. Bowman and Malcolm Roberts, Univ. of Alberta

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "Complex.h"
#include "fftw++.h"
#include "cmult-sse2.h"
#include "transposeoptions.h"

namespace fftwpp {

#ifndef __convolution_h__
#define __convolution_h__ 1

// Precomputed constants (defined in the implementation file).
extern const double sqrt3;
extern const double hsqrt3;

extern const Complex hSqrt3;
extern const Complex mhsqrt3;
extern const Complex mhalf;
extern const Complex zeta3;

// Return the smaller of a and b.
inline unsigned int min(unsigned int a, unsigned int b)
{
  return (a < b) ? a : b;
}

// Return the larger of a and b.
inline unsigned int max(unsigned int a, unsigned int b)
{
  return (a > b) ? a : b;
}

// Build the factored zeta tables.
unsigned int BuildZeta(unsigned int n, unsigned int m,
                       Complex *&ZetaH, Complex *&ZetaL,
                       unsigned int threads=1);

// Bundles the geometry, stride and MPI options threaded through the
// multi-dimensional convolution classes.
struct convolveOptions {
  unsigned int nx,ny,nz;        // |
  unsigned int stride2,stride3; // | Used internally by the MPI interface.
  utils::mpiOptions mpi;        // |
  bool toplevel;

  convolveOptions(unsigned int nx, unsigned int ny, unsigned int nz,
                  unsigned int stride2, unsigned int stride3) :
    nx(nx), ny(ny), nz(nz), stride2(stride2), stride3(stride3),
    toplevel(true) {}

  convolveOptions(unsigned int nx, unsigned int ny, unsigned int stride2,
                  utils::mpiOptions mpi, bool toplevel=true) :
    nx(nx), ny(ny), stride2(stride2), mpi(mpi), toplevel(toplevel) {}

  convolveOptions(unsigned int ny, unsigned int nz,
                  unsigned int stride2, unsigned int stride3,
                  utils::mpiOptions mpi, bool toplevel=true) :
    ny(ny), nz(nz), stride2(stride2), stride3(stride3), mpi(mpi),
    toplevel(toplevel) {}

  convolveOptions(bool toplevel=true) : nx(0), ny(0), nz(0),
                                        toplevel(toplevel) {}
};

static const convolveOptions defaultconvolveOptions;

// Signature of a complex multiplication kernel applied in the padded domain.
typedef void multiplier(Complex **, unsigned int m,
                        const unsigned int indexsize,
                        const unsigned int *index,
                        unsigned int r, unsigned int threads);
// Signature of a real multiplication kernel applied in the padded domain.
typedef void realmultiplier(double **, unsigned int m,
                            const unsigned int indexsize,
                            const unsigned int *index,
                            unsigned int r, unsigned int threads);

// Multipliers for binary convolutions.
// Multiplication kernels usable with the implicit convolution classes below.
multiplier multautoconvolution;
multiplier multautocorrelation;
multiplier multbinary;
multiplier multcorrelation;
multiplier multbinary2;
multiplier multbinary3;
multiplier multbinary4;
multiplier multbinary8;

realmultiplier multbinary;
realmultiplier multbinary2;
realmultiplier multadvection2;

// Tag types selecting a pretransform variant at compile time.
struct general {};
struct pretransform1 {};
struct pretransform2 {};
struct pretransform3 {};
struct pretransform4 {};

// In-place implicitly dealiased 1D complex convolution using
// function pointers for multiplication
class ImplicitConvolution : public ThreadBase {
private:
  unsigned int m;
  Complex **U;
  unsigned int A;
  unsigned int B;
  Complex *u;
  unsigned int s;
  Complex *ZetaH, *ZetaL;
  fft1d *BackwardsO,*ForwardsO;
  fft1d *Backwards,*Forwards;
  bool pointers;  // true when U was allocated by initpointers
  bool allocated; // true when u was allocated by this class
  unsigned int indexsize;
public:
  unsigned int *index;

  // Carve the single work buffer u into C=max(A,B) slices of m values each.
  void initpointers(Complex **&U, Complex *u) {
    unsigned int C=max(A,B);
    U=new Complex *[C];
    for(unsigned int a=0; a < C; ++a)
      U[a]=u+a*m;
    pointers=true;
  }

  void deletepointers(Complex **&U) {
    delete [] U;
  }

  void allocateindex(unsigned int n, unsigned int *i) {
    indexsize=n;
    index=i;
  }

  // Create the FFT plans and zeta tables; called from every constructor.
  void init() {
    indexsize=0;

    Complex* U0=U[0];
    // With a single input a temporary second buffer is needed for the
    // out-of-place plans only during planning.
    Complex* U1=A == 1 ? utils::ComplexAlign(m) : U[1];

    BackwardsO=new fft1d(m,1,U0,U1);
    ForwardsO=new fft1d(m,-1,U0,U1);
    threads=std::min(threads,max(BackwardsO->Threads(),ForwardsO->Threads()));

    if(A == B) {
      Backwards=new fft1d(m,1,U0);
      threads=std::min(threads,Backwards->Threads());
    }
    if(A <= B) {
      Forwards=new fft1d(m,-1,U0);
      threads=std::min(threads,Forwards->Threads());
    }

    if(A == 1) utils::deleteAlign(U1);

    s=BuildZeta(2*m,m,ZetaH,ZetaL,threads);
  }

  // m is the number of Complex data values.
  // U is an array of C distinct work arrays each of size m, where C=max(A,B)
  // A is the number of inputs.
  // B is the number of outputs.
  ImplicitConvolution(unsigned int m, Complex **U,
                      unsigned int A=2, unsigned int B=1,
                      unsigned int threads=fftw::maxthreads)
    : ThreadBase(threads), m(m), U(U), A(A), B(B),
      pointers(false), allocated(false) {
    init();
  }

  // m is the number of Complex data values.
  // u is a work array of C*m Complex values.
  // A is the number of inputs.
  // B is the number of outputs.
  ImplicitConvolution(unsigned int m, Complex *u,
                      unsigned int A=2, unsigned int B=1,
                      unsigned int threads=fftw::maxthreads)
    : ThreadBase(threads), m(m), A(A), B(B), u(u), allocated(false) {
    initpointers(U,u);
    init();
  }

  // m is the number of Complex data values.
  // A is the number of inputs.
  // B is the number of outputs.
  ImplicitConvolution(unsigned int m,
                      unsigned int A=2, unsigned int B=1,
                      unsigned int threads=fftw::maxthreads)
    : ThreadBase(threads), m(m), A(A), B(B), allocated(true) {
    u=utils::ComplexAlign(max(A,B)*m);
    initpointers(U,u);
    init();
  }

  ~ImplicitConvolution() {
    utils::deleteAlign(ZetaH);
    utils::deleteAlign(ZetaL);

    if(pointers) deletepointers(U);
    if(allocated) utils::deleteAlign(u);

    // Only the plans that init() created for this A/B combination exist.
    if(A == B) delete Backwards;
    if(A <= B) delete Forwards;

    delete ForwardsO;
    delete BackwardsO;
  }

  // F is an array of A pointers to distinct data blocks each of size m,
  // shifted by offset (contents not preserved).
  void convolve(Complex **F, multiplier *pmult, unsigned int i=0,
                unsigned int offset=0);

  void autoconvolve(Complex *f) {
    Complex *F[]={f};
    convolve(F,multautoconvolution);
  }

  void autocorrelate(Complex *f) {
    Complex *F[]={f};
    convolve(F,multautocorrelation);
  }

  // Binary convolution:
  void convolve(Complex *f, Complex *g) {
    Complex *F[]={f,g};
    convolve(F,multbinary);
  }

  // Binary correlation:
  void correlate(Complex *f, Complex *g) {
    Complex *F[]={f,g};
    convolve(F,multcorrelation);
  }

  template<class T>
  inline void pretransform(Complex **F, unsigned int k, Vec& Zetak);

  template<class T>
  void pretransform(Complex **F);

  void posttransform(Complex *f, Complex *u);
};

// In-place implicitly dealiased 1D Hermitian convolution.
class ImplicitHConvolution : public ThreadBase {
protected:
  unsigned int m;
  unsigned int c;
  bool compact;
  Complex **U;
  unsigned int A;
  unsigned int B;
  Complex *u;
  unsigned int s;
  Complex *ZetaH,*ZetaL;
  rcfft1d *rc,*rco,*rcO;
  crfft1d *cr,*cro,*crO;
  Complex *w; // Work array of size max(A,B) to hold f[c] in even case.
  bool pointers;
  bool allocated;
  bool even;
  unsigned int indexsize;
public:
  unsigned int *index;

  // Carve u into max(A,B) slices of c+1 Complex values each.
  void initpointers(Complex **&U, Complex *u) {
    unsigned int C=max(A,B);
    U=new Complex *[C];
    unsigned stride=c+1;
    for(unsigned int a=0; a < C; ++a)
      U[a]=u+a*stride;
    pointers=true;
  }

  void deletepointers(Complex **&U) {
    delete [] U;
  }

  void allocateindex(unsigned int n, unsigned int *i) {
    indexsize=n;
    index=i;
  }

  // Create the real<->complex FFT plans and zeta tables.
  void init() {
    even=m == 2*c;
    indexsize=0;

    Complex* U0=U[0];

    rc=new rcfft1d(m,U0);
    cr=new crfft1d(m,U0);

    Complex* U1=A == 1 ? utils::ComplexAlign(m) : U[1];
    rco=new rcfft1d(m,(double *) U0,U1);
    cro=new crfft1d(m,U1,(double *) U0);
    if(A == 1) utils::deleteAlign(U1);

    if(A != B) {
      rcO=rco;
      crO=cro;
    } else {
      rcO=rc;
      crO=cr;
    }

    threads=std::min(threads,std::max(rco->Threads(),cro->Threads()));

    s=BuildZeta(3*m,c+2,ZetaH,ZetaL,threads);

    w=even ? utils::ComplexAlign(max(A,B)) : u;
  }

  // m is the number of independent data values
  // U is an array of max(A,B) distinct work arrays of size c+1, where c=m/2
  // A is the number of inputs.
  // B is the number of outputs.
  ImplicitHConvolution(unsigned int m, Complex **U,
                       unsigned int A=2, unsigned int B=1,
                       unsigned int threads=fftw::maxthreads)
    : ThreadBase(threads), m(m), c(m/2), compact(true), U(U), A(A), B(B),
      pointers(false), allocated(false) {
    init();
  }

  ImplicitHConvolution(unsigned int m, bool compact, Complex **U,
                       unsigned int A=2, unsigned int B=1,
                       unsigned int threads=fftw::maxthreads)
    : ThreadBase(threads), m(m), c(m/2), compact(compact), U(U), A(A), B(B),
      pointers(false), allocated(false) {
    init();
  }

  // m is the number of independent data values
  // u is a work array of max(A,B)*(c+1) Complex values, where c=m/2
  // A is the number of inputs.
  // B is the number of outputs.
  ImplicitHConvolution(unsigned int m, Complex *u,
                       unsigned int A=2, unsigned int B=1,
                       unsigned int threads=fftw::maxthreads)
    : ThreadBase(threads), m(m), c(m/2), compact(true), A(A), B(B), u(u),
      allocated(false) {
    initpointers(U,u);
    init();
  }

  ImplicitHConvolution(unsigned int m, bool compact, Complex *u,
                       unsigned int A=2, unsigned int B=1,
                       unsigned int threads=fftw::maxthreads)
    : ThreadBase(threads), m(m), c(m/2), compact(compact), A(A), B(B), u(u),
      allocated(false) {
    initpointers(U,u);
    init();
  }

  // m is the number of independent data values
  // u is a work array of max(A,B)*(c+1) Complex values, where c=m/2
  // A is the number of inputs.
  // B is the number of outputs.
ImplicitHConvolution(unsigned int m, bool compact=true, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), m(m), c(m/2), compact(compact), A(A), B(B), u(utils::ComplexAlign(max(A,B)*(c+1))), allocated(true) { initpointers(U,u); init(); } virtual ~ImplicitHConvolution() { if(even) utils::deleteAlign(w); utils::deleteAlign(ZetaH); utils::deleteAlign(ZetaL); if(pointers) deletepointers(U); if(allocated) utils::deleteAlign(u); if(A != B) { delete cro; delete rco; } delete cr; delete rc; } // F is an array of A pointers to distinct data blocks each of size m, // shifted by offset (contents not preserved). void convolve(Complex **F, realmultiplier *pmult, unsigned int i=0, unsigned int offset=0); void pretransform(Complex *F, Complex *f1c, Complex *U); void posttransform(Complex *F, const Complex& f1c, Complex *U); // Binary convolution: void convolve(Complex *f, Complex *g) { Complex *F[]={f,g}; convolve(F,multbinary); } }; // Compute the scrambled implicitly m-padded complex Fourier transform of M // complex vectors, each of length m. // The arrays in and out (which may coincide), along with the array u, must // be allocated as Complex[M*m]. // // fftpad fft(m,M,stride); // fft.backwards(in,u); // fft.forwards(in,u); // // Notes: // stride is the spacing between the elements of each Complex vector. 
// class fftpad { unsigned int m; unsigned int M; unsigned int stride; unsigned int dist; unsigned int s; Complex *ZetaH, *ZetaL; unsigned int threads; public: mfft1d *Backwards; mfft1d *Forwards; fftpad(unsigned int m, unsigned int M, unsigned int stride, Complex *u=NULL, unsigned int Threads=fftw::maxthreads) : m(m), M(M), stride(stride), threads(Threads) { Backwards=new mfft1d(m,1,M,stride,1,u,NULL,threads); Forwards=new mfft1d(m,-1,M,stride,1,u,NULL,threads); threads=std::max(Backwards->Threads(),Forwards->Threads()); s=BuildZeta(2*m,m,ZetaH,ZetaL,threads); } ~fftpad() { utils::deleteAlign(ZetaL); utils::deleteAlign(ZetaH); delete Forwards; delete Backwards; } void expand(Complex *f, Complex *u); void reduce(Complex *f, Complex *u); void backwards(Complex *f, Complex *u); void forwards(Complex *f, Complex *u); }; // Compute the scrambled implicitly m-padded complex Fourier transform of M // complex vectors, each of length 2m-1 with the origin at index m-1, // containing physical data for wavenumbers -m+1 to m-1. // The arrays in and out (which may coincide) must be allocated as // Complex[M*(2m-1)]. The array u must be allocated as Complex[M*(m+1)]. // // fft0pad fft(m,M,stride,u); // fft.backwards(in,u); // fft.forwards(in,u); // // Notes: // stride is the spacing between the elements of each Complex vector. 
// class fft0pad { protected: unsigned int m; unsigned int M; unsigned int s; unsigned int stride; Complex *ZetaH, *ZetaL; unsigned int threads; public: mfft1d *Forwards; mfft1d *Backwards; fft0pad(unsigned int m, unsigned int M, unsigned int stride, Complex *u=NULL, unsigned int Threads=fftw::maxthreads) : m(m), M(M), stride(stride), threads(Threads) { Backwards=new mfft1d(m,1,M,stride,1,u,NULL,threads); Forwards=new mfft1d(m,-1,M,stride,1,u,NULL,threads); s=BuildZeta(3*m,m,ZetaH,ZetaL); } virtual ~fft0pad() { utils::deleteAlign(ZetaL); utils::deleteAlign(ZetaH); delete Forwards; delete Backwards; } // Unscramble indices, returning spatial index stored at position i inline static unsigned findex(unsigned i, unsigned int m) { return i < m-1 ? 3*i : 3*i+4-3*m; // for i >= m-1: j=3*(i-(m-1))+1 } inline static unsigned uindex(unsigned i, unsigned int m) { return i > 0 ? (i < m ? 3*i-1 : 3*m-3) : 3*m-1; } virtual void expand(Complex *f, Complex *u); virtual void reduce(Complex *f, Complex *u); void backwards(Complex *f, Complex *u); virtual void forwards(Complex *f, Complex *u); virtual void Backwards1(Complex *f, Complex *u); virtual void Forwards0(Complex *f); virtual void Forwards1(Complex *f, Complex *u); }; // Compute the scrambled implicitly m-padded complex Fourier transform of M // complex vectors, each of length 2m with the origin at index m, // corresponding to wavenumbers -m to m-1. // The arrays in and out (which may coincide) must be allocated as // Complex[M*2m]. The array u must be allocated as Complex[M*m]. // // fft1pad fft(m,M,stride,u); // fft.backwards(in,u); // fft.forwards(in,u); // // Notes: // stride is the spacing between the elements of each Complex vector. 
//
class fft1pad : public fft0pad {
public:
  fft1pad(unsigned int m, unsigned int M, unsigned int stride,
          Complex *u=NULL, unsigned int threads=fftw::maxthreads)
    : fft0pad(m,M,stride,u,threads) {}

  // Unscramble indices, returning spatial index stored at position i
  // NOTE(review): findex/uindex are static and merely hide the base-class
  // versions; calls through a fft0pad* resolve to fft0pad's versions even
  // when the object is a fft1pad -- confirm callers use the right ones.
  inline static unsigned findex(unsigned i, unsigned int m) {
    return i < m ? 3*i : 3*(i-m)+1;
  }

  inline static unsigned uindex(unsigned i, unsigned int m) {
    return i > 0 ? 3*i-1 : 3*m-1;
  }

  void expand(Complex *f, Complex *u);
  void reduce(Complex *f, Complex *u);

  void forwards(Complex *f, Complex *u);

  void Backwards1(Complex *f, Complex *u);
  void Forwards0(Complex *f);
  void Forwards1(Complex *f, Complex *u);
};

// In-place implicitly dealiased 2D complex convolution.
class ImplicitConvolution2 : public ThreadBase {
protected:
  unsigned int mx,my;               // problem dimensions
  Complex *u1;                      // per-thread 1D work arrays
  Complex *u2;                      // 2D work array
  unsigned int A,B;                 // number of inputs/outputs
  fftpad *xfftpad;                  // padded transform in the x direction
  ImplicitConvolution **yconvolve;  // one 1D convolver per thread
  Complex **U2;
  bool allocated;                   // true if u1/u2 were allocated here
  unsigned int indexsize;
  bool toplevel;
public:
  unsigned int *index;

  // NOTE(review): this sizes U2 by A, whereas ImplicitHConvolution2 uses
  // max(A,B); forwards() indexes U2[b] for b < B -- confirm B <= A holds.
  void initpointers2(Complex **&U2, Complex *u2, unsigned int stride) {
    U2=new Complex *[A];
    for(unsigned int a=0; a < A; ++a)
      U2[a]=u2+a*stride;
    if(toplevel) allocateindex(1,new unsigned int[1]);
  }

  void deletepointers2(Complex **&U2) {
    if(toplevel) {
      delete [] index;
      // Threads t >= 1 received their own index arrays in allocateindex().
      for(unsigned int t=1; t < threads; ++t)
        delete [] yconvolve[t]->index;
    }
    delete [] U2;
  }

  // Thread 0 shares the caller's index array; the others get private copies.
  void allocateindex(unsigned int n, unsigned int *i) {
    indexsize=n;
    index=i;
    yconvolve[0]->allocateindex(n,i);
    for(unsigned int t=1; t < threads; ++t)
      yconvolve[t]->allocateindex(n,new unsigned int[n]);
  }

  void init(const convolveOptions& options) {
    toplevel=options.toplevel;
    xfftpad=new fftpad(mx,options.ny,options.ny,u2,threads);
    unsigned int C=max(A,B);
    yconvolve=new ImplicitConvolution*[threads];
    for(unsigned int t=0; t < threads; ++t)
      yconvolve[t]=new ImplicitConvolution(my,u1+t*my*C,A,B,innerthreads);
    initpointers2(U2,u2,options.stride2);
  }

  // Fill in defaulted (zero) option fields from the problem dimensions.
  void set(convolveOptions& options) {
    if(options.nx == 0) options.nx=mx;
    if(options.ny == 0) {
      options.ny=my;
      options.stride2=mx*my;
    }
  }

  // u1 is a temporary array of size my*C*threads.
  // u2 is a temporary array of size mx*my*C.
  // A is the number of inputs.
  // B is the number of outputs.
  // Here C=max(A,B).
  ImplicitConvolution2(unsigned int mx, unsigned int my,
                       Complex *u1, Complex *u2,
                       unsigned int A=2, unsigned int B=1,
                       unsigned int threads=fftw::maxthreads,
                       convolveOptions options=defaultconvolveOptions)
    : ThreadBase(threads), mx(mx), my(my), u1(u1), u2(u2), A(A), B(B),
      allocated(false) {
    set(options);
    multithread(options.nx);
    init(options);
  }

  // Allocating overload: owns its work storage.
  ImplicitConvolution2(unsigned int mx, unsigned int my,
                       unsigned int A=2, unsigned int B=1,
                       unsigned int threads=fftw::maxthreads,
                       convolveOptions options=defaultconvolveOptions)
    : ThreadBase(threads), mx(mx), my(my), A(A), B(B), allocated(true) {
    set(options);
    multithread(options.nx);
    unsigned int C=max(A,B);
    u1=utils::ComplexAlign(my*C*threads);
    u2=utils::ComplexAlign(options.stride2*C);
    init(options);
  }

  virtual ~ImplicitConvolution2() {
    deletepointers2(U2);
    for(unsigned int t=0; t < threads; ++t)
      delete yconvolve[t];
    delete [] yconvolve;
    delete xfftpad;
    if(allocated) {
      utils::deleteAlign(u2);
      utils::deleteAlign(u1);
    }
  }

  // Backward x transforms of all A inputs into the work arrays U2.
  void backwards(Complex **F, Complex **U2, unsigned int offset) {
    for(unsigned int a=0; a < A; ++a)
      xfftpad->backwards(F[a]+offset,U2[a]);
  }

  // Run the per-row 1D convolutions, in parallel when threads > 1.
  // r selects the even (0) or odd (1) residue passed down as 2*i+r.
  void subconvolution(Complex **F, multiplier *pmult,
                      unsigned int r, unsigned int M, unsigned int stride,
                      unsigned int offset=0) {
    if(threads > 1) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < M; ++i)
        yconvolve[get_thread_num()]->convolve(F,pmult,2*i+r,offset+i*stride);
    } else {
      ImplicitConvolution *yconvolve0=yconvolve[0];
      for(unsigned int i=0; i < M; ++i)
        yconvolve0->convolve(F,pmult,2*i+r,offset+i*stride);
    }
  }

  // Forward x transforms accumulating the B outputs back into F.
  void forwards(Complex **F, Complex **U2, unsigned int offset) {
    for(unsigned int b=0; b < B; ++b)
      xfftpad->forwards(F[b]+offset,U2[b]);
  }

  // F is a pointer to A distinct data blocks each of size mx*my,
  // shifted by offset (contents not preserved).
  virtual void convolve(Complex **F, multiplier *pmult,
                        unsigned int i=0, unsigned int offset=0) {
    if(!toplevel) {
      index[indexsize-2]=i;
      // Propagate the outer index to each thread's private copy.
      if(threads > 1) {
        for(unsigned int t=1; t < threads; ++t) {
          unsigned int *Index=yconvolve[t]->index;
          for(unsigned int i=0; i < indexsize; ++i)
            Index[i]=index[i];
        }
      }
    }
    backwards(F,U2,offset);
    subconvolution(F,pmult,0,mx,my,offset);
    subconvolution(U2,pmult,1,mx,my);
    forwards(F,U2,offset);
  }

  // Binary convolution:
  void convolve(Complex *f, Complex *g) {
    Complex *F[]={f,g};
    convolve(F,multbinary);
  }

  // Binary correlation:
  void correlate(Complex *f, Complex *g) {
    Complex *F[]={f, g};
    convolve(F,multcorrelation);
  }

  void autoconvolve(Complex *f) {
    Complex *F[]={f};
    convolve(F,multautoconvolution);
  }

  void autocorrelate(Complex *f) {
    Complex *F[]={f};
    convolve(F,multautocorrelation);
  }
};

// Enforce Hermitian symmetry along the x axis: zero the imaginary part at
// the x origin and set f(-x,0)=conj(f(x,0)) for 0 < x < mx.
inline void HermitianSymmetrizeX(unsigned int mx, unsigned int my,
                                 unsigned int xorigin, Complex *f)
{
  unsigned int offset=xorigin*my;
  unsigned int stop=mx*my;
  f[offset].im=0.0;
  for(unsigned int i=my; i < stop; i += my)
    f[offset-i]=conj(f[offset+i]);
}

// Enforce 3D Hermiticity using specified (x,y > 0,z=0) and (x >= 0,y=0,z=0)
// data.
inline void HermitianSymmetrizeXY(unsigned int mx, unsigned int my,
                                  unsigned int mz, unsigned int xorigin,
                                  unsigned int yorigin, Complex *f,
                                  unsigned int threads=fftw::maxthreads)
{
  int stride=(yorigin+my)*mz;
  int mxstride=mx*stride;
  unsigned int myz=my*mz;
  unsigned int origin=xorigin*stride+yorigin*mz;

  f[origin].im=0.0;

  // y=0 axis: f(-x,0,0)=conj(f(x,0,0)).
  for(int i=stride; i < mxstride; i += stride)
    f[origin-i]=conj(f[origin+i]);

  // Remaining (x, y>0) modes, parallelized over x via the PARALLEL macro.
  PARALLEL(
    for(int i=stride-mxstride; i < mxstride; i += stride) {
      int stop=i+myz;
      for(int j=i+mz; j < stop; j += mz) {
        f[origin-j]=conj(f[origin+j]);
      }
    }
    );
}

// Maps a subconvolution loop position to the unscrambled spatial index.
typedef unsigned int IndexFunction(unsigned int, unsigned int m);

class ImplicitHConvolution2 : public ThreadBase {
protected:
  unsigned int mx,my;               // problem dimensions
  bool xcompact,ycompact;
  Complex *u1;                      // per-thread 1D work arrays
  Complex *u2;                      // 2D work array
  unsigned int A,B;                 // number of inputs/outputs
  fft0pad *xfftpad;                 // fft0pad or fft1pad, per xcompact
  ImplicitHConvolution **yconvolve; // one 1D convolver per thread
  Complex **U2;
  bool allocated;                   // true if u1/u2 were allocated here
  unsigned int indexsize;
  bool toplevel;
public:
  unsigned int *index;

  // Populate U2 with max(A,B) pointers into the flat work array u2.
  void initpointers2(Complex **&U2, Complex *u2, unsigned int stride) {
    unsigned int C=max(A,B);
    U2=new Complex *[C];
    for(unsigned int a=0; a < C; ++a)
      U2[a]=u2+a*stride;
    if(toplevel) allocateindex(1,new unsigned int[1]);
  }

  void deletepointers2(Complex **&U2) {
    if(toplevel) {
      delete [] index;
      // Threads t >= 1 received their own index arrays in allocateindex().
      for(unsigned int t=1; t < threads; ++t)
        delete [] yconvolve[t]->index;
    }
    delete [] U2;
  }

  // Thread 0 shares the caller's index array; the others get private copies.
  void allocateindex(unsigned int n, unsigned int *i) {
    indexsize=n;
    index=i;
    yconvolve[0]->allocateindex(n,i);
    for(unsigned int t=1; t < threads; ++t)
      yconvolve[t]->allocateindex(n,new unsigned int[n]);
  }

  void init(const convolveOptions& options) {
    unsigned int C=max(A,B);
    toplevel=options.toplevel;
    // Choose the padded x transform matching the compact layout.
    xfftpad=xcompact ? new fft0pad(mx,options.ny,options.ny,u2) :
      new fft1pad(mx,options.ny,options.ny,u2);

    yconvolve=new ImplicitHConvolution*[threads];
    for(unsigned int t=0; t < threads; ++t)
      yconvolve[t]=new ImplicitHConvolution(my,ycompact,u1+t*(my/2+1)*C,A,B,
                                            innerthreads);
    initpointers2(U2,u2,options.stride2);
  }

  // Fill in defaulted (zero) option fields from the problem dimensions.
  void set(convolveOptions& options) {
    if(options.nx == 0) options.nx=mx;
    if(options.ny == 0) {
      options.ny=my+!ycompact;
      options.stride2=(mx+xcompact)*options.ny;
    }
  }

  // u1 is a temporary array of size (my/2+1)*C*threads.
  // u2 is a temporary array of size (mx+xcompact)*(my+!ycompact)*C;
  // A is the number of inputs.
  // B is the number of outputs.
  // Here C=max(A,B).
  ImplicitHConvolution2(unsigned int mx, unsigned int my,
                        Complex *u1, Complex *u2,
                        unsigned int A=2, unsigned int B=1,
                        unsigned int threads=fftw::maxthreads,
                        convolveOptions options=defaultconvolveOptions)
    : ThreadBase(threads), mx(mx), my(my), xcompact(true), ycompact(true),
      u1(u1), u2(u2), A(A), B(B), allocated(false) {
    set(options);
    multithread(options.nx);
    init(options);
  }

  ImplicitHConvolution2(unsigned int mx, unsigned int my,
                        bool xcompact, bool ycompact,
                        Complex *u1, Complex *u2,
                        unsigned int A=2, unsigned int B=1,
                        unsigned int threads=fftw::maxthreads,
                        convolveOptions options=defaultconvolveOptions)
    : ThreadBase(threads), mx(mx), my(my), xcompact(xcompact),
      ycompact(ycompact), u1(u1), u2(u2), A(A), B(B), allocated(false) {
    set(options);
    multithread(options.nx);
    init(options);
  }

  // Allocating overload: owns its work storage.
  ImplicitHConvolution2(unsigned int mx, unsigned int my,
                        bool xcompact=true, bool ycompact=true,
                        unsigned int A=2, unsigned int B=1,
                        unsigned int threads=fftw::maxthreads,
                        convolveOptions options=defaultconvolveOptions)
    : ThreadBase(threads), mx(mx), my(my), xcompact(xcompact),
      ycompact(ycompact), A(A), B(B), allocated(true) {
    set(options);
    multithread(options.nx);
    unsigned int C=max(A,B);
    u1=utils::ComplexAlign((my/2+1)*C*threads);
    u2=utils::ComplexAlign(options.stride2*C);
    init(options);
  }

  virtual ~ImplicitHConvolution2() {
    deletepointers2(U2);
    for(unsigned int t=0; t < threads; ++t)
      delete yconvolve[t];
    delete [] yconvolve;
    delete xfftpad;
    if(allocated) {
      utils::deleteAlign(u2);
      utils::deleteAlign(u1);
    }
  }

  // Backward x transforms of all A inputs, optionally enforcing
  // Hermitian symmetry first.
  void backwards(Complex **F, Complex **U2, unsigned int ny,
                 bool symmetrize, unsigned int offset) {
    for(unsigned int a=0; a < A; ++a) {
      Complex *f=F[a]+offset;
      if(symmetrize) HermitianSymmetrizeX(mx,ny,mx-xcompact,f);
      xfftpad->backwards(f,U2[a]);
    }
  }

  // Run the per-row 1D Hermitian convolutions, in parallel when threads > 1.
  // indexfunction unscrambles the loop position into a spatial index.
  void subconvolution(Complex **F, realmultiplier *pmult,
                      IndexFunction indexfunction,
                      unsigned int M, unsigned int stride,
                      unsigned int offset=0) {
    if(threads > 1) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < M; ++i)
        yconvolve[get_thread_num()]->convolve(F,pmult,indexfunction(i,mx),
                                              offset+i*stride);
    } else {
      ImplicitHConvolution *yconvolve0=yconvolve[0];
      for(unsigned int i=0; i < M; ++i)
        yconvolve0->convolve(F,pmult,indexfunction(i,mx),offset+i*stride);
    }
  }

  // Forward x transforms accumulating the B outputs back into F.
  void forwards(Complex **F, Complex **U2, unsigned int offset) {
    for(unsigned int b=0; b < B; ++b)
      xfftpad->forwards(F[b]+offset,U2[b]);
  }

  // F is a pointer to A distinct data blocks each of size
  // (2mx-compact)*(my+!ycompact), shifted by offset (contents not preserved).
  // NOTE(review): findex/uindex are static members hidden (not overridden)
  // by fft1pad, so xfftpad->findex always names fft0pad's version even when
  // xfftpad points to a fft1pad -- confirm this is intended.
  virtual void convolve(Complex **F, realmultiplier *pmult,
                        bool symmetrize=true, unsigned int i=0,
                        unsigned int offset=0) {
    if(!toplevel) {
      index[indexsize-2]=i;
      // Propagate the outer index to each thread's private copy.
      if(threads > 1) {
        for(unsigned int t=1; t < threads; ++t) {
          unsigned int *Index=yconvolve[t]->index;
          for(unsigned int i=0; i < indexsize; ++i)
            Index[i]=index[i];
        }
      }
    }
    unsigned stride=my+!ycompact;
    backwards(F,U2,stride,symmetrize,offset);
    subconvolution(F,pmult,xfftpad->findex,2*mx-xcompact,stride,offset);
    subconvolution(U2,pmult,xfftpad->uindex,mx+xcompact,stride);
    forwards(F,U2,offset);
  }

  // Binary convolution:
  void convolve(Complex *f, Complex *g, bool symmetrize=true) {
    Complex *F[]={f,g};
    convolve(F,multbinary,symmetrize);
  }
};

// In-place implicitly dealiased 3D complex convolution.
class ImplicitConvolution3 : public ThreadBase {
protected:
  unsigned int mx,my,mz;              // problem dimensions
  Complex *u1;                        // per-thread 1D work arrays
  Complex *u2;                        // per-thread 2D work arrays
  Complex *u3;                        // 3D work array
  unsigned int A,B;                   // number of inputs/outputs
  fftpad *xfftpad;                    // padded transform in the x direction
  ImplicitConvolution2 **yzconvolve;  // one 2D convolver per thread
  Complex **U3;
  bool allocated;                     // true if u1/u2/u3 were allocated here
  unsigned int indexsize;
  bool toplevel;
public:
  unsigned int *index;

  // Populate U3 with max(A,B) pointers into the flat work array u3.
  void initpointers3(Complex **&U3, Complex *u3, unsigned int stride) {
    unsigned int C=max(A,B);
    U3=new Complex *[C];
    for(unsigned int a=0; a < C; ++a)
      U3[a]=u3+a*stride;
    if(toplevel) allocateindex(2,new unsigned int[2]);
  }

  void deletepointers3(Complex **&U3) {
    if(toplevel) {
      delete [] index;
      // Threads t >= 1 received their own index arrays in allocateindex().
      for(unsigned int t=1; t < threads; ++t)
        delete [] yzconvolve[t]->index;
    }
    delete [] U3;
  }

  // Thread 0 shares the caller's index array; the others get private copies.
  void allocateindex(unsigned int n, unsigned int *i) {
    indexsize=n;
    index=i;
    yzconvolve[0]->allocateindex(n,i);
    for(unsigned int t=1; t < threads; ++t)
      yzconvolve[t]->allocateindex(n,new unsigned int[n]);
  }

  void init(const convolveOptions& options) {
    toplevel=options.toplevel;
    unsigned int nyz=options.ny*options.nz;
    xfftpad=new fftpad(mx,nyz,nyz,u3,threads);
    if(options.nz == mz) {
      unsigned int C=max(A,B);
      yzconvolve=new ImplicitConvolution2*[threads];
      for(unsigned int t=0; t < threads; ++t)
        yzconvolve[t]=new ImplicitConvolution2(my,mz,u1+t*mz*C*innerthreads,
                                               u2+t*options.stride2*C,A,B,
                                               innerthreads,false);
      initpointers3(U3,u3,options.stride3);
    } else yzconvolve=NULL;
  }

  // Fill in defaulted (zero) option fields from the problem dimensions.
  void set(convolveOptions &options) {
    if(options.ny == 0) {
      options.ny=my;
      options.nz=mz;
      options.stride2=my*mz;
      options.stride3=mx*my*mz;
    }
  }

  // u1 is a temporary array of size mz*C*threads.
  // u2 is a temporary array of size my*mz*C*threads.
  // u3 is a temporary array of size mx*my*mz*C.
  // A is the number of inputs.
  // B is the number of outputs.
  // Here C=max(A,B).
  ImplicitConvolution3(unsigned int mx, unsigned int my, unsigned int mz,
                       Complex *u1, Complex *u2, Complex *u3,
                       unsigned int A=2, unsigned int B=1,
                       unsigned int threads=fftw::maxthreads,
                       convolveOptions options=defaultconvolveOptions)
    : ThreadBase(threads), mx(mx), my(my), mz(mz), u1(u1), u2(u2), u3(u3),
      A(A), B(B), allocated(false) {
    set(options);
    multithread(mx);
    init(options);
  }

  // Allocating overload: owns its work storage.
  ImplicitConvolution3(unsigned int mx, unsigned int my, unsigned int mz,
                       unsigned int A=2, unsigned int B=1,
                       unsigned int threads=fftw::maxthreads,
                       convolveOptions options=defaultconvolveOptions)
    : ThreadBase(threads), mx(mx), my(my), mz(mz), A(A), B(B),
      allocated(true) {
    set(options);
    multithread(mx);
    unsigned int C=max(A,B);
    u1=utils::ComplexAlign(mz*C*threads*innerthreads);
    u2=utils::ComplexAlign(options.stride2*C*threads);
    u3=utils::ComplexAlign(options.stride3*C);
    init(options);
  }

  virtual ~ImplicitConvolution3() {
    if(yzconvolve) {
      deletepointers3(U3);
      for(unsigned int t=0; t < threads; ++t)
        delete yzconvolve[t];
      delete [] yzconvolve;
    }
    delete xfftpad;
    if(allocated) {
      utils::deleteAlign(u3);
      utils::deleteAlign(u2);
      utils::deleteAlign(u1);
    }
  }

  // Backward x transforms of all A inputs into the work arrays U3.
  void backwards(Complex **F, Complex **U3, unsigned int offset) {
    for(unsigned int a=0; a < A; ++a)
      xfftpad->backwards(F[a]+offset,U3[a]);
  }

  // Run the per-slab 2D convolutions, in parallel when threads > 1.
  // r selects the even (0) or odd (1) residue passed down as 2*i+r.
  void subconvolution(Complex **F, multiplier *pmult,
                      unsigned int r, unsigned int M, unsigned int stride,
                      unsigned int offset=0) {
    if(threads > 1) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < M; ++i)
        yzconvolve[get_thread_num()]->convolve(F,pmult,2*i+r,offset+i*stride);
    } else {
      ImplicitConvolution2 *yzconvolve0=yzconvolve[0];
      for(unsigned int i=0; i < M; ++i) {
        yzconvolve0->convolve(F,pmult,2*i+r,offset+i*stride);
      }
    }
  }

  // Forward x transforms accumulating the B outputs back into F.
  void forwards(Complex **F, Complex **U3, unsigned int offset=0) {
    for(unsigned int b=0; b < B; ++b)
      xfftpad->forwards(F[b]+offset,U3[b]);
  }

  // F is a pointer to A distinct data blocks each of size mx*my*mz,
  // shifted by offset
  virtual void convolve(Complex **F, multiplier *pmult,
                        unsigned int i=0, unsigned int offset=0) {
    if(!toplevel) {
      index[indexsize-3]=i;
      // Propagate the outer index to each thread's private copy.
      if(threads > 1) {
        for(unsigned int t=1; t < threads; ++t) {
          unsigned int *Index=yzconvolve[t]->index;
          for(unsigned int i=0; i < indexsize; ++i)
            Index[i]=index[i];
        }
      }
    }
    unsigned int stride=my*mz;
    backwards(F,U3,offset);
    subconvolution(F,pmult,0,mx,stride,offset);
    subconvolution(U3,pmult,1,mx,stride);
    forwards(F,U3,offset);
  }

  // Binary convolution:
  void convolve(Complex *f, Complex *g) {
    Complex *F[]={f,g};
    convolve(F,multbinary);
  }

  // Binary correlation:
  void correlate(Complex *f, Complex *g) {
    Complex *F[]={f, g};
    convolve(F,multcorrelation);
  }

  void autoconvolve(Complex *f) {
    Complex *F[]={f};
    convolve(F,multautoconvolution);
  }

  void autocorrelate(Complex *f) {
    Complex *F[]={f};
    convolve(F,multautocorrelation);
  }
};

// In-place implicitly dealiased 3D Hermitian convolution.
class ImplicitHConvolution3 : public ThreadBase {
protected:
  unsigned int mx,my,mz;               // problem dimensions
  bool xcompact,ycompact,zcompact;
  Complex *u1;                         // per-thread 1D work arrays
  Complex *u2;                         // per-thread 2D work arrays
  Complex *u3;                         // 3D work array
  unsigned int A,B;                    // number of inputs/outputs
  fft0pad *xfftpad;                    // fft0pad or fft1pad, per xcompact
  ImplicitHConvolution2 **yzconvolve;  // one 2D convolver per thread
  Complex **U3;
  bool allocated;                      // true if u1/u2/u3 were allocated here
  unsigned int indexsize;
  bool toplevel;
public:
  unsigned int *index;

  // Populate U3 with max(A,B) pointers into the flat work array u3.
  void initpointers3(Complex **&U3, Complex *u3, unsigned int stride) {
    unsigned int C=max(A,B);
    U3=new Complex *[C];
    for(unsigned int a=0; a < C; ++a)
      U3[a]=u3+a*stride;
    if(toplevel) allocateindex(2,new unsigned int[2]);
  }

  void deletepointers3(Complex **&U3) {
    if(toplevel) {
      delete [] index;
      // Threads t >= 1 received their own index arrays in allocateindex().
      for(unsigned int t=1; t < threads; ++t)
        delete [] yzconvolve[t]->index;
    }
    delete [] U3;
  }

  // Thread 0 shares the caller's index array; the others get private copies.
  void allocateindex(unsigned int n, unsigned int *i) {
    indexsize=n;
    index=i;
    yzconvolve[0]->allocateindex(n,i);
    for(unsigned int t=1; t < threads; ++t)
      yzconvolve[t]->allocateindex(n,new unsigned int[n]);
  }

  void init(const convolveOptions& options) {
    toplevel=options.toplevel;
    unsigned int nyz=options.ny*options.nz;
    // Choose the padded x transform matching the compact layout.
    xfftpad=xcompact ? new fft0pad(mx,nyz,nyz,u3) :
      new fft1pad(mx,nyz,nyz,u3);

    if(options.nz == mz+!zcompact) {
      unsigned int C=max(A,B);
      yzconvolve=new ImplicitHConvolution2*[threads];
      for(unsigned int t=0; t < threads; ++t)
        yzconvolve[t]=new ImplicitHConvolution2(my,mz,
                                                ycompact,zcompact,
                                                u1+t*(mz/2+1)*C*innerthreads,
                                                u2+t*options.stride2*C,
                                                A,B,innerthreads,false);
      initpointers3(U3,u3,options.stride3);
    } else yzconvolve=NULL;
  }

  // Fill in defaulted (zero) option fields from the problem dimensions.
  void set(convolveOptions& options) {
    if(options.ny == 0) {
      options.ny=2*my-ycompact;
      options.nz=mz+!zcompact;
      options.stride2=(my+ycompact)*options.nz;
      options.stride3=(mx+xcompact)*options.ny*options.nz;
    }
  }

  // u1 is a temporary array of size (mz/2+1)*C*threads.
  // u2 is a temporary array of size (my+ycompact)*(mz+!zcompact)*C*threads.
  // u3 is a temporary array of size
  // (mx+xcompact)*(2my-ycompact)*(mz+!zcompact)*C.
  // A is the number of inputs.
  // B is the number of outputs.
  // Here C=max(A,B).
  ImplicitHConvolution3(unsigned int mx, unsigned int my, unsigned int mz,
                        Complex *u1, Complex *u2, Complex *u3,
                        unsigned int A=2, unsigned int B=1,
                        unsigned int threads=fftw::maxthreads,
                        convolveOptions options=defaultconvolveOptions)
    : ThreadBase(threads), mx(mx), my(my), mz(mz),
      xcompact(true), ycompact(true), zcompact(true),
      u1(u1), u2(u2), u3(u3), A(A), B(B), allocated(false) {
    set(options);
    multithread(mx);
    init(options);
  }

  ImplicitHConvolution3(unsigned int mx, unsigned int my, unsigned int mz,
                        bool xcompact, bool ycompact, bool zcompact,
                        Complex *u1, Complex *u2, Complex *u3,
                        unsigned int A=2, unsigned int B=1,
                        unsigned int threads=fftw::maxthreads,
                        convolveOptions options=defaultconvolveOptions)
    : ThreadBase(threads), mx(mx), my(my), mz(mz),
      xcompact(xcompact), ycompact(ycompact), zcompact(zcompact),
      u1(u1), u2(u2), u3(u3), A(A), B(B), allocated(false) {
    set(options);
    multithread(mx);
    init(options);
  }

  // Allocating overload: owns its work storage.
  ImplicitHConvolution3(unsigned int mx, unsigned int my, unsigned int mz,
                        bool xcompact=true, bool ycompact=true,
                        bool zcompact=true,
                        unsigned int A=2, unsigned int B=1,
                        unsigned int threads=fftw::maxthreads,
                        convolveOptions options=defaultconvolveOptions)
    : ThreadBase(threads), mx(mx), my(my), mz(mz),
      xcompact(xcompact), ycompact(ycompact), zcompact(zcompact),
      A(A), B(B), allocated(true) {
    set(options);
    multithread(mx);
    unsigned int C=max(A,B);
    u1=utils::ComplexAlign((mz/2+1)*C*threads*innerthreads);
    u2=utils::ComplexAlign(options.stride2*C*threads);
    u3=utils::ComplexAlign(options.stride3*C);
    init(options);
  }

  virtual ~ImplicitHConvolution3() {
    if(yzconvolve) {
      deletepointers3(U3);
      for(unsigned int t=0; t < threads; ++t)
        delete yzconvolve[t];
      delete [] yzconvolve;
    }
    delete xfftpad;
    if(allocated) {
      utils::deleteAlign(u3);
      utils::deleteAlign(u2);
      utils::deleteAlign(u1);
    }
  }

  virtual void HermitianSymmetrize(Complex *f, Complex *u) {
    HermitianSymmetrizeXY(mx,my,mz+!zcompact,mx-xcompact,my-ycompact,f,
                          threads);
  }

  // Backward x transforms of all A inputs, optionally enforcing
  // Hermitian symmetry first.
  void backwards(Complex **F, Complex **U3, bool symmetrize,
                 unsigned int offset) {
    for(unsigned int a=0; a < A; ++a) {
      Complex *f=F[a]+offset;
      Complex *u=U3[a];
      if(symmetrize) HermitianSymmetrize(f,u);
      xfftpad->backwards(f,u);
    }
  }

  // Run the per-slab 2D Hermitian convolutions, in parallel when
  // threads > 1; indexfunction unscrambles the loop position.
  void subconvolution(Complex **F, realmultiplier *pmult,
                      IndexFunction indexfunction,
                      unsigned int M, unsigned int stride,
                      unsigned int offset=0) {
    if(threads > 1) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < M; ++i)
        yzconvolve[get_thread_num()]->convolve(F,pmult,false,
                                               indexfunction(i,mx),
                                               offset+i*stride);
    } else {
      ImplicitHConvolution2 *yzconvolve0=yzconvolve[0];
      for(unsigned int i=0; i < M; ++i)
        yzconvolve0->convolve(F,pmult,false,indexfunction(i,mx),
                              offset+i*stride);
    }
  }

  // Forward x transforms accumulating the B outputs back into F.
  void forwards(Complex **F, Complex **U3, unsigned int offset=0) {
    for(unsigned int b=0; b < B; ++b)
      xfftpad->forwards(F[b]+offset,U3[b]);
  }

  // F is a pointer to A distinct data blocks each of size
  // (2mx-compact)*(2my-ycompact)*(mz+!zcompact), shifted by offset
  // (contents not preserved).
  virtual void convolve(Complex **F, realmultiplier *pmult,
                        bool symmetrize=true, unsigned int i=0,
                        unsigned int offset=0) {
    if(!toplevel) {
      index[indexsize-3]=i;
      // Propagate the outer index to each thread's private copy.
      if(threads > 1) {
        for(unsigned int t=1; t < threads; ++t) {
          unsigned int *Index=yzconvolve[t]->index;
          for(unsigned int i=0; i < indexsize; ++i)
            Index[i]=index[i];
        }
      }
    }
    unsigned int stride=(2*my-ycompact)*(mz+!zcompact);
    backwards(F,U3,symmetrize,offset);
    subconvolution(F,pmult,xfftpad->findex,2*mx-xcompact,stride,offset);
    subconvolution(U3,pmult,xfftpad->uindex,mx+xcompact,stride);
    forwards(F,U3,offset);
  }

  // Binary convolution:
  void convolve(Complex *f, Complex *g, bool symmetrize=true) {
    Complex *F[]={f,g};
    convolve(F,multbinary,symmetrize);
  }
};

// In-place implicitly dealiased Hermitian ternary convolution.
class ImplicitHTConvolution : public ThreadBase {
protected:
  unsigned int m;         // number of independent data values
  Complex *u,*v,*w;       // work arrays, each of size (m+1)*M
  unsigned int M;         // number of data blocks (dot-product terms)
  unsigned int s;         // zeta table block size from BuildZeta
  rcfft1d *rc, *rco;      // real-to-complex plans (in/out of place)
  crfft1d *cr, *cro;      // complex-to-real plans (in/out of place)
  Complex *ZetaH, *ZetaL; // twiddle-factor tables
  Complex **W;            // M pointers into w, stride m+1
  bool allocated;         // true if u/v/w were allocated here
  unsigned int twom;      // 2*m
  unsigned int stride;    // 2*m+2
public:
  // Populate W with M pointers into the flat work array w (stride m+1).
  void initpointers(Complex **&W, Complex *w) {
    W=new Complex *[M];
    unsigned int m1=m+1;
    for(unsigned int s=0; s < M; ++s)
      W[s]=w+s*m1;
  }

  void deletepointers(Complex **&W) {
    delete [] W;
  }

  // Build the FFT plans and twiddle tables; shared by both constructors.
  void init() {
    twom=2*m;
    stride=twom+2;
    rc=new rcfft1d(twom,u);
    cr=new crfft1d(twom,u);
    rco=new rcfft1d(twom,(double *) u,v);
    cro=new crfft1d(twom,v,(double *) u);
    threads=std::min(threads,std::max(rco->Threads(),cro->Threads()));
    s=BuildZeta(4*m,m,ZetaH,ZetaL,threads);
    initpointers(W,w);
  }

  // u, v, and w are distinct temporary arrays each of size (m+1)*M.
  ImplicitHTConvolution(unsigned int m, Complex *u, Complex *v, Complex *w,
                        unsigned int M=1)
    : m(m), u(u), v(v), w(w), M(M), allocated(false) {
    init();
  }

  // Allocating overload: owns its three work arrays of (m+1)*M values.
  ImplicitHTConvolution(unsigned int m, unsigned int M=1)
    : m(m), u(utils::ComplexAlign(m*M+M)), v(utils::ComplexAlign(m*M+M)),
      w(utils::ComplexAlign(m*M+M)), M(M), allocated(true) {
    init();
  }

  ~ImplicitHTConvolution() {
    deletepointers(W);
    if(allocated) {
      utils::deleteAlign(w);
      utils::deleteAlign(v);
      utils::deleteAlign(u);
    }
    utils::deleteAlign(ZetaL);
    utils::deleteAlign(ZetaH);
    delete cro;
    delete rco;
    delete cr;
    delete rc;
  }

  void mult(double *a, double *b, double **C, unsigned int offset=0);

  void convolve(Complex **F, Complex **G, Complex **H,
                Complex *u, Complex *v, Complex **W,
                unsigned int offset=0);

  // F, G, and H are distinct pointers to M distinct data blocks each of size
  // m+1, shifted by offset (contents not preserved).
  // The output is returned in F[0].
  void convolve(Complex **F, Complex **G, Complex **H,
                unsigned int offset=0) {
    convolve(F,G,H,u,v,W,offset);
  }

  // Special case M=1:
  void convolve(Complex *f, Complex *g, Complex *h) {
    convolve(&f,&g,&h);
  }
};

// In-place implicitly dealiased Hermitian ternary convolution.
// Special case G=H, M=1.
class ImplicitHFGGConvolution : public ThreadBase {
protected:
  unsigned int m;         // number of independent data values
  Complex *u,*v;          // work arrays, each of size m+1
  unsigned int s;         // zeta table block size from BuildZeta
  rcfft1d *rc, *rco;      // real-to-complex plans (in/out of place)
  crfft1d *cr, *cro;      // complex-to-real plans (in/out of place)
  Complex *ZetaH, *ZetaL; // twiddle-factor tables
  bool allocated;         // true if u/v were allocated here
  unsigned int twom;      // 2*m
  unsigned int stride;    // 2*m+2
public:
  // Build the FFT plans and twiddle tables; shared by both constructors.
  void init() {
    twom=2*m;
    stride=twom+2;
    rc=new rcfft1d(twom,u);
    cr=new crfft1d(twom,u);
    rco=new rcfft1d(twom,(double *) u,v);
    cro=new crfft1d(twom,v,(double *) u);
    threads=std::min(threads,std::max(rco->Threads(),cro->Threads()));
    s=BuildZeta(4*m,m,ZetaH,ZetaL,threads);
  }

  // u and v are distinct temporary arrays each of size m+1.
  ImplicitHFGGConvolution(unsigned int m, Complex *u, Complex *v)
    : m(m), u(u), v(v), allocated(false) {
    init();
  }

  // Allocating overload: owns its two work arrays of m+1 values.
  ImplicitHFGGConvolution(unsigned int m)
    : m(m), u(utils::ComplexAlign(m+1)), v(utils::ComplexAlign(m+1)),
      allocated(true) {
    init();
  }

  ~ImplicitHFGGConvolution() {
    if(allocated) {
      utils::deleteAlign(v);
      utils::deleteAlign(u);
    }
    utils::deleteAlign(ZetaL);
    utils::deleteAlign(ZetaH);
    delete cro;
    delete rco;
    delete cr;
    delete rc;
  }

  void mult(double *a, double *b);

  void convolve(Complex *f, Complex *g, Complex *u, Complex *v);

  // f and g are distinct pointers to data of size m+1 (contents not
  // preserved). The output is returned in f.
  void convolve(Complex *f, Complex *g) {
    convolve(f,g,u,v);
  }
};

// In-place implicitly dealiased Hermitian ternary convolution.
// Special case F=G=H, M=1.
class ImplicitHFFFConvolution : public ThreadBase {
protected:
  unsigned int m;         // number of independent data values
  Complex *u;             // work array of size m+1
  unsigned int s;         // zeta table block size from BuildZeta
  rcfft1d *rc;            // real-to-complex plan
  crfft1d *cr;            // complex-to-real plan
  Complex *ZetaH, *ZetaL; // twiddle-factor tables
  bool allocated;         // true if u was allocated here
  unsigned int twom;      // 2*m
  unsigned int stride;    // 2*m+2
public:
  void mult(double *a);

  // Build the FFT plans and twiddle tables; shared by both constructors.
  void init() {
    twom=2*m;
    stride=twom+2;
    rc=new rcfft1d(twom,u);
    cr=new crfft1d(twom,u);
    threads=std::min(threads,std::max(rc->Threads(),cr->Threads()));
    s=BuildZeta(4*m,m,ZetaH,ZetaL,threads);
  }

  // u is a distinct temporary array of size m+1.
  ImplicitHFFFConvolution(unsigned int m, Complex *u)
    : m(m), u(u), allocated(false) {
    init();
  }

  // Allocating overload: owns its work array of m+1 values.
  ImplicitHFFFConvolution(unsigned int m)
    : m(m), u(utils::ComplexAlign(m+1)), allocated(true) {
    init();
  }

  ~ImplicitHFFFConvolution() {
    if(allocated) utils::deleteAlign(u);
    utils::deleteAlign(ZetaL);
    utils::deleteAlign(ZetaH);
    delete cr;
    delete rc;
  }

  void convolve(Complex *f, Complex *u);

  // f is a pointer to data of size m+1 (contents not preserved).
  // The output is returned in f.
  void convolve(Complex *f) {
    convolve(f,u);
  }
};

// Compute the scrambled implicitly 2m-padded complex Fourier transform of M
// complex vectors, each of length 2m with the Fourier origin at index m.
// The arrays in and out (which may coincide), along
// with the array u, must be allocated as Complex[M*2m].
//
//   fft0bipad fft(m,M,stride);
//   fft.backwards(in,u);
//   fft.forwards(in,u);
//
// Notes:
//   stride is the spacing between the elements of each Complex vector.
//
class fft0bipad {
  unsigned int m;         // unpadded transform half-length
  unsigned int M;         // number of vectors transformed per call
  unsigned int stride;    // spacing between elements of each vector
  unsigned int s;         // zeta table block size from BuildZeta
  mfft1d *Backwards;
  mfft1d *Forwards;
  Complex *ZetaH, *ZetaL; // twiddle-factor tables
  unsigned int threads;
public:
  fft0bipad(unsigned int m, unsigned int M, unsigned int stride,
            Complex *f, unsigned int Threads=fftw::maxthreads)
    : m(m), M(M), stride(stride), threads(Threads) {
    unsigned int twom=2*m;
    Backwards=new mfft1d(twom,1,M,stride,1,f,NULL,threads);
    Forwards=new mfft1d(twom,-1,M,stride,1,f,NULL,threads);
    threads=std::min(threads,
                     std::max(Backwards->Threads(),Forwards->Threads()));
    s=BuildZeta(4*m,twom,ZetaH,ZetaL,threads);
  }

  ~fft0bipad() {
    utils::deleteAlign(ZetaL);
    utils::deleteAlign(ZetaH);
    delete Forwards;
    delete Backwards;
  }

  void backwards(Complex *f, Complex *u);
  void forwards(Complex *f, Complex *u);
};

// In-place implicitly dealiased 2D Hermitian ternary convolution.
class ImplicitHTConvolution2 : public ThreadBase {
protected:
  unsigned int mx,my;              // x and y problem sizes
  Complex *u1,*v1,*w1;             // per-thread 1D work arrays
  Complex *u2,*v2,*w2;             // 2D work arrays
  unsigned int M;                  // number of data blocks
  fft0bipad *xfftpad;              // padded transform in the x direction
  ImplicitHTConvolution *yconvolve;// 1D subconvolution in the y direction
  Complex **U2,**V2,**W2;          // per-block views into u2, v2, w2
  bool allocated;                  // true when u1..w2 were allocated here
  Complex **u,**v;                 // per-thread views into u1, v1
  Complex ***W;                    // per-thread pointer tables into w1
public:
  // Build per-thread pointer tables: thread i gets the i-th slice of size
  // (my+1)*M out of u1, v1, and w1.
  void initpointers(Complex **&u, Complex **&v, Complex ***&W,
                    unsigned int threads) {
    u=new Complex *[threads];
    v=new Complex *[threads];
    W=new Complex **[threads];
    unsigned int my1M=(my+1)*M;
    for(unsigned int i=0; i < threads; ++i) {
      unsigned int imy1M=i*my1M;
      u[i]=u1+imy1M;
      v[i]=v1+imy1M;
      Complex *wi=w1+imy1M;
      // W[i] is built by the subconvolution from its slice of w1.
      yconvolve->initpointers(W[i],wi);
    }
  }

  // Release the tables created by initpointers (the underlying arrays
  // u1, v1, w1 are not freed here).
  void deletepointers(Complex **&u, Complex **&v, Complex ***&W,
                      unsigned int threads) {
    for(unsigned int i=0; i < threads; ++i)
      yconvolve->deletepointers(W[i]);
    delete [] W;
    delete [] v;
    delete [] u;
  }

  // Build per-block pointer tables: block s gets the s-th slice of size
  // 2*mx*(my+1) out of u2, v2, and w2.
  void initpointers(Complex **&U2, Complex **&V2, Complex **&W2,
                    Complex *u2, Complex *v2, Complex *w2) {
    U2=new Complex *[M];
    V2=new Complex *[M];
    W2=new Complex *[M];
    unsigned int mu=2*mx*(my+1);
    for(unsigned int s=0; s < M; ++s) {
      unsigned int smu=s*mu;
      U2[s]=u2+smu;
      V2[s]=v2+smu;
      W2[s]=w2+smu;
    }
  }

  void deletepointers(Complex **&U2, Complex **&V2, Complex **&W2) {
    delete [] W2;
    delete [] V2;
    delete [] U2;
  }

  // Create the x-direction padded FFT and the y-direction subconvolution,
  // then wire up all pointer tables. The subconvolution runs single-threaded;
  // parallelism is applied at the outer loop level.
  void init() {
    xfftpad=new fft0bipad(mx,my,my+1,u2,threads);

    yconvolve=new ImplicitHTConvolution(my,u1,v1,w1,M);
    yconvolve->Threads(1);

    initpointers(u,v,W,threads);
    initpointers(U2,V2,W2,u2,v2,w2);
  }

  // u1, v1, and w1 are temporary arrays of size (my+1)*M*threads;
  // u2, v2, and w2 are temporary arrays of size 2mx*(my+1)*M.
  // M is the number of data blocks (each corresponding to a dot product term).
  // threads is the number of threads to use in the outer subconvolution loop.
// Non-allocating constructor: caller owns all six temporary arrays.
ImplicitHTConvolution2(unsigned int mx, unsigned int my,
                       Complex *u1, Complex *v1, Complex *w1,
                       Complex *u2, Complex *v2, Complex *w2,
                       unsigned int M=1,
                       unsigned int threads=fftw::maxthreads) :
  ThreadBase(threads), mx(mx), my(my), u1(u1), v1(v1), w1(w1),
  u2(u2), v2(v2), w2(w2), M(M), allocated(false) {
  init();
}

// Allocating constructor: all temporaries are allocated here and released
// by the destructor (allocated=true).
ImplicitHTConvolution2(unsigned int mx, unsigned int my,
                       unsigned int M=1,
                       unsigned int threads=fftw::maxthreads) :
  ThreadBase(threads), mx(mx), my(my),
  u1(utils::ComplexAlign((my+1)*M*threads)),
  v1(utils::ComplexAlign((my+1)*M*threads)),
  w1(utils::ComplexAlign((my+1)*M*threads)),
  u2(utils::ComplexAlign(2*mx*(my+1)*M)),
  v2(utils::ComplexAlign(2*mx*(my+1)*M)),
  w2(utils::ComplexAlign(2*mx*(my+1)*M)),
  M(M), allocated(true) {
  init();
}

~ImplicitHTConvolution2() {
  deletepointers(U2,V2,W2);
  deletepointers(u,v,W,threads);

  delete yconvolve;
  delete xfftpad;

  if(allocated) {
    utils::deleteAlign(w2);
    utils::deleteAlign(v2);
    utils::deleteAlign(u2);
    utils::deleteAlign(w1);
    utils::deleteAlign(v1);
    utils::deleteAlign(u1);
  }
}

void convolve(Complex **F, Complex **G, Complex **H,
              Complex **u, Complex **v, Complex ***W,
              Complex **U2, Complex **V2, Complex **W2,
              bool symmetrize=true, unsigned int offset=0) {
  // Local u2/v2/w2 alias the first block of U2/V2/W2, shadowing the
  // members of the same name.
  Complex *u2=U2[0];
  Complex *v2=V2[0];
  Complex *w2=W2[0];

  unsigned int my1=my+1;
  unsigned int mu=2*mx*my1;

  // Backward x-transforms of each input block, with optional Hermitian
  // symmetrization of the x axis first.
  for(unsigned int s=0; s < M; ++s) {
    Complex *f=F[s]+offset;
    if(symmetrize)
      HermitianSymmetrizeX(mx,my1,mx,f);
    xfftpad->backwards(f,u2+s*mu);
  }

  for(unsigned int s=0; s < M; ++s) {
    Complex *g=G[s]+offset;
    if(symmetrize)
      HermitianSymmetrizeX(mx,my1,mx,g);
    xfftpad->backwards(g,v2+s*mu);
  }

  for(unsigned int s=0; s < M; ++s) {
    Complex *h=H[s]+offset;
    if(symmetrize)
      HermitianSymmetrizeX(mx,my1,mx,h);
    xfftpad->backwards(h,w2+s*mu);
  }

  // y-direction subconvolutions of the original data, one row per
  // iteration; each thread uses its own work arrays.
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
  for(unsigned int i=0; i < mu; i += my1) {
    unsigned int thread=get_thread_num();
    yconvolve->convolve(F,G,H,u[thread],v[thread],W[thread],i+offset);
  }

  // y-direction subconvolutions of the padded data.
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
  for(unsigned int i=0; i < mu; i += my1) {
    unsigned int thread=get_thread_num();
    yconvolve->convolve(U2,V2,W2,u[thread],v[thread],W[thread],i+offset);
  }

  // Final forward x-transform accumulates the result into F[0].
  xfftpad->forwards(F[0]+offset,u2);
}

// F, G, and H are distinct pointers to M distinct data blocks each of size
// 2mx*(my+1), shifted by offset (contents not preserved).
// The output is returned in F[0].
void convolve(Complex **F, Complex **G, Complex **H,
              bool symmetrize=true, unsigned int offset=0) {
  convolve(F,G,H,u,v,W,U2,V2,W2,symmetrize,offset);
}

// Special case M=1:
void convolve(Complex *f, Complex *g, Complex *h, bool symmetrize=true) {
  convolve(&f,&g,&h,symmetrize);
}
};

// In-place implicitly dealiased 2D Hermitian ternary convolution.
// Special case G=H, M=1.
class ImplicitHFGGConvolution2 : public ThreadBase {
protected:
  unsigned int mx,my;              // x and y problem sizes
  Complex *u1,*v1;                 // per-thread 1D work arrays
  Complex *u2,*v2;                 // 2D work arrays
  fft0bipad *xfftpad;              // padded transform in the x direction
  ImplicitHFGGConvolution *yconvolve; // 1D subconvolution in y
  bool allocated;                  // true when u1..v2 were allocated here
  Complex **u,**v;                 // per-thread views into u1, v1
public:
  // Build per-thread pointer tables: thread i gets the i-th slice of
  // size my+1 out of u1 and v1.
  void initpointers(Complex **&u, Complex **&v, unsigned int threads) {
    u=new Complex *[threads];
    v=new Complex *[threads];
    unsigned int my1=my+1;
    for(unsigned int i=0; i < threads; ++i) {
      unsigned int imy1=i*my1;
      u[i]=u1+imy1;
      v[i]=v1+imy1;
    }
  }

  // Release the tables created by initpointers (underlying arrays are
  // not freed here).
  void deletepointers(Complex **&u, Complex **&v) {
    delete [] v;
    delete [] u;
  }

  // Create the x-direction padded FFT and the single-threaded y-direction
  // subconvolution, then wire up the per-thread tables.
  void init() {
    xfftpad=new fft0bipad(mx,my,my+1,u2,threads);

    yconvolve=new ImplicitHFGGConvolution(my,u1,v1);
    yconvolve->Threads(1);

    initpointers(u,v,threads);
  }

  // u1 and v1 are temporary arrays of size (my+1)*threads.
  // u2 and v2 are temporary arrays of size 2mx*(my+1).
  // threads is the number of threads to use in the outer subconvolution loop.
// Non-allocating constructor: caller owns the four temporary arrays.
ImplicitHFGGConvolution2(unsigned int mx, unsigned int my,
                         Complex *u1, Complex *v1,
                         Complex *u2, Complex *v2,
                         unsigned int threads=fftw::maxthreads) :
  ThreadBase(threads), mx(mx), my(my), u1(u1), v1(v1), u2(u2), v2(v2),
  allocated(false) {
  init();
}

// Allocating constructor: temporaries are allocated here and released by
// the destructor (allocated=true).
ImplicitHFGGConvolution2(unsigned int mx, unsigned int my,
                         unsigned int threads=fftw::maxthreads) :
  ThreadBase(threads), mx(mx), my(my),
  u1(utils::ComplexAlign((my+1)*threads)),
  v1(utils::ComplexAlign((my+1)*threads)),
  u2(utils::ComplexAlign(2*mx*(my+1))),
  v2(utils::ComplexAlign(2*mx*(my+1))),
  allocated(true) {
  init();
}

~ImplicitHFGGConvolution2() {
  deletepointers(u,v);

  delete yconvolve;
  delete xfftpad;

  if(allocated) {
    utils::deleteAlign(v2);
    utils::deleteAlign(u2);
    utils::deleteAlign(v1);
    utils::deleteAlign(u1);
  }
}

void convolve(Complex *f, Complex *g, Complex **u, Complex **v,
              Complex *u2, Complex *v2, bool symmetrize=true) {
  unsigned int my1=my+1;
  unsigned int mu=2*mx*my1;

  // Backward x-transforms of both inputs, with optional Hermitian
  // symmetrization of the x axis first.
  if(symmetrize)
    HermitianSymmetrizeX(mx,my1,mx,f);
  xfftpad->backwards(f,u2);

  if(symmetrize)
    HermitianSymmetrizeX(mx,my1,mx,g);
  xfftpad->backwards(g,v2);

  // y-direction subconvolutions of the original data, one row per
  // iteration; each thread uses its own work arrays.
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
  for(unsigned int i=0; i < mu; i += my1) {
    unsigned int thread=get_thread_num();
    yconvolve->convolve(f+i,g+i,u[thread],v[thread]);
  }

  // y-direction subconvolutions of the padded data.
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
  for(unsigned int i=0; i < mu; i += my1) {
    unsigned int thread=get_thread_num();
    yconvolve->convolve(u2+i,v2+i,u[thread],v[thread]);
  }

  // Final forward x-transform accumulates the result into f.
  xfftpad->forwards(f,u2);
}

void convolve(Complex *f, Complex *g, bool symmetrize=true) {
  convolve(f,g,u,v,u2,v2,symmetrize);
}
};

// In-place implicitly dealiased 2D Hermitian ternary convolution.
// Special case F=G=H, M=1.
class ImplicitHFFFConvolution2 : public ThreadBase {
protected:
  unsigned int mx,my;              // x and y problem sizes
  Complex *u1;                     // per-thread 1D work array
  Complex *u2;                     // 2D work array
  fft0bipad *xfftpad;              // padded transform in the x direction
  ImplicitHFFFConvolution *yconvolve; // 1D subconvolution in y
  bool allocated;                  // true when u1 and u2 were allocated here
  Complex **u;                     // per-thread views into u1
public:
  // Build the per-thread pointer table: thread i gets the i-th slice of
  // size my+1 out of u1.
  void initpointers(Complex **&u, unsigned int threads) {
    u=new Complex *[threads];
    unsigned int my1=my+1;
    for(unsigned int i=0; i < threads; ++i)
      u[i]=u1+i*my1;
  }

  // Release the table created by initpointers (u1 is not freed here).
  void deletepointers(Complex **&u) {
    delete [] u;
  }

  // Create the x-direction padded FFT and the single-threaded y-direction
  // subconvolution, then wire up the per-thread table.
  void init() {
    xfftpad=new fft0bipad(mx,my,my+1,u2,threads);

    yconvolve=new ImplicitHFFFConvolution(my,u1);
    yconvolve->Threads(1);
    initpointers(u,threads);
  }

  // u1 is a temporary array of size (my+1)*threads.
  // u2 is a temporary array of size 2mx*(my+1).
  // threads is the number of threads to use in the outer subconvolution loop.
  ImplicitHFFFConvolution2(unsigned int mx, unsigned int my,
                           Complex *u1, Complex *u2,
                           unsigned int threads=fftw::maxthreads) :
    ThreadBase(threads), mx(mx), my(my), u1(u1), u2(u2),
    allocated(false) {
    init();
  }

  // Allocating constructor: u1 and u2 are allocated here and released by
  // the destructor (allocated=true).
  ImplicitHFFFConvolution2(unsigned int mx, unsigned int my,
                           unsigned int threads=fftw::maxthreads) :
    ThreadBase(threads), mx(mx), my(my),
    u1(utils::ComplexAlign((my+1)*threads)),
    u2(utils::ComplexAlign(2*mx*(my+1))),
    allocated(true) {
    init();
  }

  ~ImplicitHFFFConvolution2() {
    deletepointers(u);

    delete yconvolve;
    delete xfftpad;

    if(allocated) {
      utils::deleteAlign(u2);
      utils::deleteAlign(u1);
    }
  }

  void convolve(Complex *f, Complex **u, Complex *u2, bool symmetrize=true) {
    unsigned int my1=my+1;
    unsigned int mu=2*mx*my1;

    // Backward x-transform of the input, with optional Hermitian
    // symmetrization of the x axis first.
    if(symmetrize)
      HermitianSymmetrizeX(mx,my1,mx,f);
    xfftpad->backwards(f,u2);

    // y-direction subconvolutions of the original data, one row per
    // iteration; each thread uses its own work array.
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
    for(unsigned int i=0; i < mu; i += my1)
      yconvolve->convolve(f+i,u[get_thread_num()]);

    // y-direction subconvolutions of the padded data.
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
    for(unsigned int i=0; i < mu; i += my1)
      yconvolve->convolve(u2+i,u[get_thread_num()]);

    // Final forward x-transform accumulates the result into f.
    xfftpad->forwards(f,u2);
  }

  void convolve(Complex *f, bool symmetrize=true) {
    convolve(f,u,u2,symmetrize);
  }
};
} //end namespace fftwpp #endif
/* ==== oyranos_cmm_lcm2.c (file boundary from source concatenation) ==== */
/** @file oyranos_cmm_lcm2.c * * Oyranos is an open source Color Management System * * @par Copyright: * 2007-2017 (C) Kai-Uwe Behrmann * * @brief littleCMS CMM module for Oyranos * @author Kai-Uwe Behrmann <ku.b@gmx.de> * @par License: * new BSD <http://www.opensource.org/licenses/BSD-3-Clause> * @since 2007/11/12 */ #include <lcms2.h> #include <stdarg.h> #include <stdlib.h> #include "oyArray2d_s_.h" #include "oyCMM_s.h" #include "oyCMMapi4_s.h" #include "oyCMMapi4_s_.h" #include "oyCMMapi6_s_.h" #include "oyCMMapi7_s.h" #include "oyCMMapi7_s_.h" #include "oyCMMapi10_s_.h" #include "oyCMMui_s_.h" #include "oyConnectorImaging_s_.h" #include "oyImage_s.h" #include "oyProfiles_s.h" #include "oyStructList_s.h" #include "oyranos_cmm.h" /* the API's this CMM implements */ #include "oyranos_config_internal.h" #include "oyranos_generic.h" /* oy_connector_imaging_static_object */ #include "oyranos_helper.h" /* oySprintf_ and other local helpers */ #include "oyranos_i18n.h" #include "oyranos_io.h" #include "oyranos_image.h" #include "oyranos_object_internal.h" #include "oyranos_string.h" #ifdef _OPENMP #define USE_OPENMP 1 #include <omp.h> #endif extern oyCMMapi4_s_ l2cms_api4_cmm; /* oyCMM_s lcm2_cmm_module; oyCMMapi4_s l2cms_api4_cmm; oyCMMui_s l2cms_api4_ui; oyCMMapi7_s l2cms_api7_cmm; oyConnectorImaging_s* l2cms_cmmIccSocket_connectors[2]; oyConnectorImaging_s l2cms_cmmIccSocket_connector; oyConnectorImaging_s* l2cms_cmmIccPlug_connectors[2]; oyConnectorImaging_s l2cms_cmmIccPlug_connector; oyCMMapi6_s l2cms_api6_cmm; OY_LCM2_DATA_CONVERT_REGISTRATION oyCMMapi10_s l2cms_api10_cmm; OY_LCM2_CREATE_ABSTRACT_PROOFING_REGISTRATION oyCMMapi10_s l2cms_api10_cmm2; OY_LCM2_CREATE_MATRIX_REGISTRATION oyCMMapi10_s l2cms_api10_cmm3; OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_LAB_REGISTRATION oyCMMapi10_s l2cms_api10_cmm4; OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_BRADFORD_REGISTRATION oyCMMapi10_s l2cms_api10_cmm5; OY_LCM2_PARSE_CGATS */ void* oyAllocateFunc_ (size_t size); void* 
oyAllocateWrapFunc_ (size_t size, oyAlloc_f allocate_func);
void oyDeAllocateFunc_ (void * data);

#include <math.h>
#include <string.h> /* memcpy */

/* --- internal definitions --- */

#define CMM_NICK "lcm2"
#define CMMProfileOpen_M l2cmsOpenProfileFromMemTHR
#define CMMProfileRelease_M l2cmsCloseProfile
#define CMMToString_M(text) #text
#define CMMMaxChannels_M 16
#define l2cmsPROFILE "lcP2"
#define l2cmsTRANSFORM "lcC2"

/** The proofing LUTs grid size may improve the sharpness of out of color
 *  marking, but at the price of lost speed and increased memory consumption.
 *  53 is the grid size used internally in l2cms' gamut marking code. */
#define l2cmsPROOF_LUT_GRID_RASTER 53

/*#define ENABLE_MPE 1*/

#define CMM_VERSION {0,1,1}

oyMessage_f l2cms_msg = oyMessageFunc;

void l2cmsErrorHandlerFunction ( cmsContext ContextID,
                                 cmsUInt32Number ErrorCode,
                                 const char * ErrorText );
int l2cmsCMMMessageFuncSet ( oyMessage_f l2cms_msg_func );
/* NOTE(review): declared with an empty parameter list, while the visible
 * definition later in this file takes (oyStruct_s * filter); legal as a
 * non-prototype declaration in C, but consider matching the signatures. */
int l2cmsCMMInit ( );

/** @struct  l2cmsProfileWrap_s
 *  @brief   l2cms wrapper for profile data struct
 *
 *  @version Oyranos: 0.1.8
 *  @date    2007/12/10
 *  @since   2007/12/10 (Oyranos: 0.1.8)
 */
typedef struct l2cmsProfileWrap_s_ {
  uint32_t     type;                   /**< shall be l2cmsPROFILE */
  size_t       size;                   /**< size of block */
  oyPointer    block;                  /**< profile data */
  oyPointer    l2cms;                  /**< cmsHPROFILE struct */
  icColorSpaceSignature sig;           /**< ICC profile signature */
  oyProfile_s *dbg_profile;            /**< only for debugging */
} l2cmsProfileWrap_s;

/** @struct  l2cmsTransformWrap_s
 *  @brief   l2cms wrapper for transform data struct
 *
 *  @version Oyranos: 0.1.8
 *  @date    2007/12/20
 *  @since   2007/12/20 (Oyranos: 0.1.8)
 */
typedef struct l2cmsTransformWrap_s_ {
  uint32_t     type;                   /**< shall be l2cmsTRANSFORM */
  oyPointer    l2cms;                  /**< cmsHPROFILE struct */
  icColorSpaceSignature sig_in;        /**< ICC profile signature */
  icColorSpaceSignature sig_out;       /**< ICC profile signature */
  oyPixel_t    oy_pixel_layout_in;     /* input pixel layout */
  oyPixel_t    oy_pixel_layout_out;    /* output pixel layout */
} l2cmsTransformWrap_s;

l2cmsTransformWrap_s *
l2cmsTransformWrap_Set_ ( cmsHTRANSFORM xform, icColorSpaceSignature color_in, icColorSpaceSignature color_out, oyPixel_t oy_pixel_layout_in, oyPixel_t oy_pixel_layout_out, oyPointer_s * oy ); int l2cmsCMMTransform_GetWrap_ ( oyPointer_s * cmm_ptr, l2cmsTransformWrap_s ** s ); int l2cmsCMMDeleteTransformWrap ( oyPointer * wrap ); l2cmsProfileWrap_s * l2cmsCMMProfile_GetWrap_( oyPointer_s * cmm_ptr ); int l2cmsCMMProfileReleaseWrap ( oyPointer * p ); int l2cmsCMMCheckPointer(oyPointer_s * cmm_ptr, const char * resource ); int oyPixelToLcm2PixelLayout_ ( oyPixel_t pixel_layout, icColorSpaceSignature color_space ); char * l2cmsImage_GetText ( oyImage_s * image, int verbose, oyAlloc_f allocateFunc ); char * l2cmsFilterNode_GetText ( oyFilterNode_s * node, oyNAME_e type, oyAlloc_f allocateFunc ); extern char l2cms_extra_options[]; char * l2cmsFlagsToText ( int flags ); cmsHPROFILE l2cmsGamutCheckAbstract ( oyProfile_s * proof, cmsUInt32Number flags, int intent, int intent_proof, uint32_t icc_profile_flags ); oyPointer l2cmsCMMColorConversion_ToMem_ ( cmsHTRANSFORM * xform, oyOptions_s * opts, size_t * size, oyAlloc_f allocateFunc ); oyOptions_s* l2cmsFilter_CmmIccValidateOptions ( oyFilterCore_s * filter, oyOptions_s * validate, int statical, uint32_t * result ); cmsHPROFILE l2cmsAddProfile ( oyProfile_s * p ); l2cmsProfileWrap_s * l2cmsAddProofProfile ( oyProfile_s * proof, cmsUInt32Number flags, int intent, int intent_proof, uint32_t icc_profile_flags ); oyPointer l2cmsFilterNode_CmmIccContextToMem ( oyFilterNode_s * node, size_t * size, oyAlloc_f allocateFunc ); int l2cmsModuleData_Convert ( oyPointer_s * data_in, oyPointer_s * data_out, oyFilterNode_s * node ); int l2cmsFilterPlug_CmmIccRun ( oyFilterPlug_s * requestor_plug, oyPixelAccess_s * ticket ); const char * l2cmsInfoGetText ( const char * select, oyNAME_e type, oyStruct_s * context ); /* --- implementations --- */ /* explicitely load liblcms functions, to avoid conflicts */ static int l2cms_initialised = 0; 
/* 0 - need init; 1 - successful init; -1 - error on init */ static void * l2cms_handle = NULL; static void (*l2cmsSetLogErrorHandler)(cmsLogErrorHandlerFunction Fn) = NULL; static void (*l2cmsSetLogErrorHandlerTHR)( cmsContext ContextID, cmsLogErrorHandlerFunction Fn) = NULL; static cmsColorSpaceSignature (*l2cmsGetColorSpace)(cmsHPROFILE hProfile) = NULL; static cmsColorSpaceSignature (*l2cmsGetPCS)(cmsHPROFILE hProfile) = NULL; static cmsProfileClassSignature (*l2cmsGetDeviceClass)(cmsHPROFILE hProfile) = NULL; static cmsUInt32Number (*l2cmsGetProfileInfoASCII)(cmsHPROFILE hProfile, cmsInfoType Info, const char LanguageCode[3], const char CountryCode[3], char* Buffer, cmsUInt32Number BufferSize) = NULL; static int (*l2_cmsLCMScolorSpace)(cmsColorSpaceSignature ProfileSpace) = NULL; static cmsUInt32Number (*l2cmsChannelsOf)(cmsColorSpaceSignature ColorSpace) = NULL; static cmsBool (*l2cmsIsTag)(cmsHPROFILE hProfile, cmsTagSignature sig) = NULL; static cmsHTRANSFORM (*l2cmsCreateTransform)(cmsHPROFILE Input, cmsUInt32Number InputFormat, cmsHPROFILE Output, cmsUInt32Number OutputFormat, cmsUInt32Number Intent, cmsUInt32Number dwFlags) = NULL; static cmsHTRANSFORM (*l2cmsCreateTransformTHR)(cmsContext ContextID, cmsHPROFILE Input, cmsUInt32Number InputFormat, cmsHPROFILE Output, cmsUInt32Number OutputFormat, cmsUInt32Number Intent, cmsUInt32Number dwFlags) = NULL; static cmsHTRANSFORM (*l2cmsCreateProofingTransform)(cmsHPROFILE Input, cmsUInt32Number InputFormat, cmsHPROFILE Output, cmsUInt32Number OutputFormat, cmsHPROFILE Proofing, cmsUInt32Number Intent, cmsUInt32Number ProofingIntent, cmsUInt32Number dwFlags) = NULL; static cmsHTRANSFORM (*l2cmsCreateProofingTransformTHR)(cmsContext ContextID, cmsHPROFILE Input, cmsUInt32Number InputFormat, cmsHPROFILE Output, cmsUInt32Number OutputFormat, cmsHPROFILE Proofing, cmsUInt32Number Intent, cmsUInt32Number ProofingIntent, cmsUInt32Number dwFlags) = NULL; static cmsHTRANSFORM 
(*l2cmsCreateMultiprofileTransform)(cmsHPROFILE hProfiles[], cmsUInt32Number nProfiles, cmsUInt32Number InputFormat, cmsUInt32Number OutputFormat, cmsUInt32Number Intent, cmsUInt32Number dwFlags) = NULL; static cmsHTRANSFORM (*l2cmsCreateExtendedTransform)(cmsContext ContextID, cmsUInt32Number nProfiles, cmsHPROFILE hProfiles[], cmsBool BPC[], cmsUInt32Number Intents[], cmsFloat64Number AdaptationStates[], cmsHPROFILE hGamutProfile, cmsUInt32Number nGamutPCSposition, cmsUInt32Number InputFormat, cmsUInt32Number OutputFormat, cmsUInt32Number dwFlags) = NULL; static void (*l2cmsDeleteTransform)(cmsHTRANSFORM hTransform) = NULL; static void (*l2cmsDoTransform)(cmsHTRANSFORM Transform, const void * InputBuffer, void * OutputBuffer, cmsUInt32Number Size) = NULL; static cmsHPROFILE (*l2cmsTransform2DeviceLink)(cmsHTRANSFORM hTransform, cmsFloat64Number Version, cmsUInt32Number dwFlags) = NULL; static cmsBool (*l2cmsSaveProfileToMem)(cmsHPROFILE hProfile, void *MemPtr, cmsUInt32Number* BytesNeeded) = NULL; static cmsHPROFILE (*l2cmsOpenProfileFromFile)(const char *ICCProfile, const char *sAccess) = NULL; static cmsHPROFILE (*l2cmsOpenProfileFromMemTHR)(cmsContext ContextID, const void * MemPtr, cmsUInt32Number dwSize) = NULL; static cmsHPROFILE (*l2cmsOpenProfileFromFileTHR)(cmsContext ContextID, const char *ICCProfile, const char *sAccess) = NULL; static cmsBool (*l2cmsSaveProfileToFile)(cmsHPROFILE hProfile, const char* FileName) = NULL; static cmsBool (*l2cmsCloseProfile)(cmsHPROFILE hProfile) = NULL; static cmsHPROFILE (*l2cmsCreateProfilePlaceholder)(cmsContext ContextID) = NULL; static cmsHPROFILE (*l2cmsCreateLab4ProfileTHR)(cmsContext ContextID, const cmsCIExyY* WhitePoint) = NULL; static cmsHPROFILE (*l2cmsCreateLab4Profile)(const cmsCIExyY* WhitePoint) = NULL; static cmsHPROFILE (*l2cmsCreateXYZProfile)() = NULL; static cmsHPROFILE (*l2cmsCreate_sRGBProfile)() = NULL; static void (*l2cmsSetProfileVersion)(cmsHPROFILE hProfile, cmsFloat64Number Version) = NULL; 
static void (*l2cmsSetDeviceClass)(cmsHPROFILE hProfile, cmsProfileClassSignature sig) = NULL; static void (*l2cmsSetColorSpace)(cmsHPROFILE hProfile, cmsColorSpaceSignature sig) = NULL; static void (*l2cmsSetPCS)(cmsHPROFILE hProfile, cmsColorSpaceSignature pcs) = NULL; static cmsToneCurve* (*l2cmsBuildGamma)(cmsContext ContextID, cmsFloat64Number Gamma) = NULL; static cmsToneCurve*(*l2cmsBuildSegmentedToneCurve)(cmsContext ContextID, cmsInt32Number nSegments, const cmsCurveSegment Segments[]) = NULL; static cmsToneCurve*(*l2cmsBuildParametricToneCurve)(cmsContext ContextID, cmsInt32Number Type, const cmsFloat64Number Parameters[]) = NULL; static void (*l2cmsFreeToneCurve)(cmsToneCurve* Curve) = NULL; static cmsPipeline* (*l2cmsPipelineAlloc) (cmsContext ContextID, cmsUInt32Number InputChannels, cmsUInt32Number OutputChannels) = NULL; static int (*l2cmsPipelineInsertStage) (cmsPipeline* lut, cmsStageLoc loc, cmsStage* mpe) = NULL; static void (*l2cmsPipelineFree) (cmsPipeline* lut) = NULL; static cmsStage* (*l2cmsPipelineGetPtrToFirstStage) (const cmsPipeline* lut ) = NULL; static cmsStageSignature (*l2cmsStageType) (const cmsStage* stage) = NULL; static cmsStage* (*l2cmsStageNext) (const cmsStage* next ) = NULL; static cmsUInt32Number (*l2cmsStageInputChannels) (const cmsStage* stage) = NULL; static cmsUInt32Number (*l2cmsStageOutputChannels) (const cmsStage* stage) = NULL; static cmsStage*(*l2cmsStageAllocCLut16bit)(cmsContext ContextID, cmsUInt32Number nGridPoints, cmsUInt32Number inputChan, cmsUInt32Number outputChan, const cmsUInt16Number* Table) = NULL; static cmsStage*(*l2cmsStageAllocCLutFloat)(cmsContext ContextID, cmsUInt32Number nGridPoints, cmsUInt32Number inputChan, cmsUInt32Number outputChan, const cmsFloat32Number* Table) = NULL; static cmsBool (*l2cmsStageSampleCLut16bit)(cmsStage* mpe, cmsSAMPLER16 Sampler, void* Cargo, cmsUInt32Number dwFlags) = NULL; static cmsBool (*l2cmsStageSampleCLutFloat)(cmsStage* mpe, cmsSAMPLERFLOAT Sampler, void* Cargo, 
cmsUInt32Number dwFlags) = NULL; static cmsStage*(*l2cmsStageAllocToneCurves)(cmsContext ContextID, cmsUInt32Number nChannels, cmsToneCurve* const Curves[]) = NULL; static void* (*l2cmsReadTag)(cmsHPROFILE hProfile, cmsTagSignature sig) = NULL; static cmsBool (*l2cmsWriteTag)(cmsHPROFILE hProfile, cmsTagSignature sig, const void* data) = NULL; static cmsMLU*(*l2cmsMLUalloc)(cmsContext ContextID, cmsUInt32Number nItems) = NULL; static cmsBool (*l2cmsMLUsetASCII)(cmsMLU* mlu, const char LanguageCode[3], const char CountryCode[3], const char* ASCIIString) = NULL; static cmsBool (*l2cmsMLUsetWide)(cmsMLU* mlu, const char LanguageCode[3], const char CountryCode[3], const wchar_t* WideString) = NULL; static void (*l2cmsMLUfree)(cmsMLU* mlu) = NULL; static cmsHANDLE (*l2cmsDictAlloc)(cmsContext ContextID); static void (*l2cmsDictFree)(cmsHANDLE hDict); static cmsHANDLE (*l2cmsDictDup)(cmsHANDLE hDict); static cmsBool (*l2cmsDictAddEntry)(cmsHANDLE hDict, const wchar_t* Name, const wchar_t* Value, const cmsMLU *DisplayName, const cmsMLU *DisplayValue); static const cmsDICTentry* (*l2cmsDictGetEntryList)(cmsHANDLE hDict); static const cmsDICTentry* (*l2cmsDictNextEntry)(const cmsDICTentry* e); static cmsHPROFILE (*l2cmsCreateRGBProfile)(const cmsCIExyY* WhitePoint, const cmsCIExyYTRIPLE* Primaries, cmsToneCurve* const TransferFunction[3]) = NULL; static void (*l2cmsLabEncoded2Float)(cmsCIELab* Lab, const cmsUInt16Number wLab[3]) = NULL; static void (*l2cmsFloat2LabEncoded)(cmsUInt16Number wLab[3], const cmsCIELab* Lab) = NULL; static const cmsCIEXYZ* (*l2cmsD50_XYZ)(void); static const cmsCIExyY* (*l2cmsD50_xyY)(void); static cmsBool (*l2cmsWhitePointFromTemp)(cmsCIExyY* WhitePoint, cmsFloat64Number TempK) = NULL; static cmsBool (*l2cmsAdaptToIlluminant)(cmsCIEXYZ* Result, const cmsCIEXYZ* SourceWhitePt, const cmsCIEXYZ* Illuminant, const cmsCIEXYZ* Value) = NULL; static void (*l2cmsxyY2XYZ)(cmsCIEXYZ* Dest, const cmsCIExyY* Source) = NULL; static void (*l2cmsXYZ2Lab)(const 
cmsCIEXYZ* WhitePoint, cmsCIELab* Lab, const cmsCIEXYZ* xyz) = NULL; static void (*l2cmsLab2XYZ)(const cmsCIEXYZ* WhitePoint, cmsCIEXYZ* xyz, const cmsCIELab* Lab) = NULL; static cmsFloat64Number (*l2cmsDeltaE)(const cmsCIELab* Lab1, const cmsCIELab* Lab2) = NULL; static void (*l2cmsGetAlarmCodes)(cmsUInt16Number NewAlarm[cmsMAXCHANNELS]) = NULL; static cmsContext (*l2cmsCreateContext)(void* Plugin, void* UserData) = NULL; static cmsContext dummyCreateContext(void* Plugin OY_UNUSED, void* UserData OY_UNUSED) {return NULL;} static void* (*l2cmsGetContextUserData)(cmsContext ContextID) = NULL; static void* dummyGetContextUserData(cmsContext ContextID OY_UNUSED) {return NULL;} static cmsContext (*l2cmsGetProfileContextID)(cmsHPROFILE hProfile) = NULL; static cmsContext (*l2cmsGetTransformContextID)(cmsHPROFILE hProfile) = NULL; static int dummyGetEncodedCMMversion() {return LCMS_VERSION;} static int (*l2cmsGetEncodedCMMversion)(void) = dummyGetEncodedCMMversion; #if !defined(COMPILE_STATIC) #define LOAD_FUNC( func, fallback_func ) l2##func = dlsym(l2cms_handle, #func ); \ if(!l2##func) \ { \ oyMSG_e type = oyMSG_ERROR; \ if(#fallback_func != NULL) \ { \ l2##func = fallback_func; \ type = oyMSG_WARN; \ } else \ { \ error = 1; \ } \ report = 1; \ l2cms_msg( type,0, OY_DBG_FORMAT_" " \ "dlsym failed: %s", \ OY_DBG_ARGS_, dlerror() ); \ } #else #define LOAD_FUNC( func, fallback_func ) l2##func = func; \ if(!l2##func) \ { \ oyMSG_e type = oyMSG_ERROR; \ if(#fallback_func != NULL) \ { \ l2##func = fallback_func; \ type = oyMSG_WARN; \ } else \ { \ error = 1; \ } \ report = 1; \ l2cms_msg( type,0, OY_DBG_FORMAT_" " \ "dlsym failed: %s", \ OY_DBG_ARGS_, dlerror() ); \ } \ l2cms_handle = 0; #define dlerror() l2cms_handle = 0 #endif /** Function l2cmsCMMInit * @brief API requirement * * @version Oyranos: 0.9.5 * @date 2014/02/27 * @since 2007/12/11 (Oyranos: 0.1.8) */ int l2cmsCMMInit ( oyStruct_s * filter OY_UNUSED ) { int error = 0; if(!l2cms_initialised) { int report = 0; 
char * fn = oyLibNameCreate_( "lcms2", 2 ); #if !defined(COMPILE_STATIC) l2cms_handle = dlopen(fn, RTLD_LAZY); if(!l2cms_handle) { l2cms_msg( oyMSG_ERROR,0, OY_DBG_FORMAT_" " "init \"%s\" failed: %s", OY_DBG_ARGS_, fn, dlerror() ); error = 1; l2cms_initialised = -1; } else #endif { LOAD_FUNC( cmsSetLogErrorHandler, NULL ); #if LCMS_VERSION >= 2060 LOAD_FUNC( cmsSetLogErrorHandlerTHR, NULL ); #endif LOAD_FUNC( cmsGetColorSpace, NULL ); LOAD_FUNC( cmsGetPCS, NULL ); LOAD_FUNC( cmsGetDeviceClass, NULL ); LOAD_FUNC( cmsGetProfileInfoASCII, NULL ); LOAD_FUNC( _cmsLCMScolorSpace, NULL ); LOAD_FUNC( cmsChannelsOf, NULL ); LOAD_FUNC( cmsIsTag, NULL ); LOAD_FUNC( cmsCreateTransform, NULL ); LOAD_FUNC( cmsCreateTransformTHR, NULL ); LOAD_FUNC( cmsCreateProofingTransform, NULL ); LOAD_FUNC( cmsCreateProofingTransformTHR, NULL ); LOAD_FUNC( cmsCreateMultiprofileTransform, NULL ); LOAD_FUNC( cmsCreateExtendedTransform, NULL ); LOAD_FUNC( cmsDeleteTransform, NULL ); LOAD_FUNC( cmsDoTransform, NULL ); LOAD_FUNC( cmsOpenProfileFromFile, NULL ); LOAD_FUNC( cmsSaveProfileToFile, NULL ); LOAD_FUNC( cmsTransform2DeviceLink, NULL ); LOAD_FUNC( cmsSaveProfileToMem, NULL ); LOAD_FUNC( cmsOpenProfileFromMemTHR, NULL ); #if LCMS_VERSION >= 2060 LOAD_FUNC( cmsOpenProfileFromFileTHR, NULL ); #endif LOAD_FUNC( cmsCloseProfile, NULL ); LOAD_FUNC( cmsCreateProfilePlaceholder, NULL ); LOAD_FUNC( cmsSetProfileVersion, NULL ); LOAD_FUNC( cmsCreateLab4ProfileTHR, NULL ); LOAD_FUNC( cmsCreateLab4Profile, NULL ); LOAD_FUNC( cmsCreateXYZProfile, NULL ); LOAD_FUNC( cmsCreate_sRGBProfile, NULL ); LOAD_FUNC( cmsCreateRGBProfile, NULL ); LOAD_FUNC( cmsSetDeviceClass, NULL ); LOAD_FUNC( cmsSetColorSpace, NULL ); LOAD_FUNC( cmsSetPCS, NULL ); LOAD_FUNC( cmsBuildGamma, NULL ); LOAD_FUNC( cmsBuildSegmentedToneCurve, NULL ); LOAD_FUNC( cmsBuildParametricToneCurve, NULL ); LOAD_FUNC( cmsFreeToneCurve, NULL ); LOAD_FUNC( cmsPipelineAlloc, NULL ); LOAD_FUNC( cmsPipelineFree, NULL ); LOAD_FUNC( 
cmsPipelineInsertStage, NULL ); LOAD_FUNC( cmsPipelineGetPtrToFirstStage, NULL ); LOAD_FUNC( cmsStageType, NULL ); LOAD_FUNC( cmsStageNext, NULL ); LOAD_FUNC( cmsStageInputChannels, NULL ); LOAD_FUNC( cmsStageOutputChannels, NULL ); LOAD_FUNC( cmsStageAllocCLut16bit, NULL ); LOAD_FUNC( cmsStageAllocCLutFloat, NULL ); LOAD_FUNC( cmsStageSampleCLut16bit, NULL ); LOAD_FUNC( cmsStageSampleCLutFloat, NULL ); LOAD_FUNC( cmsStageAllocToneCurves, NULL ); LOAD_FUNC( cmsReadTag, NULL ); LOAD_FUNC( cmsWriteTag, NULL ); LOAD_FUNC( cmsMLUalloc, NULL ); LOAD_FUNC( cmsMLUsetASCII, NULL ); LOAD_FUNC( cmsMLUsetWide, NULL ); LOAD_FUNC( cmsMLUfree, NULL ); LOAD_FUNC( cmsDictAlloc, NULL ); LOAD_FUNC( cmsDictFree, NULL ); LOAD_FUNC( cmsDictDup, NULL ); LOAD_FUNC( cmsDictAddEntry, NULL ); LOAD_FUNC( cmsDictGetEntryList, NULL ); LOAD_FUNC( cmsDictNextEntry, NULL ); LOAD_FUNC( cmsLabEncoded2Float, NULL ); LOAD_FUNC( cmsFloat2LabEncoded, NULL ); LOAD_FUNC( cmsD50_XYZ, NULL ); LOAD_FUNC( cmsD50_xyY, NULL ); LOAD_FUNC( cmsWhitePointFromTemp, NULL ); LOAD_FUNC( cmsAdaptToIlluminant, NULL ); LOAD_FUNC( cmsxyY2XYZ, NULL ); LOAD_FUNC( cmsXYZ2Lab, NULL ); LOAD_FUNC( cmsLab2XYZ, NULL ); LOAD_FUNC( cmsDeltaE, NULL ); LOAD_FUNC( cmsGetAlarmCodes, NULL ); #if LCMS_VERSION >= 2060 LOAD_FUNC( cmsCreateContext, dummyCreateContext ); /* available since lcms 2.6 */ LOAD_FUNC( cmsGetContextUserData, dummyGetContextUserData ); /* available since lcms 2.6 */ #else l2cmsCreateContext = dummyCreateContext; l2cmsGetContextUserData = dummyGetContextUserData; #endif LOAD_FUNC( cmsGetProfileContextID, NULL ); LOAD_FUNC( cmsGetTransformContextID, NULL ); #if LCMS_VERSION >= 2080 LOAD_FUNC( cmsGetEncodedCMMversion, dummyGetEncodedCMMversion ); #endif if(l2cmsSetLogErrorHandler) l2cmsSetLogErrorHandler( l2cmsErrorHandlerFunction ); else l2cms_msg( oyMSG_WARN, (oyStruct_s*)NULL, OY_DBG_FORMAT_"can not set error handler %d %d", OY_DBG_ARGS_, l2cmsGetEncodedCMMversion, LCMS_VERSION ); if(l2cmsGetEncodedCMMversion() != 
LCMS_VERSION) l2cms_msg( oyMSG_WARN, (oyStruct_s*)NULL, OY_DBG_FORMAT_" compile and run time version differ %d %d", OY_DBG_ARGS_, l2cmsGetEncodedCMMversion, LCMS_VERSION ); #if !defined(COMPILE_STATIC) if(error) l2cms_initialised = -1; else #endif l2cms_initialised = 1; if(report) l2cms_msg( oyMSG_WARN,0, OY_DBG_FORMAT_" " "init \"%s\" issue(s): v%d", OY_DBG_ARGS_, fn, l2cmsGetEncodedCMMversion() ); } oyFree_m_( fn ); } else if(l2cms_initialised == -1) error = 1; return error; } #define cmsSetLogErrorHandler l2cmsSetLogErrorHandler #define cmsSetLogErrorHandlerTHR l2cmsSetLogErrorHandlerTHR #define cmsGetColorSpace l2cmsGetColorSpace #define cmsGetPCS l2cmsGetPCS #define cmsGetDeviceClass l2cmsGetDeviceClass #define cmsGetProfileInfoASCII l2cmsGetProfileInfoASCII #define _cmsLCMScolorSpace l2_cmsLCMScolorSpace #define cmsChannelsOf l2cmsChannelsOf #define cmsIsTag l2cmsIsTag #define cmsCreateTransform l2cmsCreateTransform #define cmsCreateTransformTHR l2cmsCreateTransformTHR #define cmsCreateProofingTransform l2cmsCreateProofingTransform #define cmsCreateProofingTransformTHR l2cmsCreateProofingTransformTHR #define cmsCreateMultiprofileTransform l2cmsCreateMultiprofileTransform #define cmsCreateExtendedTransform l2cmsCreateExtendedTransform #define cmsDeleteTransform l2cmsDeleteTransform #define cmsDoTransform l2cmsDoTransform #define cmsOpenProfileFromFile l2cmsOpenProfileFromFile #define cmsSaveProfileToFile l2cmsSaveProfileToFile #define cmsTransform2DeviceLink l2cmsTransform2DeviceLink #define cmsSaveProfileToMem l2cmsSaveProfileToMem #define cmsOpenProfileFromMemTHR l2cmsOpenProfileFromMemTHR #define cmsOpenProfileFromFileTHR l2cmsOpenProfileFromFileTHR #define cmsCloseProfile l2cmsCloseProfile #define cmsCreateProfilePlaceholder l2cmsCreateProfilePlaceholder #define cmsSetProfileVersion l2cmsSetProfileVersion #define cmsCreateLab4ProfileTHR l2cmsCreateLab4ProfileTHR #define cmsCreateLab4Profile l2cmsCreateLab4Profile #define cmsCreateXYZProfile 
l2cmsCreateXYZProfile #define cmsCreate_sRGBProfile l2cmsCreate_sRGBProfile #define cmsCreateRGBProfile l2cmsCreateRGBProfile #define cmsSetDeviceClass l2cmsSetDeviceClass #define cmsSetColorSpace l2cmsSetColorSpace #define cmsSetPCS l2cmsSetPCS #define cmsBuildGamma l2cmsBuildGamma #define cmsBuildSegmentedToneCurve l2cmsBuildSegmentedToneCurve #define cmsBuildParametricToneCurve l2cmsBuildParametricToneCurve #define cmsFreeToneCurve l2cmsFreeToneCurve #define cmsPipelineAlloc l2cmsPipelineAlloc #define cmsPipelineFree l2cmsPipelineFree #define cmsPipelineInsertStage l2cmsPipelineInsertStage #define cmsPipelineGetPtrToFirstStage l2cmsPipelineGetPtrToFirstStage #define cmsStageType l2cmsStageType #define cmsStageNext l2cmsStageNext #define cmsStageInputChannels l2cmsStageInputChannels #define cmsStageOutputChannels l2cmsStageOutputChannels #define cmsStageAllocCLut16bit l2cmsStageAllocCLut16bit #define cmsStageAllocCLutFloat l2cmsStageAllocCLutFloat #define cmsStageSampleCLut16bit l2cmsStageSampleCLut16bit #define cmsStageSampleCLutFloat l2cmsStageSampleCLutFloat #define cmsStageAllocToneCurves l2cmsStageAllocToneCurves #define cmsReadTag l2cmsReadTag #define cmsWriteTag l2cmsWriteTag #define cmsMLUalloc l2cmsMLUalloc #define cmsMLUsetASCII l2cmsMLUsetASCII #define cmsMLUsetWide l2cmsMLUsetWide #define cmsMLUfree l2cmsMLUfree #define cmsDictAlloc l2cmsDictAlloc #define cmsDictFree l2cmsDictFree #define cmsDictDup l2cmsDictDup #define cmsDictAddEntry l2cmsDictAddEntry #define cmsDictGetEntryList l2cmsDictGetEntryList #define cmsDictNextEntry l2cmsDictNextEntry #define cmsLabEncoded2Float l2cmsLabEncoded2Float #define cmsFloat2LabEncoded l2cmsFloat2LabEncoded #define cmsD50_XYZ l2cmsD50_XYZ #define cmsD50_xyY l2cmsD50_xyY #define cmsWhitePointFromTemp l2cmsWhitePointFromTemp #define cmsAdaptToIlluminant l2cmsAdaptToIlluminant #define cmsxyY2XYZ l2cmsxyY2XYZ #define cmsXYZ2Lab l2cmsXYZ2Lab #define cmsLab2XYZ l2cmsLab2XYZ #define cmsDeltaE l2cmsDeltaE #define 
cmsGetAlarmCodes l2cmsGetAlarmCodes #define cmsCreateContext l2cmsCreateContext #define cmsGetContextUserData l2cmsGetContextUserData #define cmsGetProfileContextID l2cmsGetProfileContextID #define cmsGetTransformContextID l2cmsGetTransformContextID #define cmsGetEncodedCMMversion l2cmsGetEncodedCMMversion #include "lcm2_profiler.c" /** Function l2cmsCMMProfile_GetWrap_ * @brief convert to l2cms profile wrapper struct * * @version Oyranos: 0.1.8 * @date 2007/12/10 * @since 2007/12/10 (Oyranos: 0.1.8) */ l2cmsProfileWrap_s * l2cmsCMMProfile_GetWrap_( oyPointer_s* cmm_ptr ) { l2cmsProfileWrap_s * s = NULL; char * type_ = l2cmsPROFILE; unsigned type = *((uint32_t*)type_); if(cmm_ptr && !l2cmsCMMCheckPointer( cmm_ptr, l2cmsPROFILE ) && oyPointer_GetPointer(cmm_ptr)) s = (l2cmsProfileWrap_s*) oyPointer_GetPointer(cmm_ptr); if(s && s->type != type) s = NULL; if(s && oy_debug >= 2) { l2cms_msg( oyMSG_WARN, (oyStruct_s*)cmm_ptr, OY_DBG_FORMAT_" profile size: %d %s cmm_ptr: %d", OY_DBG_ARGS_, s->size, s->dbg_profile?oyNoEmptyString_m_(oyProfile_GetFileName( s->dbg_profile,-1 )):"????", oyStruct_GetId((oyStruct_s*)cmm_ptr) ); } return s; } /** Function l2cmsCMMTransform_GetWrap_ * @brief convert to l2cms transform wrapper struct * * @version Oyranos: 0.1.8 * @since 2007/12/20 (Oyranos: 0.1.8) * @date 2009/05/28 */ int l2cmsCMMTransform_GetWrap_ ( oyPointer_s * cmm_ptr, l2cmsTransformWrap_s ** s ) { char * type_ = l2cmsTRANSFORM; unsigned type = *((uint32_t*)type_); if(cmm_ptr && !l2cmsCMMCheckPointer( cmm_ptr, l2cmsTRANSFORM ) && oyPointer_GetPointer(cmm_ptr)) *s = (l2cmsTransformWrap_s*) oyPointer_GetPointer(cmm_ptr); if(*s && ((*s)->type != type || !(*s)->l2cms)) { *s = 0; return 1; } return 0; } /** Function l2cmsCMMProfileReleaseWrap * @brief release a l2cms profile wrapper struct * * @version Oyranos: 0.1.8 * @date 2007/12/20 * @since 2007/12/20 (Oyranos: 0.1.8) */ int l2cmsCMMProfileReleaseWrap(oyPointer *p) { int error = !p; l2cmsProfileWrap_s * s = 0; char * type_ = 
l2cmsPROFILE;
  unsigned type = *((uint32_t*)type_);
  char s_type[4];

  if(!error && *p)
    s = (l2cmsProfileWrap_s*) *p;

  if(!error)
    error = !s;

  /* keep a copy of the 4 byte type tag for debugging before it is cleared */
  if(!error)
    memcpy(s_type, &s->type, 4);

  /* refuse to release anything that is not a l2cms profile wrapper */
  if(!error && s->type != type)
    error = 1;

  if(!error)
  {
#if LCMS_VERSION >= 2060
    /* NOTE(review): this local "p" shadows the "oyPointer *p" parameter;
     * it releases the oyProfile_s stored as user data of the lcms
     * threading context — confirm the context user data is always a
     * oyProfile_s here. */
    oyProfile_s * p = l2cmsGetContextUserData( l2cmsGetProfileContextID( s->l2cms ) );
    oyProfile_Release ( &p );
#endif
    CMMProfileRelease_M (s->l2cms);
    oyProfile_Release( &s->dbg_profile );
    s->l2cms = 0;
    s->type = 0;
    if(s->block && s->size)
      free(s->block);
    s->size = 0;
    s->block = 0;
    free(s);
  }

  if(!error)
    *p = 0;

  return error;
}

/** Function l2cmsCMMData_Open
 *  @brief oyCMMProfileOpen_t implementation
 *
 *  Serialises the Oyranos profile object in "data" into a memory block,
 *  wraps block + lcms profile handle into a newly allocated
 *  l2cmsProfileWrap_s and stores that wrapper in the supplied oyPointer_s
 *  together with the matching release function.
 *
 *  @param data  expected to be a oyProfile_s (checked via type_)
 *  @param oy    receives the wrapper through oyPointer_Set()
 *  @return      error state (0 == success)
 *
 *  @version Oyranos: 0.1.10
 *  @since 2007/11/12 (Oyranos: 0.1.8)
 *  @date 2007/12/27
 */
int l2cmsCMMData_Open ( oyStruct_s * data,
                        oyPointer_s * oy )
{
  oyPointer_s * s = 0;
  int error = 0;

  if(!error)
  {
    char * type_ = l2cmsPROFILE;
    int type = *((int32_t*)type_);
    size_t size = 0;
    oyPointer block = 0;
    oyProfile_s * p = NULL;
    /* NOTE: this inner "s" shadows the outer oyPointer_s "s";
     * calloc arguments are (size, count) — unconventional order, but the
     * result is the same zero initialised block. */
    l2cmsProfileWrap_s * s = calloc(sizeof(l2cmsProfileWrap_s), 1);

    if(data->type_ == oyOBJECT_PROFILE_S)
    {
      p = (oyProfile_s*)data;
      /* obtain the raw ICC memory block from the Oyranos profile */
      block = oyProfile_GetMem( p, &size, 0, malloc );
    }

    s->type = type;
    s->size = size;
    s->block = block;

    if(oy_debug >= 2)
    {
      /* keep a debug reference to the profile for diagnostics */
      s->dbg_profile = oyProfile_Copy( p, 0 );
      l2cms_msg( oyMSG_DBG, data, OY_DBG_FORMAT_" going to open %s",
                 OY_DBG_ARGS_,
                 p?oyProfile_GetFileName( p,-1 ):"????"
); } #if LCMS_VERSION < 2060 s->l2cms = CMMProfileOpen_M( data, block, size ); #else { oyProfile_s * p2 = oyProfile_Copy( p, NULL ); cmsContext tc = l2cmsCreateContext( NULL, p2 ); /* threading context */ l2cmsSetLogErrorHandlerTHR( tc, l2cmsErrorHandlerFunction ); s->l2cms = CMMProfileOpen_M( tc, block, size ); } #endif if(!s->l2cms) l2cms_msg( oyMSG_WARN, (oyStruct_s*)data, OY_DBG_FORMAT_" %s() failed", OY_DBG_ARGS_, "CMMProfileOpen_M" ); error = oyPointer_Set( oy, 0, l2cmsPROFILE, s, CMMToString_M(CMMProfileOpen_M), l2cmsCMMProfileReleaseWrap ); if(error) l2cms_msg( oyMSG_WARN, (oyStruct_s*)data, OY_DBG_FORMAT_" oyPointer_Set() failed", OY_DBG_ARGS_ ); } if(!error) s = oy; if(!error) error = !s; return error; } /** Function l2cmsCMMCheckPointer * @brief * * @version Oyranos: 0.1.8 * @date 2007/11/12 * @since 2007/11/12 (Oyranos: 0.1.8) */ int l2cmsCMMCheckPointer(oyPointer_s * cmm_ptr, const char * resource ) { int error = !cmm_ptr; if(cmm_ptr && oyPointer_GetPointer(cmm_ptr) && oyPointer_GetResourceName(cmm_ptr)) { int * res_id = (int*)oyPointer_GetResourceName(cmm_ptr); if(!oyCMMlibMatchesCMM(oyPointer_GetLibName(cmm_ptr), CMM_NICK) || *res_id != *((int*)(resource)) ) error = 1; } else { error = 1; } return error; } /** Function oyPixelToLcm2PixelLayout_ * @brief * * @version Oyranos: 0.1.8 * @date 2007/11/00 * @since 2007/11/00 (Oyranos: 0.1.8) */ int oyPixelToLcm2PixelLayout_ ( oyPixel_t pixel_layout, icColorSpaceSignature color_space ) { int cmm_pixel = 0; int chan_n = oyToChannels_m (pixel_layout); int c_off = oyToColorOffset_m (pixel_layout); oyDATATYPE_e data_type = oyToDataType_m (pixel_layout); int planar = oyToPlanar_m (pixel_layout); int flavour = oyToFlavor_m (pixel_layout); unsigned int cchans = l2cmsChannelsOf( (cmsColorSpaceSignature)color_space ); unsigned int l2cms_color_space = l2_cmsLCMScolorSpace( (cmsColorSpaceSignature)color_space ); int extra = chan_n - cchans; if(chan_n > CMMMaxChannels_M) l2cms_msg( oyMSG_WARN,0, OY_DBG_FORMAT_" " "can 
not handle more than %d channels; found: %d", OY_DBG_ARGS_, CMMMaxChannels_M, chan_n); cmm_pixel |= CHANNELS_SH(cchans); if(extra) cmm_pixel |= EXTRA_SH(extra); if(c_off == 1) cmm_pixel |= SWAPFIRST_SH(1); if(data_type == oyUINT8) cmm_pixel |= BYTES_SH(1); else if(data_type == oyUINT16 || data_type == oyHALF) cmm_pixel |= BYTES_SH(2); else if(data_type == oyFLOAT) cmm_pixel |= BYTES_SH(4); else if(data_type == oyDOUBLE) cmm_pixel |= BYTES_SH(0); if(data_type == oyDOUBLE || data_type == oyFLOAT || data_type == oyHALF) cmm_pixel |= FLOAT_SH(1); if(oyToSwapColorChannels_m (pixel_layout)) cmm_pixel |= DOSWAP_SH(1); if(oyToByteswap_m(pixel_layout)) cmm_pixel |= ENDIAN16_SH(1); if(planar) cmm_pixel |= PLANAR_SH(1); if(flavour) cmm_pixel |= FLAVOR_SH(1); /* lcms2 uses V4 style value ranges */ cmm_pixel |= COLORSPACE_SH( l2cms_color_space ); return cmm_pixel; } /** Function l2cmsCMMDeleteTransformWrap * @brief * * @version Oyranos: 0.1.8 * @since 2007/12/00 (Oyranos: 0.1.8) * @date 2007/12/00 */ int l2cmsCMMDeleteTransformWrap(oyPointer * wrap) { if(wrap && *wrap) { l2cmsTransformWrap_s * s = (l2cmsTransformWrap_s*) *wrap; l2cmsDeleteTransform (s->l2cms); s->l2cms = 0; free(s); *wrap = 0; return 0; } return 1; } /** Function l2cmsTransformWrap_Set_ * @brief fill a l2cmsTransformWrap_s struct * * @version Oyranos: 0.1.8 * @since 2007/12/21 (Oyranos: 0.1.8) * @date 2007/12/21 */ l2cmsTransformWrap_s * l2cmsTransformWrap_Set_ ( cmsHTRANSFORM xform, icColorSpaceSignature color_in, icColorSpaceSignature color_out, oyPixel_t oy_pixel_layout_in, oyPixel_t oy_pixel_layout_out, oyPointer_s * oy ) { int error = !xform; l2cmsTransformWrap_s * s = 0; if(!error) { char * type_ = l2cmsTRANSFORM; int type = *((int32_t*)type_); l2cmsTransformWrap_s * ltw = calloc(sizeof(l2cmsTransformWrap_s), 1); ltw->type = type; ltw->l2cms = xform; xform = 0; ltw->sig_in = color_in; ltw->sig_out = color_out; ltw->oy_pixel_layout_in = oy_pixel_layout_in; ltw->oy_pixel_layout_out = oy_pixel_layout_out; s 
= ltw;

    if(oy_debug >= 2)
      l2cms_msg( oyMSG_DBG, NULL, OY_DBG_FORMAT_
                 " xform: "OY_PRINT_POINTER
                 " ltw: "OY_PRINT_POINTER,
                 OY_DBG_ARGS_, ltw->l2cms, ltw );
  }

  /* hand the wrapper to the caller supplied oyPointer_s together with its
   * matching release function */
  if(!error)
    oyPointer_Set( oy, 0, 0, s, "l2cmsCMMDeleteTransformWrap",
                   l2cmsCMMDeleteTransformWrap );

  return s;
}

/** Function l2cmsIntentFromOptions
 *  @brief read the rendering intent from an option set
 *
 *  Parses the "rendering_intent" and "rendering_intent_proof" option
 *  strings with atoi().
 *
 *  @param opts   option set to query
 *  @param proof  when non zero the proofing intent is returned,
 *                otherwise the plain rendering intent
 *  @return       the selected lcms intent number
 */
int l2cmsIntentFromOptions ( oyOptions_s * opts, int proof )
{
  int intent = 0,
      intent_proof = 0;
  const char * o_txt = 0;

#ifndef oyStrlen_
#define oyStrlen_ strlen
#endif

  o_txt = oyOptions_FindString ( opts, "rendering_intent", 0);
  if(o_txt && oyStrlen_(o_txt))
    intent = atoi( o_txt );

  o_txt = oyOptions_FindString ( opts, "rendering_intent_proof", 0);
  if(o_txt && oyStrlen_(o_txt))
    intent_proof = atoi( o_txt );

  /* NOTE(review): every non zero option value collapses to
   * INTENT_ABSOLUTE_COLORIMETRIC, only 0 selects relative colorimetric —
   * presumably intentional; verify against the option's documentation. */
  intent_proof = intent_proof == 0 ? INTENT_RELATIVE_COLORIMETRIC : INTENT_ABSOLUTE_COLORIMETRIC;

  if(oy_debug > 2)
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)opts, OY_DBG_FORMAT_"\n"
               " proof: %d intent: %d intent_proof: %d\n",
               OY_DBG_ARGS_, proof, intent, intent_proof );

  if(proof)
    return intent_proof;
  else
    return intent;
}

/** Function l2cmsFlagsFromOptions
 *  @brief translate Oyranos option strings into lcms transform flags
 *
 *  Reads "rendering_bpc", "rendering_gamut_warning", "precalculation",
 *  "precalculation_curves" and "no_white_on_white_fixup" and maps them to
 *  the corresponding cmsFLAGS_* bits.
 *
 *  @param opts  option set to query
 *  @return      combined cmsFLAGS_* bit mask
 */
uint32_t l2cmsFlagsFromOptions ( oyOptions_s * opts )
{
  /* defaults: curves precalculation and white-on-white fixup are on */
  int bpc = 0,
      gamut_warning = 0,
      precalculation = 0,
      precalculation_curves = 1,
      no_white_on_white_fixup = 1,
      flags = 0;
  const char * o_txt = 0;
  /* warn about unsupported linearisation only once per process */
  static int precalculation_curves_warn = 0;

  o_txt = oyOptions_FindString ( opts, "rendering_bpc", 0 );
  if(o_txt && oyStrlen_(o_txt))
    bpc = atoi( o_txt );

  o_txt = oyOptions_FindString ( opts, "rendering_gamut_warning", 0 );
  if(o_txt && oyStrlen_(o_txt))
    gamut_warning = atoi( o_txt );

  o_txt = oyOptions_FindString ( opts, "precalculation", 0 );
  if(o_txt && oyStrlen_(o_txt))
    precalculation = atoi( o_txt );

  o_txt = oyOptions_FindString ( opts, "precalculation_curves", 0 );
  if(o_txt && oyStrlen_(o_txt))
    precalculation_curves = atoi( o_txt );

  o_txt = oyOptions_FindString ( opts, "no_white_on_white_fixup", 0 );
  if(o_txt && oyStrlen_(o_txt))
    no_white_on_white_fixup = atoi( o_txt );

  /* this should be moved to the CMM and not be handled here in Oyranos */
  flags = bpc ?
flags | cmsFLAGS_BLACKPOINTCOMPENSATION : flags & (~cmsFLAGS_BLACKPOINTCOMPENSATION); flags = gamut_warning ? flags | cmsFLAGS_GAMUTCHECK : flags & (~cmsFLAGS_GAMUTCHECK); flags = no_white_on_white_fixup ? flags | cmsFLAGS_NOWHITEONWHITEFIXUP : flags & (~cmsFLAGS_NOWHITEONWHITEFIXUP); switch(precalculation) { case 0: flags |= 0; break; case 1: flags |= cmsFLAGS_NOOPTIMIZE; break; case 2: flags |= cmsFLAGS_HIGHRESPRECALC; break; case 3: flags |= cmsFLAGS_LOWRESPRECALC; break; } if(l2cmsGetEncodedCMMversion() >= 2070) { switch(precalculation_curves) { case 0: flags |= 0; break; case 1: flags |= cmsFLAGS_CLUT_POST_LINEARIZATION | cmsFLAGS_CLUT_PRE_LINEARIZATION; break; } } else if(precalculation_curves_warn++ == 0) l2cms_msg( oyMSG_WARN, (oyStruct_s*)opts, OY_DBG_FORMAT_ "Skipping cmsFLAGS_CLUT_POST_LINEARIZATION! Can not handle flag for DL creation. v%d\n", OY_DBG_ARGS_, l2cmsGetEncodedCMMversion() ); if(oy_debug > 2) l2cms_msg( oyMSG_DBG, (oyStruct_s*)opts, OY_DBG_FORMAT_"\n" "%s\n", OY_DBG_ARGS_, l2cmsFlagsToText(flags) ); return flags; } uint16_t in[4] = {32000,32000,32000,0}, out[4] = {65535,65535,65535,65535}; /** Function l2cmsCMMConversionContextCreate_ * @brief create a CMM transform * * @version Oyranos: 0.3.3 * @since 2008/12/28 (Oyranos: 0.1.10) * @date 2011/11/18 */ cmsHTRANSFORM l2cmsCMMConversionContextCreate_ ( oyFilterNode_s * node, cmsHPROFILE * lps, int profiles_n, oyProfiles_s * simulation, int proof_n, int proof, oyPixel_t oy_pixel_layout_in, oyPixel_t oy_pixel_layout_out, oyOptions_s * opts, l2cmsTransformWrap_s ** ltw, oyPointer_s * oy ) { oyPixel_t l2cms_pixel_layout_in = 0; oyPixel_t l2cms_pixel_layout_out = 0; int error = !lps; cmsHTRANSFORM xform = 0; cmsHPROFILE * merge = 0; icColorSpaceSignature color_in = 0; icColorSpaceSignature color_out = 0; icProfileClassSignature profile_class_in = 0; int intent = l2cmsIntentFromOptions( opts,0 ), intent_proof = l2cmsIntentFromOptions( opts,1 ), cmyk_cmyk_black_preservation = 0, flags = 
l2cmsFlagsFromOptions( opts ), gamut_warning = flags & cmsFLAGS_GAMUTCHECK; const char * o_txt = 0; double adaption_state = 0.0; int multi_profiles_n = profiles_n; if(!lps || !profiles_n || !oy_pixel_layout_in || !oy_pixel_layout_out) return 0; flags = proof ? flags | cmsFLAGS_SOFTPROOFING : flags & (~cmsFLAGS_SOFTPROOFING); if(!error && lps[0] && lps[profiles_n-1]) { color_in = (icColorSpaceSignature) l2cmsGetColorSpace( lps[0] ); if(profiles_n > 1) color_out = (icColorSpaceSignature) l2cmsGetColorSpace( lps[profiles_n-1] ); else color_out = (icColorSpaceSignature) l2cmsGetPCS( lps[profiles_n-1] ); profile_class_in = (icProfileClassSignature) l2cmsGetDeviceClass( lps[0] ); } l2cms_pixel_layout_in = oyPixelToLcm2PixelLayout_(oy_pixel_layout_in, color_in); l2cms_pixel_layout_out = oyPixelToLcm2PixelLayout_(oy_pixel_layout_out, color_out); o_txt = oyOptions_FindString ( opts, "cmyk_cmyk_black_preservation", 0 ); if(o_txt && oyStrlen_(o_txt)) cmyk_cmyk_black_preservation = atoi( o_txt ); intent = cmyk_cmyk_black_preservation ? 
intent + 10 : intent; if(cmyk_cmyk_black_preservation == 2) intent += 13; o_txt = oyOptions_FindString ( opts, "adaption_state", 0 ); if(o_txt && oyStrlen_(o_txt)) oyStringToDouble( o_txt, &adaption_state ); if(!error) { cmsUInt32Number * intents=0; cmsBool * bpc=0; cmsFloat64Number * adaption_states=0; if(profiles_n == 1 || profile_class_in == icSigLinkClass) { /* we have to erase the color space */ #if 1 int csp = T_COLORSPACE(l2cms_pixel_layout_in); l2cms_pixel_layout_in &= (~COLORSPACE_SH( csp )); csp = T_COLORSPACE(l2cms_pixel_layout_out); l2cms_pixel_layout_out &= (~COLORSPACE_SH( csp )); #endif xform = l2cmsCreateTransform( lps[0], l2cms_pixel_layout_in, 0, l2cms_pixel_layout_out, (intent > 3)?0:intent, flags | cmsFLAGS_KEEP_SEQUENCE ); } else if(profiles_n == 2 && (!proof_n || (!proof && !gamut_warning))) { oyAllocHelper_m_( intents, cmsUInt32Number, 2,0, goto end); oyAllocHelper_m_( bpc, cmsBool, 2,0, goto end); oyAllocHelper_m_( adaption_states, cmsFloat64Number, 2,0, goto end); intents[0] = intent; intents[1] = intent; bpc[0] = flags & cmsFLAGS_BLACKPOINTCOMPENSATION; bpc[1] = flags & cmsFLAGS_BLACKPOINTCOMPENSATION; adaption_states[0] = adaption_state; adaption_states[1] = adaption_state; xform = l2cmsCreateExtendedTransform( 0, profiles_n, lps, bpc, intents, adaption_states, NULL, 0, l2cms_pixel_layout_in, l2cms_pixel_layout_out, flags | cmsFLAGS_KEEP_SEQUENCE ); } else { int i; if(proof_n && (proof || gamut_warning)) { int len = sizeof(cmsHPROFILE) * (profiles_n + proof_n); oyAllocHelper_m_( merge, cmsHPROFILE, profiles_n + proof_n,0, goto end); memset( merge, 0, len ); memcpy( merge, lps, sizeof(cmsHPROFILE) * (profiles_n - 1) ); for(i = 0; i < proof_n; ++i) { l2cmsProfileWrap_s * wrap = l2cmsAddProofProfile( oyProfiles_Get(simulation,i),flags, intent, intent_proof, 0); merge[profiles_n-1 + i] = wrap->l2cms; } merge[profiles_n + proof_n -1] = lps[profiles_n - 1]; /* merge effect and simulation profiles */ multi_profiles_n += proof_n; lps = merge; } 
if(flags & cmsFLAGS_GAMUTCHECK) flags |= cmsFLAGS_GRIDPOINTS(l2cmsPROOF_LUT_GRID_RASTER); if(oy_debug > 2) { uint32_t f = l2cms_pixel_layout_in; printf ("%s:%d %s() float:%d optimised:%d colorspace:%d extra:%d channels:%d lcms_bytes %d \n", __FILE__,__LINE__,__func__, T_FLOAT(f), T_OPTIMIZED(f), T_COLORSPACE(f), T_EXTRA(f), T_CHANNELS(f), T_BYTES(f) ); f = l2cms_pixel_layout_out; printf ("%s:%d %s() float:%d optimised:%d colorspace:%d extra:%d channels:%d lcms_bytes %d \n", __FILE__,__LINE__,__func__, T_FLOAT(f), T_OPTIMIZED(f), T_COLORSPACE(f), T_EXTRA(f), T_CHANNELS(f), T_BYTES(f) ); printf("multi_profiles_n: %d intent: %d adaption: %g flags: %d \"%s\" l1 %d, l2 %d\n", multi_profiles_n, intent, adaption_state, flags, l2cmsFlagsToText(flags), l2cms_pixel_layout_in, l2cms_pixel_layout_out); } #define SET_ARR(arr,val,n) for(i = 0; i < n; ++i) arr[i] = val; oyAllocHelper_m_( intents, cmsUInt32Number, multi_profiles_n,0, goto end); oyAllocHelper_m_( bpc, cmsBool, multi_profiles_n,0, goto end); oyAllocHelper_m_( adaption_states, cmsFloat64Number, multi_profiles_n,0, goto end); SET_ARR(intents,intent,multi_profiles_n); SET_ARR(bpc,flags & cmsFLAGS_BLACKPOINTCOMPENSATION,multi_profiles_n); SET_ARR(adaption_states,adaption_state,multi_profiles_n); xform = l2cmsCreateExtendedTransform( 0, multi_profiles_n, lps, bpc, intents, adaption_states, NULL, 0, l2cms_pixel_layout_in, l2cms_pixel_layout_out, flags | cmsFLAGS_KEEP_SEQUENCE ); if(oy_debug >= 2) { int i; l2cms_msg( oyMSG_DBG, (oyStruct_s*)opts, OY_DBG_FORMAT_"l2cmsCreateExtendedTransform(multi_profiles_n %d)" " xform: "OY_PRINT_POINTER, OY_DBG_ARGS_, multi_profiles_n, xform, ltw ); #if LCMS_VERSION >= 2060 for(i = 0; i < multi_profiles_n; ++i) { oyProfile_s * p = l2cmsGetContextUserData( l2cmsGetProfileContextID( lps[i] ) ); const char * fn = oyProfile_GetFileName( p, -1 ); size_t size = 0; char * block = oyProfile_GetMem( p, &size, 0, oyAllocateFunc_ ); oyFree_m_(block); fprintf( stdout, " -> \"%s\"[%lu]", fn?fn:"----", 
(long unsigned int)size ); } fprintf(stdout, "\n"); #endif } #ifdef ENABLE_MPE unsigned char in[3] = {128,128,128}; unsigned short o[3]; l2cmsDoTransform( xform, in, o, 1 ); printf("%d %d %d\n", o[0],o[1],o[2]); #endif /* ENABLE_MPE */ oyFree_m_( intents ); oyFree_m_( bpc ); oyFree_m_( adaption_states ); } if(intents) free(intents); } if(!xform || oy_debug > 2) { int level = oyMSG_DBG; uint32_t f = l2cms_pixel_layout_in, i; if(!xform) { level = oyMSG_WARN; error = 1; } l2cms_msg( level, (oyStruct_s*)opts, OY_DBG_FORMAT_ " float:%d optimised:%d colorspace:%d extra:%d channels:%d lcms_bytes %d", OY_DBG_ARGS_, T_FLOAT(f), T_OPTIMIZED(f), T_COLORSPACE(f), T_EXTRA(f), T_CHANNELS(f), T_BYTES(f) ); f = l2cms_pixel_layout_out; l2cms_msg( level, (oyStruct_s*)opts, OY_DBG_FORMAT_ "float:%d optimised:%d colorspace:%d extra:%d channels:%d lcms_bytes %d", OY_DBG_ARGS_, T_FLOAT(f), T_OPTIMIZED(f), T_COLORSPACE(f), T_EXTRA(f), T_CHANNELS(f), T_BYTES(f) ); l2cms_msg( level, (oyStruct_s*)opts, OY_DBG_FORMAT_ "multi_profiles_n: %d intent: %d adaption: %g \"%s\"", OY_DBG_ARGS_, multi_profiles_n, intent, adaption_state, l2cmsFlagsToText(flags)); for(i=0; i < (unsigned)profiles_n; ++i) l2cms_msg( level,(oyStruct_s*)node, OY_DBG_FORMAT_"\n" " ColorSpace:%s->PCS:%s DeviceClass:%s", OY_DBG_ARGS_, lps[0]?oyICCColorSpaceGetName((icColorSpaceSignature) l2cmsGetColorSpace( lps[0])):"----", lps[i]?oyICCColorSpaceGetName((icColorSpaceSignature) l2cmsGetPCS( lps[i] )):"----", lps[i]?oyICCDeviceClassDescription((icProfileClassSignature) l2cmsGetDeviceClass(lps[i])):"----" ); } if(!error && ltw && oy) *ltw= l2cmsTransformWrap_Set_( xform, color_in, color_out, oy_pixel_layout_in, oy_pixel_layout_out, oy ); end: return xform; } /** Function l2cmsCMMColorConversion_ToMem_ * * convert a l2cms color conversion context to a device link * * @version Oyranos: 0.1.10 * @since 2008/12/28 (Oyranos: 0.1.10) * @date 2008/12/28 */ oyPointer l2cmsCMMColorConversion_ToMem_ ( cmsHTRANSFORM * xform, oyOptions_s * 
opts,
                                       size_t * size,
                                       oyAlloc_f allocateFunc )
{
  int error = !xform;
  oyPointer data = 0;
  int flags = l2cmsFlagsFromOptions( opts );

  if(!error)
  {
    /* collapse the transform into a single device link profile;
     * 4.3 is the ICC version written into the link */
    cmsHPROFILE dl= l2cmsTransform2DeviceLink( xform, 4.3,
                                  flags | cmsFLAGS_KEEP_SEQUENCE );
    *size = 0;

#if 0
    /* disabled legacy lcms1 style profile sequence description tag */
    {
        int nargs = 1, i;
        size_t size = sizeof(int) + nargs * sizeof(cmsPSEQDESC);
        LPcmsSEQ pseq = (LPcmsSEQ) oyAllocateFunc_(size);
        ZeroMemory(pseq, size);
        pseq ->n = nargs;
        for (i=0; i < nargs; i++)
        {
          strcpy(pseq ->seq[i].Manufacturer, CMM_NICK);
          strcpy(pseq ->seq[i].Model, "CMM ");
        }
        cmsAddTag(dl, icSigProfileSequenceDescTag, pseq);
        free(pseq);
    }
#endif

    /* NOTE(review): "dl" does not appear to be closed after serialising;
     * confirm whether lcm2WriteProfileToMem() takes ownership — otherwise
     * a l2cmsCloseProfile( dl ) seems to be missing (handle leak). */
    data = lcm2WriteProfileToMem( dl, size, allocateFunc );
  }

  return data;
}

/** validate that the filter registration matches this ICC color module */
oyOptions_s* l2cmsFilter_CmmIccValidateOptions
                                     ( oyFilterCore_s * filter,
                                       oyOptions_s * validate OY_UNUSED,
                                       int statical OY_UNUSED,
                                       uint32_t * result )
{
  uint32_t error = !filter;

  if(!error)
    error = oyFilterRegistrationMatch(oyFilterCore_GetRegistration(filter),
                                      "//"OY_TYPE_STD"/icc_color",
                                      oyOBJECT_CMM_API4_S);

  *result = error;

  return 0;
}

/* no widget events are handled by this module */
oyWIDGET_EVENT_e l2cmsWidgetEvent ( oyOptions_s * options OY_UNUSED,
                                    oyWIDGET_EVENT_e type OY_UNUSED,
                                    oyStruct_s * event OY_UNUSED )
{return 0;}

/* NOTE(review): five real entries plus a 0 terminator in a [7] array,
 * while the connectors below claim data_types_n == 3 — verify which
 * count is authoritative. */
oyDATATYPE_e l2cms_cmmIcc_data_types[7] = {oyUINT8, oyUINT16, oyHALF,
                                           oyFLOAT, oyDOUBLE, 0};

oyConnectorImaging_s_ l2cms_cmmIccSocket_connector = {
  oyOBJECT_CONNECTOR_IMAGING_S,0,0,
                               (oyObject_s)&oy_connector_imaging_static_object,
  oyCMMgetImageConnectorSocketText, /* getText */
  oy_image_connector_texts, /* texts */
  "//" OY_TYPE_STD "/manipulator.data", /* connector_type */
  oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */
  0, /* is_plug == oyFilterPlug_s */
  l2cms_cmmIcc_data_types, /* data_types */
  3, /* data_types_n; elements in data_types array */
  1, /* max_color_offset */
  1, /* min_channels_count; */
  16, /* max_channels_count; */
  1, /* min_color_count; */
  16, /* max_color_count; */
  1, /* can_planar; can read separated channels */
  1, /* can_interwoven; can read continuous channels */
  1, /* can_swap; can swap
color channels (BGR)*/ 1, /* can_swap_bytes; non host byte order */ 1, /* can_revert; revert 1 -> 0 and 0 -> 1 */ 1, /* can_premultiplied_alpha; */ 1, /* can_nonpremultiplied_alpha; */ 0, /* can_subpixel; understand subpixel order */ 0, /* oyCHANNELTYPE_e * channel_types; */ 0, /* channel_types_n */ 1, /* id; relative to oyFilterCore_s, e.g. 1 */ 0 /* is_mandatory; mandatory flag */ }; oyConnectorImaging_s_* l2cms_cmmIccSocket_connectors[2]={&l2cms_cmmIccSocket_connector,0}; oyConnectorImaging_s_ l2cms_cmmIccPlug_connector = { oyOBJECT_CONNECTOR_IMAGING_S,0,0, (oyObject_s)&oy_connector_imaging_static_object, oyCMMgetImageConnectorPlugText, /* getText */ oy_image_connector_texts, /* texts */ "//" OY_TYPE_STD "/manipulator.data", /* connector_type */ oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */ 1, /* is_plug == oyFilterPlug_s */ l2cms_cmmIcc_data_types, /* data_types */ 3, /* data_types_n; elements in data_types array */ 1, /* max_color_offset */ 1, /* min_channels_count; */ 16, /* max_channels_count; */ 1, /* min_color_count; */ 16, /* max_color_count; */ 1, /* can_planar; can read separated channels */ 1, /* can_interwoven; can read continuous channels */ 1, /* can_swap; can swap color channels (BGR)*/ 1, /* can_swap_bytes; non host byte order */ 1, /* can_revert; revert 1 -> 0 and 0 -> 1 */ 1, /* can_premultiplied_alpha; */ 1, /* can_nonpremultiplied_alpha; */ 0, /* can_subpixel; understand subpixel order */ 0, /* oyCHANNELTYPE_e * channel_types; */ 0, /* channel_types_n */ 1, /* id; relative to oyFilterCore_s, e.g. 1 */ 0 /* is_mandatory; mandatory flag */ }; oyConnectorImaging_s_* l2cms_cmmIccPlug_connectors[2]={&l2cms_cmmIccPlug_connector,0}; /** Function l2cmsAddProofProfile * @brief add a abstract proofing profile to the l2cms profile stack * * Look in the Oyranos cache for a CMM internal representation or generate a * new abstract profile containing the proofing profiles changes. 
This can be
 *  a proofing color space simulation or out of gamut marking.
 *
 *  @version Oyranos: 0.9.6
 *  @date 2016/05/02
 *  @since 2009/11/05 (Oyranos: 0.1.10)
 */
l2cmsProfileWrap_s * l2cmsAddProofProfile( oyProfile_s * proof,
                                           cmsUInt32Number flags,
                                           int intent,
                                           int intent_proof,
                                           uint32_t icc_profile_flags )
{
  int error = 0;
  cmsHPROFILE * hp = 0;
  oyPointer_s * cmm_ptr = 0;
  l2cmsProfileWrap_s * s = 0;
  char * hash_text = 0,
         num[12];

  if(!proof || proof->type_ != oyOBJECT_PROFILE_S)
  {
    /* NOTE(review): proof->type_ is an enum member cast to oyStruct_s* for
     * oyStruct_GetText() — this looks like it was meant to pass proof itself;
     * confirm against oyStruct_GetText()'s contract. */
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)proof, OY_DBG_FORMAT_
               "no profile provided %s", OY_DBG_ARGS_,
               (proof != NULL) ? oyStruct_GetText( (oyStruct_s*) proof->type_, oyNAME_NAME, 0 ) : "" );
    return 0;
  }

  /* build hash text: the cache key encodes the proof profile name, both
   * rendering intents and the gamut-check/soft-proof relevant flag bits */
  STRING_ADD( hash_text, "abstract proofing profile " );
  STRING_ADD( hash_text, oyObject_GetName( proof->oy_, oyNAME_NICK ) );
  STRING_ADD( hash_text, " intent:" );
  sprintf( num, "%d", intent );
  STRING_ADD( hash_text, num );
  STRING_ADD( hash_text, " intent_proof:" );
  sprintf( num, "%d", intent_proof );
  STRING_ADD( hash_text, num );
  STRING_ADD( hash_text, " flags|gmtCheck|softPrf:" );
  sprintf( num, "%d|%d|%d", (int)flags, (flags & cmsFLAGS_GAMUTCHECK)?1:0,
           (flags & cmsFLAGS_SOFTPROOFING)?1:0 );
  STRING_ADD( hash_text, num );

  /* cache look up */
  cmm_ptr = oyPointer_LookUpFromText( hash_text, l2cmsPROFILE );
  oyPointer_Set( cmm_ptr, CMM_NICK, 0,0,0,0 );

  /* for empty profile create a new abstract one */
  if(!oyPointer_GetPointer(cmm_ptr))
  {
    oyPointer_s * oy = cmm_ptr;
    char * type_ = l2cmsPROFILE;
    uint32_t type = *((uint32_t*)type_);
    size_t size = 0;
    oyPointer block = 0;
    /* NOTE(review): this inner `s` intentionally shadows the outer one and is
     * handed over to the cache below; calloc() result is not NULL-checked. */
    l2cmsProfileWrap_s * s = calloc(sizeof(l2cmsProfileWrap_s), 1);

    if(oy_debug > 3)
      fprintf( stderr, OY_DBG_FORMAT_" created: \"%s\"", OY_DBG_ARGS_, hash_text );
    else
      l2cms_msg( oyMSG_DBG, (oyStruct_s*)proof, OY_DBG_FORMAT_
                 " created abstract proofing profile: \"%s\"", OY_DBG_ARGS_, hash_text );

    /* create */
    hp = l2cmsGamutCheckAbstract( proof, flags, intent, intent_proof, icc_profile_flags );
    if(hp)
    {
      /* save to memory */
      block = lcm2WriteProfileToMem( hp, &size, malloc );
      l2cmsCloseProfile( hp ); hp = 0;
    }

    s->type = type;
    s->size = size;
    s->block = block;

    /* reopen the serialised profile as a lcms handle */
#if LCMS_VERSION < 2060
    s->l2cms = CMMProfileOpen_M( proof, block, size );
#else
    {
      oyProfile_s * proof2 = oyProfile_Copy( proof, NULL );
      cmsContext tc = l2cmsCreateContext( NULL, proof2 ); /* threading context */
      l2cmsSetLogErrorHandlerTHR( tc, l2cmsErrorHandlerFunction );
      s->l2cms = CMMProfileOpen_M( tc, block, size );
    }
#endif
    /* register the wrap in the cache entry together with its release hook */
    error = oyPointer_Set( oy, 0, l2cmsPROFILE, s,
                           CMMToString_M(CMMProfileOpen_M),
                           l2cmsCMMProfileReleaseWrap );
  }

  if(!error)
  {
    s = l2cmsCMMProfile_GetWrap_( cmm_ptr );
    error = !s;
  }

  oyPointer_Release( &cmm_ptr );
  if(hash_text) oyFree_m_(hash_text);

  if(!error)
    return s;
  else
  {
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)proof, OY_DBG_FORMAT_
               " adding %s failed", OY_DBG_ARGS_,
               oyProfile_GetText( proof, oyNAME_DESCRIPTION ) );
    return 0;
  }
}

/** Function l2cmsAddProfile
 *  @brief add a profile from Oyranos to the l2cms profile stack
 *
 *  Look in the Oyranos cache for a CMM internal representation
 *
 *  @version Oyranos: 0.1.10
 *  @since 2008/12/28 (Oyranos: 0.1.10)
 *  @date 2008/12/28
 */
cmsHPROFILE l2cmsAddProfile ( oyProfile_s * p )
{
  int error = 0;
  /* NOTE(review): declared cmsHPROFILE* but assigned/returned as cmsHPROFILE
   * below; compiles because cmsHPROFILE is a void typedef — confirm intent. */
  cmsHPROFILE * hp = 0;
  oyPointer_s * cmm_ptr = 0;
  l2cmsProfileWrap_s * s = 0;

  if(!p || p->type_ != oyOBJECT_PROFILE_S)
  {
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)p, OY_DBG_FORMAT_" "
               "no profile provided", OY_DBG_ARGS_ );
    return 0;
  }

  cmm_ptr = oyPointer_LookUpFromObject( (oyStruct_s*)p, l2cmsPROFILE );

  if(oy_debug >= 2)
  {
    l2cms_msg( oyMSG_DBG, (oyStruct_s*)p, OY_DBG_FORMAT_
               " going to open %s cmm_ptr: %d", OY_DBG_ARGS_,
               oyProfile_GetFileName( p,-1 ),
               oyStruct_GetId((oyStruct_s*)cmm_ptr) );
  }

  if(!cmm_ptr)
  {
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)p, OY_DBG_FORMAT_
               " oyPointer_LookUpFromObject() failed", OY_DBG_ARGS_ );
    return 0;
  }

  oyPointer_Set( cmm_ptr, CMM_NICK, 0,0,0,0 );

  /* only open the profile once per cache entry */
  if(!oyPointer_GetPointer(cmm_ptr))
    error = l2cmsCMMData_Open( (oyStruct_s*)p, cmm_ptr );

  if(error)
  {
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)p, OY_DBG_FORMAT_
               " l2cmsCMMData_Open() failed", OY_DBG_ARGS_ );
  } else
  {
    s = l2cmsCMMProfile_GetWrap_( cmm_ptr );
    error = !s;
    if(error)
      l2cms_msg( oyMSG_WARN, (oyStruct_s*)p, OY_DBG_FORMAT_
                 " l2cmsCMMProfile_GetWrap_() failed", OY_DBG_ARGS_ );
  }

  if(!error)
    hp = s->l2cms;

  oyPointer_Release( &cmm_ptr );

  if(!error)
    return hp;
  else
    return 0;
}

/* lcms sampler callback for 16 bit Lab data: runs the transform stored in
 * ((oyPointer*)Cargo)[0] on one pixel.  The deltaE based gamut marking is
 * currently disabled (commented out), so Lab1 is computed but unused. */
int gamutCheckSampler16(const cmsUInt16Number In[], cmsUInt16Number Out[], void * Cargo)
{
  cmsCIELab Lab1, Lab2;
  oyPointer * ptr = (oyPointer*)Cargo;

  l2cmsLabEncoded2Float(&Lab1, In);
  l2cmsDoTransform( ptr[0], In, Out, 1 );
  l2cmsLabEncoded2Float(&Lab2, Out);
  /*double d = cmsDeltaE( &Lab1, &Lab2 );
  if(abs(d) > 10 && ptr[1] != NULL)
  {
    Lab2.L = 50.0;
    Lab2.a = Lab2.b = 0.0;
  }*/
  l2cmsFloat2LabEncoded(Out, &Lab2);
  return TRUE;
}

/* lcms sampler callback for float Lab data: transforms one pixel and, when a
 * gamut check marker is requested (ptr[1] != NULL), replaces clearly shifted
 * colors (deltaE > 10) by neutral grey L*=50. */
int gamutCheckSamplerFloat ( const cmsFloat32Number In[],
                             cmsFloat32Number Out[],
                             void * Cargo )
{
  cmsCIELab Lab1, Lab2;
  double d;
  cmsFloat32Number i[3], o[3];
  oyPointer * ptr = (oyPointer*)Cargo;

  /* scale normalised floats to Lab ranges */
  i[0] = Lab1.L = In[0] * 100.0;
  i[1] = Lab1.a = In[1] * 257.0 - 128.0;
  i[2] = Lab1.b = In[2] * 257.0 - 128.0;

  l2cmsDoTransform( ptr[0], i, o, 1 );

  Lab2.L = o[0]; Lab2.a = o[1]; Lab2.b = o[2];

  d = l2cmsDeltaE( &Lab1, &Lab2 );
  if((fabs(d) > 10) && ptr[1] != NULL)
  {
    Lab2.L = 50.0;
    Lab2.a = Lab2.b = 0.0;
  }

  /* scale back to the normalised encoding */
  Out[0] = Lab2.L/100.0;
  Out[1] = (Lab2.a + 128.0) / 257.0;
  Out[2] = (Lab2.b + 128.0) / 257.0;

  return TRUE;
}

/* Map a lcms multi process element (MPE) stage signature to a short nick
 * (type != 0) or a translated human readable name (type == 0).  Unknown
 * signatures fall through to printing the four byte signature itself. */
const char * oyICCMpeDescription(cmsStageSignature sig, int type )
{
#if LCMS_VERSION >= 2060
  switch ((unsigned int)sig)
  {
    // Multi process elements types
    case cmsSigCurveSetElemType: return type ? "cvst" : _("Curve Set");
    case cmsSigMatrixElemType: return type ? "matf" : _("Matrix");
    case cmsSigCLutElemType: return type ? "clut" : _("Look Up Table");
    case cmsSigBAcsElemType: return type ? "bACS" : _("BAcs");
    case cmsSigEAcsElemType: return type ? "eACS" : _("EAcs");
    // Custom from here, not in the ICC Spec
    case cmsSigXYZ2LabElemType: return type ?
"l2x " : _("XYZ2Lab"); case cmsSigLab2XYZElemType: return type ? "x2l " : _("Lab2XYZ"); case cmsSigNamedColorElemType: return type ? "ncl " : _("Named Color"); case cmsSigLabV2toV4: return type ? "2 4 " : _("V2toV4"); case cmsSigLabV4toV2: return type ? "4 2 " : _("V4toV2"); // Identities case cmsSigIdentityElemType: return type ? "idn " : _("Identity"); // Float to floatPCS case cmsSigLab2FloatPCS: return type ? "d2l '" : _("Lab2FloatPCS"); case cmsSigFloatPCS2Lab: return type ? "l2d '" : _("FloatPCS2Lab"); case cmsSigXYZ2FloatPCS: return type ? "d2x '" : _("XYZ2FloatPCS"); #if LCMS_VERSION >= 2070 case cmsSigFloatPCS2XYZ: return type ? "x2d '" : _("FloatPCS2XYZ"); case cmsSigClipNegativesElemType: return type ? "clp '" : _("Clip Negatives"); #endif /* >= 2070 */ case 0: return _("----"); default: #endif /* >= 2060 */ { static union { char c[8]; cmsStageSignature sig; } stage_sig = { .c[4] = 0 }; stage_sig.sig = (cmsStageSignature)oyValueUInt32( sig ); return stage_sig.c; } #if LCMS_VERSION >= 2060 } #endif } // A single stage struct _cmsStage_struct { cmsContext ContextID; cmsStageSignature Type; // Identifies the stage cmsStageSignature Implements; // Identifies the *function* of the stage (for optimizations) }; void printPipeline( cmsPipeline * lut ) { cmsStage * first = l2cmsPipelineGetPtrToFirstStage(lut), * next = first; int i = 0; do { fprintf(stderr, "stage[%d] %s:%s-%s %d -> %d\n", i, oyICCMpeDescription(l2cmsStageType(next),oyNAME_NICK), oyICCMpeDescription(next->Implements,oyNAME_NAME), oyICCMpeDescription(l2cmsStageType(next),oyNAME_NAME), l2cmsStageInputChannels(next), l2cmsStageOutputChannels(next) ); ++i; } while ((next = l2cmsStageNext( next )) != NULL); } oyProfiles_s * l2cmsProfilesFromOptions( oyFilterNode_s * node, oyFilterPlug_s * plug, oyOptions_s * node_options, const char * key, int profiles_switch, int verbose ) { oyProfiles_s * profiles = NULL; oyOption_s * o = NULL; if(profiles_switch || oy_debug || verbose) o = oyOptions_Find( 
node_options, key, oyNAME_PATTERN );

  if(o)
  {
    profiles = (oyProfiles_s*) oyOption_GetStruct( o, oyOBJECT_PROFILES_S );
    if((oy_debug || verbose))
    {
      l2cms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_
                 " found \"%s\" %d switch %d", OY_DBG_ARGS_, key,
                 oyProfiles_Count( profiles ), profiles_switch );
    }
    else
    /* the option exists but carries no profile list */
    if( !profiles )
    {
      oyFilterSocket_Callback( plug, oyCONNECTOR_EVENT_INCOMPATIBLE_OPTION );
      l2cms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_
                 " incompatible \"%s\"", OY_DBG_ARGS_, key );
    }
    oyOption_Release( &o );
  }

  /* the caller asked only for debugging output, not for the profiles */
  if(!profiles_switch)
    oyProfiles_Release( &profiles );

  return profiles;
}

/** l2cmsFilterNode_CmmIccContextToMem()
 *  @brief implement oyCMMFilterNode_CreateContext_f()
 *
 *  Serialise the color transform of a filter node (input profile + optional
 *  effect/simulation/display profiles + output profile) into an ICC device
 *  link profile in memory.
 *
 *  @version Oyranos: 0.1.8
 *  @since 2008/11/01 (Oyranos: 0.1.8)
 *  @date 2008/11/01
 */
oyPointer l2cmsFilterNode_CmmIccContextToMem (
                                       oyFilterNode_s * node,
                                       size_t * size,
                                       oyAlloc_f allocateFunc )
{
  oyPointer block = 0;
  int error = 0;
  int n,i,len;
  size_t size_ = 0;
  oyFilterPlug_s * plug = oyFilterNode_GetPlug( node, 0 );
  oyFilterSocket_s * socket = oyFilterNode_GetSocket( node, 0 ),
                   * remote_socket = oyFilterPlug_GetSocket( plug );
  oyImage_s * image_input = 0,
            * image_output = 0;
  cmsHPROFILE * lps = 0;
  cmsHTRANSFORM xform = 0;
  oyOptions_s * node_tags = oyFilterNode_GetTags( node ),
              * node_options = oyFilterNode_GetOptions( node, 0 );
  oyProfile_s * p = 0, * prof = 0,
              * image_input_profile,
              * image_output_profile;
  oyProfiles_s * profiles = 0,
               * profs = 0;
  oyProfileTag_s * psid = 0,
                 * info = 0,
                 * cprt = 0;
  int profiles_n = 0,
      profiles_simulation_n = 0,
      profiles_display_n = 0,
      proof = 0,
      effect_switch = 0;
  int verbose = oyOptions_FindString( node_tags, "verbose", "true" ) ?
1 : 0;

  /* resolve the images attached to both sides of the node */
  image_input = (oyImage_s*)oyFilterSocket_GetData( remote_socket );
  image_output = (oyImage_s*)oyFilterSocket_GetData( socket );
  image_input_profile = oyImage_GetProfile( image_input );
  image_output_profile = oyImage_GetProfile( image_output );

  if(!image_input)
    goto l2cmsFilterNode_CmmIccContextToMemClean;

  if(image_input->type_ != oyOBJECT_IMAGE_S)
  {
    oyFilterSocket_Callback( plug, oyCONNECTOR_EVENT_INCOMPATIBLE_DATA );
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_
               " missed input image %d", OY_DBG_ARGS_, image_input->type_ );
  }
  if(!image_output || image_output->type_ != oyOBJECT_IMAGE_S)
  {
    oyFilterSocket_Callback( plug, oyCONNECTOR_EVENT_INCOMPATIBLE_DATA );
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_
               " missed output image %d", OY_DBG_ARGS_,
               image_output?image_output->type_:0 );
  }

  /*oyDATATYPE_e data_type = 0;
  data_type = oyToDataType_m( oyImage_GetPixelLayout( image_input, oyLAYOUT ) );

  if(data_type == oyHALF)
  {
    oyFilterSocket_Callback( plug, oyCONNECTOR_EVENT_INCOMPATIBLE_DATA );
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_
               " can not handle oyHALF", OY_DBG_ARGS_ );
  }*/

  /* room for up to 15 profiles plus simulation/display extras (see 15+2+1) */
  len = sizeof(cmsHPROFILE) * (15 + 2 + 1);
  lps = oyAllocateFunc_( len );
  if(!lps)
    goto l2cmsFilterNode_CmmIccContextToMemClean;
  memset( lps, 0, len );

  /* input profile */
  lps[ profiles_n++ ] = l2cmsAddProfile( image_input_profile );
  if(!image_input_profile)
  {
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_" "
               "missed image_input->profile_", OY_DBG_ARGS_ );
    goto l2cmsFilterNode_CmmIccContextToMemClean;
  }
  p = oyProfile_Copy( image_input_profile, 0 );
  profs = oyProfiles_New( 0 );
  error = oyProfiles_MoveIn( profs, &p, -1 );

  /* effect profiles */
  effect_switch = oyOptions_FindString ( node_options, "effect_switch", "1" ) ?
1 : 0;
  profiles = l2cmsProfilesFromOptions( node, plug, node_options,
                                       "profiles_effect", effect_switch, verbose );
  n = oyProfiles_Count( profiles );
  if(n)
    for(i = 0; i < n; ++i)
    {
      p = oyProfiles_Get( profiles, i );

      /* Look in the Oyranos cache for a CMM internal representation */
      lps[ profiles_n++ ] = l2cmsAddProfile( p );
      error = oyProfiles_MoveIn( profs, &p, -1 );
    }
  oyProfiles_Release( &profiles );

  /* simulation profile: requested by either soft or hard proofing */
  proof = oyOptions_FindString ( node_options, "proof_soft", "1" ) ? 1 : 0;
  proof += oyOptions_FindString ( node_options, "proof_hard", "1" ) ? 1 : 0;
  if(oy_debug > 2 && proof)
    l2cms_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_
               " proof requested",OY_DBG_ARGS_);
  profiles = l2cmsProfilesFromOptions( node, plug, node_options,
                                       "profiles_simulation", proof, verbose );
  n = oyProfiles_Count( profiles );
  if(n)
    for(i = 0; i < n; ++i)
    {
      p = oyProfiles_Get( profiles, i );

      if(oy_debug)
        l2cms_msg( oyMSG_DBG,(oyStruct_s*)node, OY_DBG_FORMAT_
                   " found profile: %s", OY_DBG_ARGS_,
                   p?oyProfile_GetFileName( p,-1 ):"????");

      /* simulation profiles are only counted here; they are passed separately
       * to the conversion context below, not into lps[] */
      error = oyProfiles_MoveIn( profs, &p, -1 );
      ++profiles_simulation_n;
      oyProfile_Release( &p );
    }
  else
    if(verbose || oy_debug > 2)
      l2cms_msg( oyMSG_DBG,(oyStruct_s*)node, OY_DBG_FORMAT_
                 " no simulation profile found", OY_DBG_ARGS_);

  /* display profile */
  profiles_display_n = oyOptions_CountType( node_options,
                            "display.abstract.icc_profile", oyOBJECT_PROFILE_S );
  l2cms_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_
             "display.abstract.icc_profile[] = %d", OY_DBG_ARGS_, profiles_display_n );
  for(i = 0; i < profiles_display_n; ++i)
  {
    oyOption_s * o = NULL;
    error = oyOptions_GetType2( node_options, i, "display.abstract.icc_profile",
                                oyNAME_PATTERN, oyOBJECT_PROFILE_S, NULL, &o );
    const char * reg = oyOption_GetRegistration( o );
    p = (oyProfile_s*) oyOption_GetStruct( o, oyOBJECT_PROFILE_S );
    if(verbose || oy_debug > 2)
      l2cms_msg( verbose?oyMSG_WARN:oyMSG_DBG,(oyStruct_s*)node, OY_DBG_FORMAT_
                 "display.abstract.icc_profile[%d]: %s:%s", OY_DBG_ARGS_,
i, reg, oyProfile_GetText(p,oyNAME_DESCRIPTION) );
    oyOption_Release( &o );
    lps[ profiles_n++ ] = l2cmsAddProfile( p );
    p = oyProfile_Copy( p, 0 );
    error = oyProfiles_MoveIn( profs, &p, -1 );
  }

  /* output profile */
  if(!image_output_profile)
  {
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_" "
               "missed image_output->profile_", OY_DBG_ARGS_ );
    goto l2cmsFilterNode_CmmIccContextToMemClean;
  }
  lps[ profiles_n++ ] = l2cmsAddProfile( image_output_profile );
  p = oyProfile_Copy( image_output_profile, 0 );
  error = oyProfiles_MoveIn( profs, &p, -1 );

  *size = 0;

  /* create the context */
  xform = l2cmsCMMConversionContextCreate_( node, lps, profiles_n,
                                            profiles, profiles_simulation_n, proof,
                                            oyImage_GetPixelLayout( image_input, oyLAYOUT ),
                                            oyImage_GetPixelLayout( image_output, oyLAYOUT ),
                                            node_options, 0, 0 );
  if(oy_debug > 3)
    l2cms_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_"\n%s",
               OY_DBG_ARGS_, oyFilterNode_GetText( node, oyNAME_NAME ) );
  error = !xform;
  if(!error)
  {
    /* serialise the transform into an ICC device link blob */
    if(oy_debug)
      block = l2cmsCMMColorConversion_ToMem_( xform, node_options, size, oyAllocateFunc_ );
    else
      block = l2cmsCMMColorConversion_ToMem_( xform, node_options, size, allocateFunc );
    error = !block || !*size;
    l2cmsDeleteTransform( xform ); xform = 0;
  }
  else
  {
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_"\n"
               "loading failed profiles_n:%d profiles_simulation_n:%d profiles:%d",
               OY_DBG_ARGS_, profiles_n, profiles_simulation_n,
               oyProfiles_Count(profiles) );
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_"\n"
               " input profile: \"%s\" %s %s->%s %s\n %s", OY_DBG_ARGS_,
               oyProfile_GetText( image_input_profile, oyNAME_DESCRIPTION ),
               oyProfile_GetText( image_input_profile, oyNAME_NAME ),
               oyICCColorSpaceGetName( oyProfile_GetSignature( image_input_profile, oySIGNATURE_COLOR_SPACE ) ),
               oyICCColorSpaceGetName( oyProfile_GetSignature( image_input_profile, oySIGNATURE_PCS ) ),
               oyICCDeviceClassDescription( oyProfile_GetSignature( image_input_profile, oySIGNATURE_CLASS ) ),
oyPixelPrint(oyImage_GetPixelLayout( image_input, oyLAYOUT ), malloc)); l2cms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_"\n" " output profile: \"%s\" %s %s->%s %s\n %s", OY_DBG_ARGS_, oyProfile_GetText( image_input_profile, oyNAME_DESCRIPTION ), oyProfile_GetText( image_output_profile, oyNAME_NAME ), oyICCColorSpaceGetName( oyProfile_GetSignature( image_input_profile, oySIGNATURE_COLOR_SPACE ) ), oyICCColorSpaceGetName( oyProfile_GetSignature( image_input_profile, oySIGNATURE_PCS ) ), oyICCDeviceClassDescription( oyProfile_GetSignature( image_input_profile, oySIGNATURE_CLASS ) ), oyPixelPrint(oyImage_GetPixelLayout( image_output, oyLAYOUT ), malloc)); } /* additional tags for debugging */ if(!error && (oy_debug || verbose)) { if(!error) { size_ = *size; if(!size_) { block = NULL; goto l2cmsFilterNode_CmmIccContextToMemClean; } prof = oyProfile_FromMem( size_, block, 0, 0 ); /* icSigProfileSequenceIdentifierType */ { oyStructList_s * list = oyStructList_New(0); int i, n = oyProfiles_Count( profs ); for( i = 0; i < n ; ++i ) { oyProfile_s * p = oyProfiles_Get( profs, i ); oyStructList_MoveIn( list, (oyStruct_s**) &p, -1, 0 ); } psid = oyProfileTag_Create( list, icSigProfileSequenceIdentifierTag, icSigProfileSequenceIdentifierType, 0, 0 ); if(psid) error = oyProfile_TagMoveIn ( prof, &psid, -1 ); oyStructList_Release( &list ); } /* Info tag */ if(!error) { oyStructList_s * list = oyStructList_Create( oyOBJECT_NONE, "l2cmsFilterNode_CmmIccContextToMem()", 0); char h[5] = {"Info"}; uint32_t * hi = (uint32_t*)&h; char * cc_name = l2cmsFilterNode_GetText( node, oyNAME_NICK, oyAllocateFunc_ ); const char * lib_name = oyFilterNode_GetModuleName( node ); oyStructList_MoveInName( list, &cc_name, 0, oyNAME_NAME ); oyStructList_AddName( list, lib_name, 0, oyNAME_NICK ); info = oyProfileTag_Create( list, (icTagSignature)oyValueUInt32(*hi), icSigTextType, 0, 0); oyStructList_Release( &list ); if(info) error = oyProfile_TagMoveIn ( prof, &info, -1 ); } if(!error) cprt = 
oyProfile_GetTagById( prof, icSigCopyrightTag ); /* icSigCopyrightTag */

      /* provide a default copyright tag if the device link carries none */
      if(!error && !cprt)
      {
        oyStructList_s * list = oyStructList_New(0);
        error = oyStructList_AddName( list, "no copyright; use freely", -1, oyNAME_NAME );
        if(!error)
        {
          cprt = oyProfileTag_Create( list, icSigCopyrightTag, icSigTextType, 0, 0);
          error = !cprt;
        }
        oyStructList_Release( &list );
        if(!error)
          error = oyProfile_TagMoveIn ( prof, &cprt, -1 );
      }

      /* reserialise the profile including the debug tags */
      if(block)
      { oyDeAllocateFunc_( block ); block = 0; size_ = 0; }

      block = oyProfile_GetMem( prof, &size_, 0, allocateFunc );
      *size = size_;
      oyProfile_Release( &prof );
    }
  }

  l2cmsFilterNode_CmmIccContextToMemClean:
  oyFilterPlug_Release( &plug );
  oyFilterSocket_Release( &socket );
  oyFilterSocket_Release( & remote_socket );
  oyOptions_Release( &node_tags );
  oyImage_Release( &image_input );
  oyImage_Release( &image_output );
  oyProfile_Release( &image_input_profile );
  oyProfile_Release( &image_output_profile );
  oyOptions_Release( &node_options );
  oyProfiles_Release( &profs );
  oyProfiles_Release( &profiles );
  oyFree_m_( lps );

  return block;
}

/* Render a short XML like description of an image (profile, channels,
 * offsets, swapping, sample type) used as part of node hash texts. */
char * l2cmsImage_GetText ( oyImage_s * image,
                            int verbose OY_UNUSED,
                            oyAlloc_f allocateFunc )
{
  oyPixel_t pixel_layout = oyImage_GetPixelLayout(image,oyLAYOUT);
  int n = oyToChannels_m( pixel_layout );
  oyProfile_s * profile = oyImage_GetProfile( image );
  int cchan_n = oyProfile_GetChannelsCount( profile );
  int coff_x = oyToColorOffset_m( pixel_layout );
  oyDATATYPE_e t = oyToDataType_m( pixel_layout );
  int swap = oyToSwapColorChannels_m( pixel_layout );
  /*int revert= oyT_FLAVOR_M( pixel_layout );*/
  int so = oyDataTypeGetSize( t );
  char * text = oyAllocateFunc_(512);
  char * hash_text = 0;
  oyImage_s * s = image;

  /* describe the image */
  oySprintf_( text, " <oyImage_s>\n");
  hashTextAdd_m( text );
  oySprintf_( text, " %s\n", oyProfile_GetText(profile, oyNAME_NAME));
  hashTextAdd_m( text );
  oySprintf_( text, " <channels all=\"%d\" color=\"%d\" />\n", n,cchan_n);
  hashTextAdd_m( text );
  oySprintf_( text, " <offsets first_color_sample=\"%d\" next_pixel=\"%d\" />\n"
              /*" next line = %d\n"*/,
              coff_x, oyImage_GetPixelLayout( s,oyPOFF_X )/*, mask[oyPOFF_Y]*/ );
  hashTextAdd_m( text );

  if(swap || oyToByteswap_m( pixel_layout ))
  {
    hashTextAdd_m( " <swap" );
    if(swap)
      hashTextAdd_m( " colorswap=\"yes\"" );
    if( oyToByteswap_m( pixel_layout ) )
      hashTextAdd_m( " byteswap=\"yes\"" );
    hashTextAdd_m( " />\n" );
  }

  if( oyToFlavor_m( pixel_layout ) )
  {
    /* NOTE(review): "flawor" is a long-standing typo in the emitted text; it
     * feeds hash texts, so correcting it would change node hashes. */
    oySprintf_( text, " <flawor value=\"yes\" />\n" );
    hashTextAdd_m( text );
  }
  oySprintf_( text, " <sample_type value=\"%s[%dByte]\" />\n",
              oyDataTypeToText(t), so );
  hashTextAdd_m( text );
  oySprintf_( text, " </oyImage_s>");
  hashTextAdd_m( text );

  oyDeAllocateFunc_(text);

  /* hand the hash text over in the caller's allocator if it differs */
  if(allocateFunc != oyStruct_GetAllocator((oyStruct_s*)s))
  {
    text = hash_text;
    hash_text = oyStringCopy_( text, allocateFunc );
    oySTRUCT_FREE_m( s, text );
  }
  text = 0;

  return hash_text;
}

/** Function l2cmsFilterNode_GetText
 *  @brief implement oyCMMFilterNode_GetText_f()
 *
 *  @version Oyranos: 0.1.10
 *  @since 2008/12/27 (Oyranos: 0.1.10)
 *  @date 2009/06/02
 */
char * l2cmsFilterNode_GetText ( oyFilterNode_s * node,
                                 oyNAME_e type OY_UNUSED,
                                 oyAlloc_f allocateFunc )
{
#ifdef NO_OPT
  return oyStringCopy_( oyFilterNode_GetText( node, type ), allocateFunc );
#else
  const char * tmp = 0,
             * model = 0;
  char * hash_text = 0,
       * temp = 0;
  oyFilterNode_s * s = node;
  oyImage_s * in_image = 0,
            * out_image = 0;
  int verbose;
  oyOptions_s * node_opts = oyFilterNode_GetOptions( node, 0 );
  oyOptions_s * node_tags = oyFilterNode_GetTags( node ),
              * opts_tmp, * opts_tmp2, * options;
  oyFilterCore_s * node_core = oyFilterNode_GetCore( node );
  oyFilterPlug_s * plug = oyFilterNode_GetPlug( node, 0 );
  oyFilterSocket_s * socket = oyFilterNode_GetSocket( node, 0 ),
                   * remote_socket = oyFilterPlug_GetSocket( plug );
  oyProfiles_s * profiles;
  oyProfile_s * p;
  int effect_switch, proof, profiles_display_n, i,n;

  /* pick all sockets (output) data */
  out_image = (oyImage_s*)oyFilterSocket_GetData( remote_socket );

  /* pick
all plug (input) data */
  in_image = (oyImage_s*)oyFilterSocket_GetData( socket );

  if(!node)
    return 0;

  verbose = oyOptions_FindString( node_tags, "verbose", "true" ) ? 1 : 0;

  /* 1. create hash text */
  hashTextAdd_m( "<oyFilterNode_s>\n " );

  /* the filter text */
  hashTextAdd_m( oyFilterCore_GetText( node_core, oyNAME_NAME ) );

  /* make a description */
  {
    /* input data */
    hashTextAdd_m( " <data_in>\n" );
    if(in_image)
    {
      temp = l2cmsImage_GetText( in_image, verbose, oyAllocateFunc_ );
      hashTextAdd_m( temp );
      oyDeAllocateFunc_(temp); temp = 0;
    }
    hashTextAdd_m( "\n </data_in>\n" );

    /* pick inbuild defaults */
    opts_tmp2 = oyOptions_FromText( l2cms_extra_options, 0, NULL );
    opts_tmp = oyOptions_ForFilter( "//" OY_TYPE_STD "/icc_color",
                                    oyOPTIONSOURCE_FILTER | OY_SELECT_COMMON , 0 );
    /* merge defaults: extra options on top of the common filter options */
    options = oyOptions_FromBoolean( opts_tmp, opts_tmp2, oyBOOLEAN_UNION,NULL);
    oyOptions_Release( &opts_tmp );
    oyOptions_Release( &opts_tmp2 );
    opts_tmp = options;
    /* add existing custom options */
    options = oyOptions_FromBoolean( opts_tmp, node_opts, oyBOOLEAN_UNION,NULL);
    oyOptions_Release( &opts_tmp );

    /* options -> xforms */
    hashTextAdd_m( " <oyOptions_s>\n" );
    model = oyOptions_GetText( options, oyNAME_NAME );
    hashTextAdd_m( model );
    hashTextAdd_m( "\n </oyOptions_s>\n" );
    oyOptions_Release( &options );

    /* abstract profiles */
    proof = oyOptions_FindString ( node_opts, "proof_soft", "1" ) ? 1 : 0;
    proof += oyOptions_FindString ( node_opts, "proof_hard", "1" ) ? 1 : 0;
    effect_switch = oyOptions_FindString ( node_opts, "effect_switch", "1" ) ?
1 : 0;

    /* display profile */
    profiles_display_n = oyOptions_CountType( node_opts,
                              "display.abstract.icc_profile", oyOBJECT_PROFILE_S );

    if(proof || effect_switch || profiles_display_n)
      hashTextAdd_m( " <oyProfiles_s>" );

    /* effect profiles */
    profiles = l2cmsProfilesFromOptions( node, plug, node_opts,
                                         "profiles_effect", effect_switch, verbose );
    n = oyProfiles_Count( profiles );
    for(i = 0; i < n; ++i)
    {
      p = oyProfiles_Get( profiles, i );
      model = oyProfile_GetText( p, oyNAME_NAME );
      hashTextAdd_m( "\n " );
      hashTextAdd_m( model );
      oyProfile_Release( &p );
    }
    oyProfiles_Release( &profiles );

    /* display profiles */
    for(i = 0; i < profiles_display_n; ++i)
    {
      oyOption_s * o = NULL;
      oyOptions_GetType2( node_opts, i, "display.abstract.icc_profile",
                          oyNAME_PATTERN, oyOBJECT_PROFILE_S, NULL, &o );
      p = (oyProfile_s*) oyOption_GetStruct( o, oyOBJECT_PROFILE_S );
      oyOption_Release( &o );
      model = oyProfile_GetText( p, oyNAME_NAME );
      hashTextAdd_m( "\n " );
      hashTextAdd_m( model );
      oyProfile_Release( &p );
    }

    if(proof || effect_switch || profiles_display_n)
      hashTextAdd_m( "\n </oyProfiles_s>\n" );

    /* output data */
    hashTextAdd_m( " <data_out>\n" );
    if(out_image)
    {
      temp = l2cmsImage_GetText( out_image, verbose, oyAllocateFunc_ );
      hashTextAdd_m( temp );
      oyDeAllocateFunc_(temp); temp = 0;
    }
    hashTextAdd_m( "\n </data_out>\n" );
  }
  hashTextAdd_m( tmp );

  hashTextAdd_m( "</oyFilterNode_s>\n" );

  oyOptions_Release( &node_opts );
  oyOptions_Release( &node_tags );
  oyFilterCore_Release( &node_core );
  oyFilterPlug_Release( &plug );
  oyFilterSocket_Release( &socket );
  oyFilterSocket_Release( &remote_socket );
  oyImage_Release( &in_image );
  oyImage_Release( &out_image );

  return oyStringCopy_( hash_text, allocateFunc );
#endif
}

/** Function l2cmsFlagsToText
 *  @brief debugging helper
 *
 *  Render the lcms transform flag bits as a newly allocated, human readable
 *  string; the caller owns the returned memory.
 *
 *  @version Oyranos: 0.1.13
 *  @since 2010/11/28 (Oyranos: 0.1.13)
 *  @date 2010/11/28
 */
char * l2cmsFlagsToText ( int flags )
{
  char * t = 0;
  char num[24];

  sprintf(num, "%d", flags);

  STRING_ADD( t, "flags[" );
  STRING_ADD( t, num );
  STRING_ADD( t, "]: " );

/* append the flag's name if the corresponding bit is set */
#define STRING_ADD_FLAG( flag_name ) \
  if(flags & flag_name) \
    STRING_ADD( t, " " #flag_name );

  STRING_ADD_FLAG( cmsFLAGS_NOCACHE );
  STRING_ADD_FLAG( cmsFLAGS_NOOPTIMIZE );
  STRING_ADD_FLAG( cmsFLAGS_NULLTRANSFORM );
  STRING_ADD_FLAG( cmsFLAGS_GAMUTCHECK );
  STRING_ADD_FLAG( cmsFLAGS_SOFTPROOFING );
  STRING_ADD_FLAG( cmsFLAGS_BLACKPOINTCOMPENSATION );
  STRING_ADD_FLAG( cmsFLAGS_NOWHITEONWHITEFIXUP );
  STRING_ADD_FLAG( cmsFLAGS_HIGHRESPRECALC );
  STRING_ADD_FLAG( cmsFLAGS_LOWRESPRECALC );
  STRING_ADD_FLAG( cmsFLAGS_8BITS_DEVICELINK );
  STRING_ADD_FLAG( cmsFLAGS_GUESSDEVICECLASS );
  STRING_ADD_FLAG( cmsFLAGS_KEEP_SEQUENCE );
  STRING_ADD_FLAG( cmsFLAGS_FORCE_CLUT );
  STRING_ADD_FLAG( cmsFLAGS_CLUT_POST_LINEARIZATION );
  STRING_ADD_FLAG( cmsFLAGS_CLUT_PRE_LINEARIZATION );
  return t;
}

/** Function l2cmsModuleData_Convert
 *  @brief Convert a ICC device link to LittleCMS 2 color transform
 *  @ingroup cmm_handling
 *
 *  The function might be used to provide a module specific context.
 *  Implements oyModuleData_Convert_f
 *
 *  @version Oyranos: 0.1.10
 *  @since 2008/12/28 (Oyranos: 0.1.10)
 *  @date 2008/12/28
 */
int l2cmsModuleData_Convert ( oyPointer_s * data_in,
                              oyPointer_s * data_out,
                              oyFilterNode_s * node )
{
  int error = !data_in || !data_out;
  oyPointer_s * cmm_ptr_in = data_in,
              * cmm_ptr_out = data_out;
  l2cmsTransformWrap_s * ltw = 0;
  cmsHTRANSFORM xform = 0;
  cmsHPROFILE lps[2] = {0,0};
  oyFilterPlug_s * plug = oyFilterNode_GetPlug( node, 0 );
  oyFilterSocket_s * socket = oyFilterNode_GetSocket( node, 0 ),
                   * remote_socket = oyFilterPlug_GetSocket( plug );
  oyOptions_s * node_options = oyFilterNode_GetOptions( node, 0 );
  oyImage_s * image_input = (oyImage_s*)oyFilterSocket_GetData( remote_socket ),
            * image_output = (oyImage_s*)oyFilterSocket_GetData( socket );

  if(!error)
  {
    cmm_ptr_in = (oyPointer_s*) data_in;
    cmm_ptr_out = (oyPointer_s*) data_out;
  }

  /* both ends must use the expected resource types: a device link in,
   * a lcms transform out */
  if(!error &&
     ( (strcmp( oyPointer_GetResourceName(cmm_ptr_in), oyCOLOR_ICC_DEVICE_LINK ) != 0) ||
       (strcmp( oyPointer_GetResourceName(cmm_ptr_out),
l2cmsTRANSFORM ) != 0) ) )
    error = 1;

  if(!error)
  {
    /* open the device link blob as a lcms profile handle */
#if LCMS_VERSION < 2060
    lps[0] = CMMProfileOpen_M( node, oyPointer_GetPointer(cmm_ptr_in),
                               oyPointer_GetSize( cmm_ptr_in) );
#else
    {
      oyFilterNode_s * node2 = oyFilterNode_Copy( node, NULL );
      cmsContext tc = l2cmsCreateContext( NULL, node2 ); /* threading context */
      l2cmsSetLogErrorHandlerTHR( tc, l2cmsErrorHandlerFunction );
      lps[0] = CMMProfileOpen_M( tc, oyPointer_GetPointer(cmm_ptr_in),
                                 oyPointer_GetSize( cmm_ptr_in) );
    }
#endif
    /* build the actual transform and wrap it into cmm_ptr_out via ltw */
    xform = l2cmsCMMConversionContextCreate_( node, lps, 1, 0,0,0,
                                oyImage_GetPixelLayout( image_input, oyLAYOUT ),
                                oyImage_GetPixelLayout( image_output,oyLAYOUT ),
                                node_options, &ltw, cmm_ptr_out );
    if(oy_debug > 4)
    {
      /* debugging: tag the output pointer with node id, options hash and the
       * device link's MD5 so cache mismatches can be traced */
      oyProfile_s *p = oyProfile_FromMem( oyPointer_GetSize( cmm_ptr_in),
                                          oyPointer_GetPointer(cmm_ptr_in),0,0);
      uint32_t id[8]={0,0,0,0,0,0,0,0};
      char * hash_text = oyStringCopy_( l2cmsTRANSFORM":", oyAllocateFunc_ );
      char * t = l2cmsFilterNode_GetText( node, oyNAME_NICK, oyAllocateFunc_ );
      STRING_ADD( hash_text, t );
      oyFree_m_(t);
      oyMiscBlobGetHash_((void*)hash_text, oyStrlen_(hash_text), 0, (unsigned char*)id);
      oyStringAddPrintf_( &t, oyAllocateFunc_, oyDeAllocateFunc_,
                          "node: %d hash: %08x%08x%08x%08x",
                          oyStruct_GetId((oyStruct_s*)node), id[0],id[1],id[2],id[3] );
      oyProfile_GetMD5( p, OY_COMPUTE, id );
      oyStringAddPrintf_( &t, oyAllocateFunc_, oyDeAllocateFunc_,
                          " oyDL: %08x%08x%08x%08x", id[0],id[1],id[2],id[3] );
      if(oy_debug >= 1)
        l2cms_msg( oyMSG_DBG,(oyStruct_s*) node, OY_DBG_FORMAT_
                   "oyDL: %08x%08x%08x%08x %s %s", OY_DBG_ARGS_,
                   id[0],id[1],id[2],id[3], t, hash_text );
      oyPointer_SetId( cmm_ptr_out, t );
      oyProfile_Release( &p );
      oyFree_m_(t);
    }

    if(!xform)
    {
      uint32_t f = oyImage_GetPixelLayout( image_input, oyLAYOUT );
      l2cms_msg( oyMSG_WARN,(oyStruct_s*) node, OY_DBG_FORMAT_
                 "float:%d optimised:%d colorspace:%d extra:%d channels:%d lcms_bytes%d",
                 OY_DBG_ARGS_,
                 T_FLOAT(f), T_OPTIMIZED(f), T_COLORSPACE(f), T_EXTRA(f),
                 T_CHANNELS(f), T_BYTES(f) );
      error = 1;
    }

    {
#if LCMS_VERSION >= 2060
      /* drop the node reference held by the lcms threading context */
      oyFilterNode_s * node = l2cmsGetContextUserData( l2cmsGetProfileContextID( lps[0] ) );
      oyFilterNode_Release( &node );
#endif
      CMMProfileRelease_M (lps[0] );
    }
  }

  oyFilterPlug_Release( &plug );
  oyFilterSocket_Release( &socket );
  oyFilterSocket_Release( & remote_socket );
  oyImage_Release( &image_input );
  oyImage_Release( &image_output );
  oyOptions_Release( &node_options );

  return error;
}

char * oyCMMCacheListPrint_();

/** Function l2cmsFilterPlug_CmmIccRun
 *  @brief implement oyCMMFilterPlug_GetNext_f()
 *
 *  @version Oyranos: 0.1.10
 *  @since 2008/07/18 (Oyranos: 0.1.8)
 *  @date 2011/06/17
 */
int l2cmsFilterPlug_CmmIccRun ( oyFilterPlug_s * requestor_plug,
                                oyPixelAccess_s * ticket )
{
  int j, k, n;
  int error = 0;
  oyDATATYPE_e data_type_in = 0,
               data_type_out = 0;
  int channels_out, channels_in;
  int bps_in;
  oyPixel_t pixel_layout_in,
            layout_out;
  oyFilterSocket_s * socket = oyFilterPlug_GetSocket( requestor_plug );
  oyFilterPlug_s * plug = 0;
  oyFilterNode_s * input_node,
                 * node = oyFilterSocket_GetNode( socket );
  oyImage_s * image_input = 0,
            * image_output = 0;
  oyArray2d_s * array_in = 0,
              * array_out = 0;
  l2cmsTransformWrap_s * ltw  = 0;
  oyPixelAccess_s * new_ticket = ticket;

  plug = oyFilterNode_GetPlug( node, 0 );
  input_node = oyFilterNode_GetPlugNode( node, 0 );

  image_input = oyFilterPlug_ResolveImage( plug, socket, ticket );
  pixel_layout_in = oyImage_GetPixelLayout( image_input, oyLAYOUT );
  channels_in = oyToChannels_m( pixel_layout_in );

  image_output = oyPixelAccess_GetOutputImage( ticket );
  layout_out = oyImage_GetPixelLayout( image_output, oyLAYOUT );
  channels_out = oyToChannels_m( layout_out );
  if(!channels_out)
  {
    l2cms_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
               "layout_out %s channels %d", OY_DBG_ARGS_,
               oyPixelPrint(layout_out,malloc), channels_out );
    channels_out = 1;
  }

  if(oyImage_GetPixelLayout( image_input, oyLAYOUT ) !=
     oyImage_GetPixelLayout( image_output, oyLAYOUT ))
  {
    /* create a new ticket to avoid pixel layout conflicts */
    /* keep old ticket array
dimensions */ oyArray2d_s * a, * old_a = oyPixelAccess_GetArray( new_ticket ); new_ticket = oyPixelAccess_Copy( ticket, ticket->oy_ ); oyPixelAccess_SetOutputImage( new_ticket, image_input ); /* remove old array as it's layout does not fit */ oyPixelAccess_SetArray( new_ticket, 0, 0 ); /* should be empty */ a = oyPixelAccess_GetArray( new_ticket ); if(!a) { /* Use original pixel size for being save and do not fiddle with ROI's */ int w = oyArray2d_GetDataGeo1( old_a, 2 ) / channels_out; int h = oyArray2d_GetDataGeo1( old_a, 3 ); a = oyArray2d_Create( NULL, w * channels_in,h, oyToDataType_m( pixel_layout_in ), ticket->oy_ ); if(oy_debug) { l2cms_msg( oy_debug?oyMSG_WARN:oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_"layout_out(%d) != layout_in(%d) created %s", OY_DBG_ARGS_, layout_out, pixel_layout_in, oyArray2d_Show( a, channels_in )); } } oyArray2d_Release( &old_a ); oyPixelAccess_SetArray( new_ticket, a, 0 ); oyArray2d_Release( &a ); oyPixelAccess_SynchroniseROI( new_ticket, ticket ); if(oy_debug) l2cms_msg( oy_debug?oyMSG_WARN:oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_"new_ticket %s", OY_DBG_ARGS_, oyPixelAccess_Show( new_ticket )); } /* We let the input filter do its processing first. */ error = oyFilterNode_Run( input_node, plug, new_ticket ); if(error != 0) { l2cms_msg( oyMSG_ERROR, (oyStruct_s*)input_node, OY_DBG_FORMAT_"%s %d err:%d", OY_DBG_ARGS_, _("running new ticket failed"), oyStruct_GetId( (oyStruct_s*)new_ticket ), error ); return error; } array_in = oyPixelAccess_GetArray( new_ticket ); array_out = oyPixelAccess_GetArray( ticket ); if(oy_debug > 2) l2cms_msg( oyMSG_DBG, (oyStruct_s*)new_ticket, OY_DBG_FORMAT_"%s %cnew_ticket->array:%s %s[%d]", OY_DBG_ARGS_,_("Read from"), oyPixelAccess_ArrayIsFocussed( new_ticket )?' 
':'~', oyArray2d_Show( array_in, channels_in ), _("Image"), oyStruct_GetId( (oyStruct_s*)image_input ) ); if(oy_debug > 2) l2cms_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_"%s %cticket->array:%s %s[%d]", OY_DBG_ARGS_,_("Write to"), oyPixelAccess_ArrayIsFocussed( ticket )?' ':'~', oyArray2d_Show( array_out, channels_out ), _("Image"), oyStruct_GetId( (oyStruct_s*)image_output ) ); data_type_in = oyToDataType_m( oyImage_GetPixelLayout( image_input, oyLAYOUT ) ); bps_in = oyDataTypeGetSize( data_type_in ); /*if(data_type_in == oyHALF) { oyFilterSocket_Callback( requestor_plug, oyCONNECTOR_EVENT_INCOMPATIBLE_DATA ); l2cms_msg(oyMSG_WARN,(oyStruct_s*)ticket, OY_DBG_FORMAT_" can not handle oyHALF",OY_DBG_ARGS_); error = 1; }*/ if(!image_output) { l2cms_msg( oyMSG_WARN,(oyStruct_s*)ticket, OY_DBG_FORMAT_ " no ticket->output_image", OY_DBG_ARGS_); error = 1; } if(!error) { oyPointer_s * backend_data = oyFilterNode_GetContext( node ); data_type_out = oyToDataType_m( oyImage_GetPixelLayout( image_output, oyLAYOUT ) ); /* get transform */ error = l2cmsCMMTransform_GetWrap_( backend_data, &ltw ); if(oy_debug >= 2 && ltw) l2cms_msg( oyMSG_DBG, NULL, OY_DBG_FORMAT_ " xform: "OY_PRINT_POINTER " ltw: "OY_PRINT_POINTER " backend_data: %d", OY_DBG_ARGS_, ltw->l2cms, ltw, oyStruct_GetId((oyStruct_s*)backend_data) ); if(oy_debug > 4) /* verify context */ { int msg_type = oyMSG_DBG; uint32_t id[8]={0,0,0,0,0,0,0,0}; char * hash_text = oyStringCopy_( l2cmsTRANSFORM":", oyAllocateFunc_ ); char * t = 0; t = l2cmsFilterNode_GetText( node, oyNAME_NICK, oyAllocateFunc_ ); STRING_ADD( hash_text, t ); oyFree_m_(t); oyMiscBlobGetHash_((void*)hash_text, oyStrlen_(hash_text), 0, (unsigned char*)id); oyStringAddPrintf_( &t, oyAllocateFunc_, oyDeAllocateFunc_, "hash: %08x%08x%08x%08x", id[0],id[1],id[2],id[3] ); /* check if we obtained the context from our * l2cms_api4_cmm::l2cmsFilterNode_CmmIccContextToMem */ if(oyPointer_GetFuncName( backend_data ) && strstr(oyPointer_GetLibName( 
backend_data ),CMM_NICK) && /* check if context and actual options match */ oyPointer_GetId( backend_data ) && !strstr(oyPointer_GetId( backend_data ),t)) { /* send error message */ error = 1; msg_type = oyMSG_ERROR; l2cms_msg( msg_type, (oyStruct_s*)ticket, OY_DBG_FORMAT_ "requested and actual contexts differ by hash",OY_DBG_ARGS_ ); } if(error || oy_debug > 4) l2cms_msg( msg_type, (oyStruct_s*)ticket, OY_DBG_FORMAT_ "node: %d \"%s\" (context %s)\nwant: %s\n%s", OY_DBG_ARGS_, oyStruct_GetId((oyStruct_s*)node), t, oyNoEmptyString_m_(oyPointer_GetId( backend_data )), oy_debug > 0 && error > 0 ? hash_text : "----", oy_debug > 0 && error > 0 ? oyCMMCacheListPrint_() : "" ); if(oy_debug > 4 && error < 1) l2cms_msg( msg_type, (oyStruct_s*)ticket, OY_DBG_FORMAT_ "%s", OY_DBG_ARGS_, hash_text ); oyFree_m_(hash_text); oyFree_m_(t); } oyPointer_Release( &backend_data ); } DBGs_PROG4_S( ticket, "channels in/out: %d[%d]->%d[%d]", channels_in, oyStruct_GetId((oyStruct_s*)image_input), channels_out, oyStruct_GetId((oyStruct_s*)image_output) ); if(ltw && !array_out) { l2cms_msg( oyMSG_ERROR,(oyStruct_s*)ticket, OY_DBG_FORMAT_ " no ticket->array", OY_DBG_ARGS_); error = 1; } /* now do some position blind manipulations */ if(ltw && error <= 0) { uint8_t * array_in_tmp = 0, * array_out_tmp = 0; float * array_in_tmp_flt = 0, * array_out_tmp_flt = 0; double * array_in_tmp_dbl = 0, * array_out_tmp_dbl = 0; uint8_t ** array_in_data = oyArray2d_GetData( array_in ), ** array_out_data = oyArray2d_GetData( array_out ); int threads_n = #if defined(_OPENMP) && defined(USE_OPENMP) omp_get_max_threads(); #else 1; #endif int w_in = (int)(oyArray2d_GetWidth(array_in)+0.5), w_out = (int)(oyArray2d_GetWidth(array_out)+0.5); int stride_in = w_in * bps_in; n = OY_MIN(w_in/channels_in, w_out/channels_out); if(oy_debug) l2cms_msg( oyMSG_DBG,(oyStruct_s*)ticket, OY_DBG_FORMAT_ " %s[%d]=\"%s\" threads_n: %d %s "OY_PRINT_POINTER " -> %s "OY_PRINT_POINTER" convert pixel: %d", OY_DBG_ARGS_, 
_("Node"),oyStruct_GetId((oyStruct_s*)node),oyStruct_GetInfo((oyStruct_s*)node,0,0), threads_n, oyArray2d_Show(array_in,channels_in),array_in_data, oyArray2d_Show(array_out,channels_out),array_out_data,n ); if(!(data_type_in == oyUINT8 || data_type_in == oyUINT16 || data_type_in == oyHALF || data_type_in == oyFLOAT || data_type_in == oyDOUBLE)) { oyFilterSocket_Callback( requestor_plug, oyCONNECTOR_EVENT_INCOMPATIBLE_DATA ); error = 1; } if(ltw->sig_in == icSigXYZData && (data_type_in == oyFLOAT || data_type_in == oyDOUBLE)) { array_in_tmp = oyAllocateFunc_( stride_in * threads_n ); if(data_type_in == oyFLOAT) array_in_tmp_flt = (float*) array_in_tmp; else if(data_type_in == oyDOUBLE) array_in_tmp_dbl = (double*) array_in_tmp; } if(ltw->sig_out == icSigXYZData && (data_type_out == oyFLOAT || data_type_out == oyDOUBLE)) { array_out_tmp = array_out_data[0]; } /* - - - - - conversion - - - - - */ /*l2cms_msg(oyMSG_WARN,(oyStruct_s*)ticket, "%s: %d Start lines: %d", __FILE__,__LINE__, array_out->height);*/ if(!error) { const double xyz_factor = 1.0 + 32767.0/32768.0; const int use_xyz_scale = 1; int index = 0; int array_in_height = oyArray2d_GetHeight(array_in), array_out_height = oyArray2d_GetHeight(array_out), lines = OY_MIN(array_in_height, array_out_height); if(lines > threads_n * 10) { #if defined(USE_OPENMP) #pragma omp parallel for private(index,j,array_in_tmp_flt,array_in_tmp_dbl,array_out_tmp_flt,array_out_tmp_dbl) #endif for( k = 0; k < lines; ++k) { if(array_in_tmp && use_xyz_scale) { #if defined(_OPENMP) && defined(USE_OPENMP) index = omp_get_thread_num(); #endif memcpy( &array_in_tmp[stride_in*index], array_in_data[k], w_in * bps_in ); if(data_type_in == oyFLOAT) { array_in_tmp_flt = (float*) &array_in_tmp[stride_in*index]; for(j = 0; j < w_in; ++j) { array_in_tmp_flt[j] /= xyz_factor; } } else if(data_type_in == oyDOUBLE) { array_in_tmp_dbl = (double*) &array_in_tmp[stride_in*index]; for(j = 0; j < w_in; ++j) { array_in_tmp_dbl[j] /= xyz_factor; } } 
l2cmsDoTransform( ltw->l2cms, &array_in_tmp[stride_in*index], array_out_data[k], n ); } else l2cmsDoTransform( ltw->l2cms, array_in_data[k], array_out_data[k], n ); if(array_out_tmp && use_xyz_scale) { if(data_type_out == oyFLOAT) { array_out_tmp_flt = (float*) array_out_data[k]; for(j = 0; j < w_out; ++j) array_out_tmp_flt[j] *= xyz_factor; } else if(data_type_out == oyDOUBLE) { array_out_tmp_dbl = (double*) array_out_data[k]; for(j = 0; j < w_out; ++j) array_out_tmp_dbl[j] *= xyz_factor; } } } } else for( k = 0; k < lines; ++k) { if(array_in_tmp && use_xyz_scale) { memcpy( array_in_tmp, array_in_data[k], w_in * bps_in ); if(data_type_in == oyFLOAT) for(j = 0; j < w_in; ++j) { array_in_tmp_flt[j] /= xyz_factor; } if(data_type_in == oyDOUBLE) for(j = 0; j < w_in; ++j) { array_in_tmp_dbl[j] /= xyz_factor; } l2cmsDoTransform( ltw->l2cms, array_in_tmp, array_out_data[k], n ); } else l2cmsDoTransform( ltw->l2cms, array_in_data[k], array_out_data[k], n ); if(array_out_tmp && use_xyz_scale) { if(data_type_out == oyFLOAT) { array_out_tmp_flt = (float*) array_out_data[k]; for(j = 0; j < w_out; ++j) array_out_tmp_flt[j] *= xyz_factor; } else if(data_type_out == oyDOUBLE) { array_out_tmp_dbl = (double*) array_out_data[k]; for(j = 0; j < w_out; ++j) array_out_tmp_dbl[j] *= xyz_factor; } } } /*l2cms_msg(oyMSG_WARN,(oyStruct_s*)ticket, "%s: %d End width: %d", __FILE__,__LINE__, n);*/ } if(array_in_tmp) oyDeAllocateFunc_( array_in_tmp ); if(getenv("OY_DEBUG_WRITE")) { char * t = 0; oyStringAddPrintf( &t, 0,0, "%04d-%s-array_in[%d].ppm", ++oy_debug_write_id,CMM_NICK,oyStruct_GetId((oyStruct_s*)array_in)); oyArray2d_ToPPM_( (oyArray2d_s_*)array_in, t ); l2cms_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_ "wrote debug image to: %s", OY_DBG_ARGS_, t ); t[0] = '\000'; oyStringAddPrintf( &t, 0,0, "%04d-%s-array_out[%d].ppm", oy_debug_write_id,CMM_NICK,oyStruct_GetId((oyStruct_s*)array_out)); oyArray2d_ToPPM_( (oyArray2d_s_*)array_out, t ); l2cms_msg( oyMSG_DBG, 
(oyStruct_s*)ticket, OY_DBG_FORMAT_ "wrote debug image to: %s", OY_DBG_ARGS_, t ); t[0] = '\000'; oyStringAddPrintf( &t, 0,0, "%04d-%s-node[%d]-array_out[%d]%dc.ppm", oy_debug_write_id,CMM_NICK,oyStruct_GetId((oyStruct_s*)node),oyStruct_GetId((oyStruct_s*)array_out),channels_out); { oyProfile_s * p = oyImage_GetProfile( image_output ); oyImage_s * img = oyImage_Create( oyArray2d_GetWidth(array_out)/channels_out, oyArray2d_GetHeight(array_out),NULL, oyImage_GetPixelLayout( image_output, oyLAYOUT ), p, NULL ); oyImage_ReadArray( img, NULL, array_out, NULL ); oyImage_WritePPM( img, t, t ); oyProfile_Release( &p ); oyImage_Release( &img ); } l2cms_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_ "wrote debug image to: %s", OY_DBG_ARGS_, t ); oyFree_m_(t); } } else { oyFilterGraph_s * ticket_graph = oyPixelAccess_GetGraph( ticket ); oyOptions_s * ticket_graph_opts = oyFilterGraph_GetOptions( ticket_graph ); if(error) oyFilterSocket_Callback( requestor_plug, oyCONNECTOR_EVENT_INCOMPATIBLE_CONTEXT ); else oyFilterSocket_Callback( requestor_plug, oyCONNECTOR_EVENT_OK ); error = oyOptions_SetFromString( &ticket_graph_opts, "//" OY_TYPE_STD "/profile/dirty", "true", OY_CREATE_NEW ); oyFilterGraph_Release( &ticket_graph ); oyOptions_Release( &ticket_graph_opts ); error = 1; } if(oyImage_GetPixelLayout( image_input, oyLAYOUT ) != oyImage_GetPixelLayout( image_output, oyLAYOUT )) oyPixelAccess_Release( &new_ticket ); oyFilterPlug_Release( &plug ); oyFilterSocket_Release( &socket ); oyFilterNode_Release( &input_node ); oyFilterNode_Release( &node ); oyImage_Release( &image_input ); oyImage_Release( &image_output ); oyArray2d_Release( &array_in ); oyArray2d_Release( &array_out ); return error; } /* oyPointer oyCMMallocateFunc ( size_t size ) { oyPointer p = 0; if(size) p = malloc(size); return p; } void oyCMMdeallocateFunc ( oyPointer mem ) { if(mem) free(mem); }*/ /** Function l2cmsErrorHandlerFunction * @brief * * @version Oyranos: 0.1.8 * @date 2007/11/00 * @since 
2007/11/00 (Oyranos: 0.1.8) */ /* Route littleCMS error reports into the Oyranos message system; with LCMS >= 2.6 the object attached to the cmsContext is recovered for the report. */ void l2cmsErrorHandlerFunction ( cmsContext ContextID, cmsUInt32Number ErrorCode OY_UNUSED, const char * ErrorText ) { int code = 0; #if LCMS_VERSION < 2060 oyStruct_s * s = ContextID; #else oyStruct_s * s = ContextID ? l2cmsGetContextUserData( ContextID ) : NULL; #endif code = oyMSG_ERROR; l2cms_msg( code, s, CMM_NICK ": %s", ErrorText ); } /** Function l2cmsCMMMessageFuncSet * @brief * * @version Oyranos: 0.1.8 * @date 2007/11/00 * @since 2007/11/00 (Oyranos: 0.1.8) */ /* Install the message callback for this module and for the lcm2 helper library. */ int l2cmsCMMMessageFuncSet ( oyMessage_f message_func ) { l2cms_msg = message_func; lcm2MessageFuncSet( message_func ); return 0; } /* Built-in XML defaults for this module's advanced ICC color options. */ char l2cms_extra_options[] = { "\n\ <" OY_TOP_SHARED ">\n\ <" OY_DOMAIN_INTERNAL ">\n\ <" OY_TYPE_STD ">\n\ <" "icc_color" ">\n\ <cmyk_cmyk_black_preservation.advanced>0</cmyk_cmyk_black_preservation.advanced>\n\ <precalculation.advanced>0</precalculation.advanced>\n\ <precalculation_curves.advanced>1</precalculation_curves.advanced>\n\ <adaption_state.advanced>1.0</adaption_state.advanced>\n\ <no_white_on_white_fixup.advanced>1</no_white_on_white_fixup.advanced>\n\ </" "icc_color" ">\n\ </" OY_TYPE_STD ">\n\ </" OY_DOMAIN_INTERNAL ">\n\ </" OY_TOP_SHARED ">\n" }; /* A(): append a string to the local XFORMS buffer 'tmp'. */ #define A(long_text) STRING_ADD( tmp, long_text) /** Function l2cmsGetOptionsUI * @brief return XFORMS for matching options * * @version Oyranos: 0.9.5 * @date 2014/01/08 * @since 2009/07/29 (Oyranos: 0.1.10) */ int l2cmsGetOptionsUI ( oyCMMapiFilter_s * module OY_UNUSED, oyOptions_s * options, int flags, char ** ui_text, oyAlloc_f allocateFunc ) { char * tmp = 0; tmp = (char *)oyOptions_FindString( options, "cmyk_cmyk_black_preservation", 0 ); if(tmp == 0) return 0; tmp = oyStringCopy_( "\ <xf:group type=\"frame\">\ <xf:label>little CMS 2 ", oyAllocateFunc_ ); A( _("Extended Options")); A( ":</xf:label>\n"); A("\ <xf:select1 ref=\"/" OY_TOP_SHARED "/" OY_DOMAIN_INTERNAL "/" OY_TYPE_STD "/" "icc_color/cmyk_cmyk_black_preservation\">\n\ <xf:label>" ); A( _("Black Preservation")); 
/* (continuation of l2cmsGetOptionsUI) Append the XFORMS widgets for the remaining advanced options. */
A( "</xf:label>\n\ <xf:hint>" ); A( _("Decide how to preserve the black channel for Cmyk to Cmyk transforms")); A( "</xf:hint>\n\ <xf:help>" ); A( _("Cmyk to Cmyk transforms can provide various strategies to preserve the black only channel. None means, black might change to Cmy and thus text prints not very well. LittleCMS 2 has added two different modes to deal with that: Black-ink-only preservation and black-plane preservation. The first is simple and effective: do all the colorimetric transforms but keep only K (preserving L*) where the source image is only black. The second mode is fair more complex and tries to preserve the WHOLE K plane.")); A( "</xf:help>\n\ <xf:choices>\n\ <xf:item>\n\ <xf:value>0</xf:value>\n\ <xf:label>"); A( _("[none]")); A( "</xf:label>\n\ </xf:item>\n\ <xf:item>\n\ <xf:value>1</xf:value>\n\ <xf:label>LCMS_PRESERVE_PURE_K</xf:label>\n\ </xf:item>\n\ <xf:item>\n\ <xf:value>2</xf:value>\n\ <xf:label>LCMS_PRESERVE_K_PLANE</xf:label>\n\ </xf:item>\n\ </xf:choices>\n\ </xf:select1>\n"); A("\ <xf:select1 ref=\"/" OY_TOP_SHARED "/" OY_DOMAIN_INTERNAL "/" OY_TYPE_STD "/" "icc_color/precalculation\">\n\ <xf:label>" ); A( _("Optimization")); A( "</xf:label>\n\ <xf:hint>" ); A( _("Color Transforms can be differently stored internally")); A( "</xf:hint>\n\ <xf:help>" ); A( _("Little CMS tries to optimize profile chains whatever possible. There are some built-in optimization schemes, and you can add new schemas by using a plug-in. This generally improves the performance of the transform, but may introduce a small delay of 1-2 seconds when creating the transform. If you are going to transform just few colors, you don't need this precalculations. Then, the flag cmsFLAGS_NOOPTIMIZE in cmsCreateTransform() can be used to inhibit the optimization process. 
See the API reference for a more detailed discussion of the flags.")); A( "</xf:help>\n\ <xf:choices>\n\ <xf:item>\n\ <xf:value>0</xf:value>\n\ <xf:label>normal</xf:label>\n\ </xf:item>\n\ <xf:item>\n\ <xf:value>1</xf:value>\n\ <xf:label>LCMS2_NOOPTIMIZE</xf:label>\n\ </xf:item>\n\ <xf:item>\n\ <xf:value>2</xf:value>\n\ <xf:label>LCMS2_HIGHRESPRECALC</xf:label>\n\ </xf:item>\n\ <xf:item>\n\ <xf:value>3</xf:value>\n\ <xf:label>LCMS2_LOWRESPRECALC</xf:label>\n\ </xf:item>\n\ </xf:choices>\n\ </xf:select1>\n"); A("\ <xf:select1 ref=\"/" OY_TOP_SHARED "/" OY_DOMAIN_INTERNAL "/" OY_TYPE_STD "/" "icc_color/precalculation_curves\">\n\ <xf:label>" ); A( _("Curves for Optimization")); A( "</xf:label>\n\ <xf:hint>" ); A( _("Color Transform CLUT's can additionally use curves for special cases")); A( "</xf:hint>\n\ <xf:help>" ); A( _("Little CMS can use curves before and after CLUT's for special cases like gamma encoded values to and from linear gamma values. Performance will suffer.")); A( "</xf:help>\n\ <xf:choices>\n\ <xf:item>\n\ <xf:value>0</xf:value>\n\ <xf:label>"); A( _("[none]")); A( "</xf:label>\n\ </xf:item>\n\ <xf:item>\n\ <xf:value>1</xf:value>\n\ <xf:label>LCMS2_POST+PRE_CURVES</xf:label>\n\ </xf:item>\n\ </xf:choices>\n\ </xf:select1>\n"); A("\ <xf:select1 ref=\"/" OY_TOP_SHARED "/" OY_DOMAIN_INTERNAL "/" OY_TYPE_STD "/" "icc_color/adaption_state\">\n\ <xf:label>" ); A( _("Adaptation State")); A( "</xf:label>\n\ <xf:hint>" ); A( _("Adaptation state for absolute colorimetric intent")); A( "</xf:hint>\n\ <xf:help>" ); A( _("The adaption state should be between 0 and 1.0 and will apply to the absolute colorimetric intent.")); A( "</xf:help>\n\ <xf:choices>\n\ <xf:item>\n\ <xf:value>0.0</xf:value>\n\ <xf:label>0.0</xf:label>\n\ </xf:item>\n\ <xf:item>\n\ <xf:value>1.0</xf:value>\n\ <xf:label>1.0</xf:label>\n\ </xf:item>\n\ </xf:choices>\n\ </xf:select1>\n"); A("\ <xf:select1 ref=\"/" OY_TOP_SHARED "/" OY_DOMAIN_INTERNAL "/" OY_TYPE_STD "/" 
"icc_color/no_white_on_white_fixup\">\n\ <xf:label>" ); A( _("No White on White Fix")); A( "</xf:label>\n\ <xf:hint>" ); A( _("Skip White Point on White point alignment")); A( "</xf:hint>\n\ <xf:help>" ); A( _("Avoid force of White on White mapping. Default for absolute rendering intent.")); A( "</xf:help>\n\ <xf:choices>\n\ <xf:item>\n\ <xf:value>0</xf:value>\n\ <xf:label>"); A( _("No")); A( "</xf:label>\n\ </xf:item>\n\ <xf:item>\n\ <xf:value>1</xf:value>\n\ <xf:label>"); A( _("Yes")); A( "</xf:label>\n\ </xf:item>\n\ </xf:choices>\n\ </xf:select1>\n\ </xf:group>\n" ); /* NOTE(review): when allocateFunc is NULL the locally allocated 'tmp' is not freed before the early 'return 1' — confirm whether callers guarantee a non-NULL allocator. */ if(allocateFunc && tmp) { char * t = oyStringCopy_( tmp, allocateFunc ); oyFree_m_( tmp ); tmp = t; t = 0; } else return 1; *ui_text = tmp; return 0; } /** \addtogroup misc_modules * @{ */ /** \addtogroup lcm2_misc lcm2 Module * @brief Little CMS 2 ICC style color profiles * * The modules provide ICC style color space creation. * * @{ */ /* OY_LCM2_PARSE_CGATS -------------------------- */ /** Function lcm2ParseCGATS * @brief Parse a CGATS text * * @version Oyranos: 0.9.7 * @since 2017/11/26 (Oyranos: 0.9.7) * @date 2017/11/26 */ /* Stub: parsing is not implemented yet; returns NULL for every input. */ oyImage_s* lcm2ParseCGATS ( const char * cgats ) { int error = !cgats; oyImage_s * spec = NULL; if(error) return spec; //cmsCGATS return spec; } #define OY_LCM2_PARSE_CGATS OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH \ "parse_cgats.cgats._" CMM_NICK "._CPU" /** * This function implements oyMOptions_Handle_f. * * @param[in] options expects at least one options * - "cgats": The option shall be a string. * @param[in] command "//" OY_TYPE_STD "/parse_cgats" * @param[out] result will contain a oyImage_s in "colors" * * The Handler uses internally lcm2ParseCGATS(). 
* * @version Oyranos: 0.9.7 * @since 2017/11/26 (Oyranos: 0.9.7) * @date 2017/11/26 */ int l2cmsMOptions_Handle5 ( oyOptions_s * options, const char * command, oyOptions_s ** result ) { int error = 0; if(oyFilterRegistrationMatch(command,"can_handle", 0)) { if(oyFilterRegistrationMatch(command,"parse_cgats", 0)) { const char * cgats = oyOptions_FindString( options, "cgats", 0 ); if(!cgats) error = 1; return error; } else return -1; } else if(oyFilterRegistrationMatch(command,"parse_cgats.cgats", 0)) { oyImage_s * spec = NULL; const char * cgats = NULL; cgats = oyOptions_FindString( options, "cgats", 0 ); spec = lcm2ParseCGATS( cgats ); if(spec) { /* NOTE(review): the ".colors" registration is much shorter than the full registrations used by the sibling handlers (e.g. ...REGISTRATION ".icc_profile") — confirm the short form is intended. */ oyOption_s * o = oyOption_FromRegistration( ".colors", 0 ); error = oyOption_MoveInStruct( o, (oyStruct_s**) &spec ); if(!*result) *result = oyOptions_New(0); oyOptions_MoveIn( *result, &o, -1 ); } else l2cms_msg( oyMSG_WARN, (oyStruct_s*)options, OY_DBG_FORMAT_ "parsing creation failed", OY_DBG_ARGS_ ); } return 0; } /** * This function implements oyCMMinfoGetText_f. * * @version Oyranos: 0.9.7 * @since 2017/06/06 (Oyranos: 0.9.7) * @date 2017/06/06 */ const char * l2cmsInfoGetTextProfileC5(const char * select, oyNAME_e type, oyStruct_s * context OY_UNUSED ) { if(strcmp(select, "can_handle")==0) { if(type == oyNAME_NICK) return "check"; else if(type == oyNAME_NAME) return _("check"); else return _("Check if this module can handle a certain command."); } else if(strcmp(select, "parse_cgats")==0) { if(type == oyNAME_NICK) return "parse_cgats"; else if(type == oyNAME_NAME) return _("Parse CGATS text."); else return _("The littleCMS \"parse_cgats\" command lets you parse CGATS files. The filter expects a oyOption_s object with name \"cgats\" containing a string value. 
The result will appear in \"colors\" as a oyImage_s."); } else if(strcmp(select, "help")==0) { if(type == oyNAME_NICK) return "help"; else if(type == oyNAME_NAME) return _("Parse CGATS file."); else return _("The littleCMS \"parse_cgats\" command lets you parse CGATS files. See the \"parse_cgats\" info item."); } return 0; } const char *l2cms_texts_parse_cgats[4] = {"can_handle","parse_cgats","help",0}; /** l2cms_api10_cmm5 * @brief Node for Parsing a CGATS text * * littleCMS 2 oyCMMapi10_s implementation * * For the front end API see oyOptions_Handle(). The backend options * are described in l2cmsMOptions_Handle5(). * * @version Oyranos: 0.9.7 * @since 2017/06/05 (Oyranos: 0.9.7) * @date 2017/06/05 */ oyCMMapi10_s_ l2cms_api10_cmm5 = { oyOBJECT_CMM_API10_S, 0,0,0, 0, l2cmsCMMInit, l2cmsCMMMessageFuncSet, OY_LCM2_PARSE_CGATS, CMM_VERSION, CMM_API_VERSION, /**< int32_t module_api[3] */ 0, /* id_; keep empty */ 0, /* api5_; keep empty */ 0, /* runtime_context */ l2cmsInfoGetTextProfileC5, /**< getText */ (char**)l2cms_texts_parse_cgats, /**<texts; list of arguments to getText*/ l2cmsMOptions_Handle5 /**< oyMOptions_Handle_f oyMOptions_Handle */ }; /* OY_LCM2_PARSE_CGATS -------------------------- */ const char *l2cms_texts_profile_create[4] = {"can_handle","create_profile","help",0}; /* OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_BRADFORD_REGISTRATION -------------------------- */ /** Function lcm2AbstractWhitePointBradford * @brief create a White point correction profile with Bradford * * Abstract profiles can easily be merged into a multi profile transform. 
* * @see lcm2CreateAbstractWhitePointProfileBradford() * * @param src_iccXYZ the source white point * @param illu_iccXYZ the illumination white point * @param icc_profile_flags profile flags * * @version Oyranos: 0.9.7 * @date 2018/03/02 * @since 2017/06/05 (Oyranos: 0.9.7) */ oyProfile_s* lcm2AbstractWhitePointBradford ( double * src_iccXYZ, double * illu_iccXYZ, uint32_t icc_profile_flags ) { int error = 0; cmsHPROFILE abs = NULL; char * my_abstract_file_name = NULL; double profile_version = 2.4; oyProfile_s * prof = NULL; l2cms_msg( oyMSG_DBG, NULL, OY_DBG_FORMAT_ "XYZ %g %g %g -> %g %g %g", OY_DBG_ARGS_, src_iccXYZ[0], src_iccXYZ[1], src_iccXYZ[2], illu_iccXYZ[0], illu_iccXYZ[1], illu_iccXYZ[2] ); if(icc_profile_flags & OY_ICC_VERSION_2) profile_version = 4.3; /* NOTE(review): OY_ICC_VERSION_2 selecting profile_version 4.3 looks inverted — l2cmsGamutCheckAbstract maps OY_ICC_VERSION_2 to 2.4; confirm intent. */ error = lcm2CreateAbstractWhitePointProfileBradford( src_iccXYZ, illu_iccXYZ, 15, profile_version, & my_abstract_file_name, &abs ); if(error || !abs) { l2cms_msg( oyMSG_WARN, (oyStruct_s*)abs, OY_DBG_FORMAT_ " " "failed to build white point effect: %s", OY_DBG_ARGS_, oyNoEmptyString_m_(my_abstract_file_name) ); } else { void * data; size_t size = 0; data = lcm2WriteProfileToMem( abs, &size, oyAllocateFunc_ ); prof = oyProfile_FromMem( size, data, 0,0 ); if(data && size) oyFree_m_( data ); } if(oy_debug && getenv("OY_DEBUG_WRITE")) { char * t = 0; oyStringAddPrintf( &t, 0,0, "%04d-%s-abstract-wtptB[%d]", ++oy_debug_write_id,CMM_NICK,oyStruct_GetId((oyStruct_s*)prof)); lcm2WriteProfileToFile( abs, t, NULL,NULL ); oyFree_m_(t); } oyFree_m_(my_abstract_file_name); if(abs) l2cmsCloseProfile( abs ); return prof; } #define OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_BRADFORD_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH \ "create_profile.white_point_adjust.bradford.icc._" CMM_NICK "._CPU" /** * This function implements oyMOptions_Handle_f. * * @param[in] options expects at least two options * - "src_iccXYZ": The option shall be a double[3] array. 
* - "illu_iccXYZ": The option shall be a double[3] array. * - "icc_profile_flags" ::OY_ICC_VERSION_2 and ::OY_ICC_VERSION_4 let select version 2 and 4 profiles separately. * This option shall be a integer. * @param[in] command "//" OY_TYPE_STD "/create_profile.white_point_adjust.bradford" * @param[out] result will contain a oyProfile_s in "icc_profile.create_profile.white_point_adjust.bradford" * * This function uses internally lcm2AbstractWhitePointBradford(). * * @version Oyranos: 0.9.7 * @date 2018/03/02 * @since 2017/06/05 (Oyranos: 0.9.7) */ int l2cmsMOptions_Handle4 ( oyOptions_s * options, const char * command, oyOptions_s ** result ) { int error = 0; double src_iccXYZ[3] = {-1,-1,-1}, illu_iccXYZ[3] = {-1,-1,-1}; if(oyFilterRegistrationMatch(command,"can_handle", 0)) { if(oyFilterRegistrationMatch(command,"create_profile.white_point_adjust.bradford", 0)) { /* only index 2 of each triple is probed here as a cheap presence check; the full triples are read in the handler branch */ error = !(oyOptions_FindDouble( options, "src_iccXYZ", 2, &src_iccXYZ[2] ) == 0 && oyOptions_FindDouble( options, "illu_iccXYZ", 2, &illu_iccXYZ[2] ) == 0 ); return error; } else return -1; } else if(oyFilterRegistrationMatch(command,"create_profile.white_point_adjust.bradford", 0)) { int32_t icc_profile_flags = 0; oyOptions_FindInt( options, "icc_profile_flags", 0, &icc_profile_flags ); oyProfile_s * p = NULL; if( oyOptions_FindDouble( options, "src_iccXYZ", 0, &src_iccXYZ[0] ) == 0 && oyOptions_FindDouble( options, "src_iccXYZ", 1, &src_iccXYZ[1] ) == 0 && oyOptions_FindDouble( options, "src_iccXYZ", 2, &src_iccXYZ[2] ) == 0 && oyOptions_FindDouble( options, "illu_iccXYZ", 0, &illu_iccXYZ[0] ) == 0 && oyOptions_FindDouble( options, "illu_iccXYZ", 1, &illu_iccXYZ[1] ) == 0 && oyOptions_FindDouble( options, "illu_iccXYZ", 2, &illu_iccXYZ[2] ) == 0 ) p = lcm2AbstractWhitePointBradford( src_iccXYZ, illu_iccXYZ, icc_profile_flags ); if(p) { oyOption_s * o = oyOption_FromRegistration( OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_BRADFORD_REGISTRATION ".icc_profile", 0 ); error = oyOption_MoveInStruct( o, (oyStruct_s**) &p ); 
if(!*result) *result = oyOptions_New(0); oyOptions_MoveIn( *result, &o, -1 ); } else l2cms_msg( oyMSG_WARN, (oyStruct_s*)options, OY_DBG_FORMAT_ "effect creation failed", OY_DBG_ARGS_ ); } return 0; } /** * This function implements oyCMMinfoGetText_f. * * @version Oyranos: 0.9.7 * @date 2018/03/02 * @since 2017/06/06 (Oyranos: 0.9.7) */ const char * l2cmsInfoGetTextProfile4(const char * select, oyNAME_e type, oyStruct_s * context OY_UNUSED ) { if(strcmp(select, "can_handle")==0) { if(type == oyNAME_NICK) return "check"; else if(type == oyNAME_NAME) return _("check"); else return _("Check if this module can handle a certain command."); } else if(strcmp(select, "create_profile")==0) { if(type == oyNAME_NICK) return "white_point_adjust.bradford"; else if(type == oyNAME_NAME) return _("Create a ICC white point profile."); else return _("The littleCMS \"create_profile.white_point_adjust.bradford\" command lets you create ICC abstract profiles from CIE*XYZ coordinates for white point adjustment. The filter expects a oyOption_s object with name \"src_iccXYZ\" and \"illu_iccXYZ\" each containing a double triple value in range 0.0 - 2.0. The result will appear in \"icc_profile\" with the additional attributes \"create_profile.white_point_adjust.bradford\" as a oyProfile_s object."); } else if(strcmp(select, "help")==0) { if(type == oyNAME_NICK) return "help"; else if(type == oyNAME_NAME) return _("Create a ICC abstract white point effect profile."); else return _("The littleCMS \"create_profile.white_point_adjust.bradford\" command lets you create ICC abstract profiles from a pair of CIE*XYZ coordinates. See the \"create_profile\" info item."); } return 0; } /** l2cms_api10_cmm4 * @brief Node for Creating White Point Effect Profiles * * littleCMS 2 oyCMMapi10_s implementation * * For the front end API see oyOptions_Handle(). The backend options * are described in l2cmsMOptions_Handle4(). 
* * @version Oyranos: 0.9.7 * @date 2018/03/02 * @since 2018/03/02 (Oyranos: 0.9.7) */ oyCMMapi10_s_ l2cms_api10_cmm4 = { oyOBJECT_CMM_API10_S, 0,0,0, (oyCMMapi_s*) & l2cms_api10_cmm5, l2cmsCMMInit, l2cmsCMMMessageFuncSet, OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_BRADFORD_REGISTRATION, CMM_VERSION, CMM_API_VERSION, /**< int32_t module_api[3] */ 0, /* id_; keep empty */ 0, /* api5_; keep empty */ 0, /* runtime_context */ l2cmsInfoGetTextProfile4, /**< getText */ (char**)l2cms_texts_profile_create, /**<texts; list of arguments to getText*/ l2cmsMOptions_Handle4 /**< oyMOptions_Handle_f oyMOptions_Handle */ }; /* OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_BRADFORD_REGISTRATION -------------------------- */ /* OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_LAB_REGISTRATION -------------------------- */ /** Function lcm2AbstractWhitePoint * @brief create a White point correction profile * * Abstract profiles can easily be merged into a multi profile transform. * * @see lcm2CreateAbstractWhitePointProfile() * * @param cie_a the white point coordinate * @param cie_b the white point coordinate * @param icc_profile_flags profile flags * * @version Oyranos: 0.9.7 * @since 2017/06/05 (Oyranos: 0.9.7) * @date 2018/01/17 */ oyProfile_s* lcm2AbstractWhitePoint ( double cie_a, double cie_b, uint32_t icc_profile_flags ) { int error = 0; cmsHPROFILE abs = NULL; char * my_abstract_file_name = NULL; double profile_version = 2.4; oyProfile_s * prof = NULL; l2cms_msg( oyMSG_DBG, NULL, OY_DBG_FORMAT_ "cie_ab %g %g", OY_DBG_ARGS_, cie_a, cie_b ); if(icc_profile_flags & OY_ICC_VERSION_2) profile_version = 4.3; /* NOTE(review): OY_ICC_VERSION_2 selecting profile_version 4.3 looks inverted — l2cmsGamutCheckAbstract maps OY_ICC_VERSION_2 to 2.4; confirm intent. */ error = lcm2CreateAbstractWhitePointProfileLab( cie_a, cie_b, 15, profile_version, &my_abstract_file_name, &abs ); if(error || !abs) { l2cms_msg( oyMSG_WARN, (oyStruct_s*)abs, OY_DBG_FORMAT_ " " "failed to build white point effect: %s", OY_DBG_ARGS_, oyNoEmptyString_m_(my_abstract_file_name) ); } else { void * data; size_t size = 0; data = lcm2WriteProfileToMem( abs, &size, oyAllocateFunc_ ); prof = 
oyProfile_FromMem( size, data, 0,0 ); if(data && size) oyFree_m_( data ); } if(oy_debug && getenv("OY_DEBUG_WRITE")) { char * t = 0; oyStringAddPrintf( &t, 0,0, "%04d-%s-abstract-wtptL[%d]", ++oy_debug_write_id,CMM_NICK,oyStruct_GetId((oyStruct_s*)prof)); lcm2WriteProfileToFile( abs, t, NULL,NULL ); oyFree_m_(t); } oyFree_m_(my_abstract_file_name); if(abs) l2cmsCloseProfile( abs ); return prof; } #define OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_LAB_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH \ "create_profile.white_point_adjust.lab.icc._" CMM_NICK "._CPU" /** * This function implements oyMOptions_Handle_f. * * @param[in] options expects at least two options * - "cie_a": The option shall be a double. * - "cie_b": The option shall be a double. * - "icc_profile_flags" ::OY_ICC_VERSION_2 and ::OY_ICC_VERSION_4 let select version 2 and 4 profiles separately. * This option shall be a integer. * @param[in] command "//" OY_TYPE_STD "/create_profile.white_point_adjust.lab" * @param[out] result will contain a oyProfile_s in "icc_profile.create_profile.white_point_adjust.lab" * * This function uses internally lcm2AbstractWhitePoint(). 
* * @version Oyranos: 0.9.7 * @since 2017/06/05 (Oyranos: 0.9.7) * @date 2017/06/05 */ int l2cmsMOptions_Handle3 ( oyOptions_s * options, const char * command, oyOptions_s ** result ) { int error = 0; double cie_a = -1, cie_b = -1; if(oyFilterRegistrationMatch(command,"can_handle", 0)) { if(oyFilterRegistrationMatch(command,"create_profile.white_point_adjust.lab", 0)) { error = oyOptions_FindDouble( options, "cie_a", 0, &cie_a ); /* NOTE(review): only "cie_a" is probed here although the handler branch requires both "cie_a" and "cie_b" — confirm whether "cie_b" should be checked too. */ return error; } else return -1; } else if(oyFilterRegistrationMatch(command,"create_profile.white_point_adjust.lab", 0)) { int32_t icc_profile_flags = 0; oyOptions_FindInt( options, "icc_profile_flags", 0, &icc_profile_flags ); oyProfile_s * p = NULL; if( oyOptions_FindDouble( options, "cie_a", 0, &cie_a ) == 0 && oyOptions_FindDouble( options, "cie_b", 0, &cie_b ) == 0 ) p = lcm2AbstractWhitePoint( cie_a, cie_b, icc_profile_flags ); if(p) { oyOption_s * o = oyOption_FromRegistration( OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_LAB_REGISTRATION ".icc_profile", 0 ); error = oyOption_MoveInStruct( o, (oyStruct_s**) &p ); if(!*result) *result = oyOptions_New(0); oyOptions_MoveIn( *result, &o, -1 ); } else l2cms_msg( oyMSG_WARN, (oyStruct_s*)options, OY_DBG_FORMAT_ "effect creation failed", OY_DBG_ARGS_ ); } return 0; } /** * This function implements oyCMMinfoGetText_f. 
* * @version Oyranos: 0.9.7 * @date 2018/03/01 * @since 2017/06/06 (Oyranos: 0.9.7) */ const char * l2cmsInfoGetTextProfileC3(const char * select, oyNAME_e type, oyStruct_s * context OY_UNUSED ) { if(strcmp(select, "can_handle")==0) { if(type == oyNAME_NICK) return "check"; else if(type == oyNAME_NAME) return _("check"); else return _("Check if this module can handle a certain command."); } else if(strcmp(select, "create_profile")==0) { if(type == oyNAME_NICK) return "white_point_adjust.lab"; else if(type == oyNAME_NAME) return _("Create a ICC white point profile."); else return _("The littleCMS \"create_profile.white_point_adjust.lab\" command lets you create ICC abstract profiles from CIE*ab coordinates for white point adjustment. The filter expects a oyOption_s object with name \"cie_a\" and \"cie_b\" each containing a double value in range -0.5 - 0.5. The result will appear in \"icc_profile\" with the additional attributes \"create_profile.white_point_adjust.lab\" as a oyProfile_s object."); } else if(strcmp(select, "help")==0) { if(type == oyNAME_NICK) return "help"; else if(type == oyNAME_NAME) return _("Create a ICC abstract white point effect profile."); else return _("The littleCMS \"create_profile.white_point_adjust.lab\" command lets you create ICC abstract profiles from CIE*ab coordinates. See the \"create_profile\" info item."); } return 0; } /** l2cms_api10_cmm3 * @brief Node for Creating White Point Effect Profiles * * littleCMS 2 oyCMMapi10_s implementation * * For the front end API see oyOptions_Handle(). The backend options * are described in l2cmsMOptions_Handle3(). 
* * @version Oyranos: 0.9.7 * @since 2017/06/05 (Oyranos: 0.9.7) * @date 2017/06/05 */ oyCMMapi10_s_ l2cms_api10_cmm3 = { oyOBJECT_CMM_API10_S, 0,0,0, (oyCMMapi_s*) & l2cms_api10_cmm4, l2cmsCMMInit, l2cmsCMMMessageFuncSet, OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_LAB_REGISTRATION, CMM_VERSION, CMM_API_VERSION, /**< int32_t module_api[3] */ 0, /* id_; keep empty */ 0, /* api5_; keep empty */ 0, /* runtime_context */ l2cmsInfoGetTextProfileC3, /**< getText */ (char**)l2cms_texts_profile_create, /**<texts; list of arguments to getText*/ l2cmsMOptions_Handle3 /**< oyMOptions_Handle_f oyMOptions_Handle */ }; /* OY_LCM2_CREATE_ABSTRACT_WHITE_POINT_LAB_REGISTRATION -------------------------- */ /* OY_LCM2_CREATE_ABSTRACT_PROOFING_REGISTRATION -------------------------- */ /** Function l2cmsGamutCheckAbstract * @brief convert a proofing profile into a abstract one * * Abstract profiles can easily be merged into a multi profile transform. * * @param proof the proofing profile; owned by the * function * @param flags the gamut check and softproof flags * @param intent rendering intent * @param intent_proof proof rendering intent * @param icc_profile_flags profile flags * * @version Oyranos: 0.1.11 * @since 2009/11/04 (Oyranos: 0.1.10) * @date 2010/08/14 */ cmsHPROFILE l2cmsGamutCheckAbstract ( oyProfile_s * proof, cmsUInt32Number flags, int intent, int intent_proof, uint32_t icc_profile_flags ) { int error = 0; #if LCMS_VERSION >= 2060 cmsContext tc = l2cmsCreateContext( NULL, NULL ); /* threading context */ l2cmsSetLogErrorHandlerTHR( tc, l2cmsErrorHandlerFunction ); #else void * tc = NULL; #endif cmsHPROFILE gmt = 0, hLab = 0, hproof = 0; cmsHTRANSFORM tr = 0; cmsHTRANSFORM ptr[2] = {0,0}; l2cms_msg( oyMSG_DBG, (oyStruct_s*)proof, OY_DBG_FORMAT_ "softproofing %d gamutcheck %d intent %d intent_proof %d", OY_DBG_ARGS_, flags & cmsFLAGS_SOFTPROOFING, flags & cmsFLAGS_GAMUTCHECK, intent, intent_proof ); if(!(flags & cmsFLAGS_GAMUTCHECK || flags & cmsFLAGS_SOFTPROOFING)) return gmt; 
hLab = l2cmsCreateLab4ProfileTHR(tc, l2cmsD50_xyY()); #if LCMS_VERSION < 2060 hproof = l2cmsAddProfile( proof ); #else { const char * fn = oyProfile_GetFileName( proof, -1 ); hproof = l2cmsOpenProfileFromFileTHR( tc, fn, "r" ); } #endif if(!hLab || !hproof) { l2cms_msg( oyMSG_ERROR, (oyStruct_s*)proof, OY_DBG_FORMAT_ "hLab or hproof failed", OY_DBG_ARGS_); goto clean; } tr = l2cmsCreateProofingTransformTHR ( tc, hLab, TYPE_Lab_FLT, hLab, TYPE_Lab_FLT, hproof, intent, /* TODO The INTENT_ABSOLUTE_COLORIMETRIC should lead to paper simulation, but does take white point into account. Do we want this? */ intent_proof, flags | cmsFLAGS_KEEP_SEQUENCE); if(!tr) { l2cms_msg( oyMSG_ERROR, (oyStruct_s*)proof, OY_DBG_FORMAT_ "cmsCreateProofingTransform() failed", OY_DBG_ARGS_); error = 1; } ptr[0] = tr; ptr[1] = flags & cmsFLAGS_GAMUTCHECK ? (oyPointer)1 : 0; if(!error) { const char * proof_meta[] = { "EFFECT_,CMF_", "EFFECT_class", "proof,saturation,contrast,atom", "EFFECT_saturation", "yes,reduce", "EFFECT_lightness", "no", "EFFECT_contrast", "yes,reduce", "CMF_binary", "lcm2profile", "CMF_version", "0.9.7", "CMF_product", "Oyranos", 0,0 }; const char * desc = oyProfile_GetText( proof, oyNAME_DESCRIPTION ); lcm2CreateAbstractProfile ( lcm2SamplerProof, ptr, "*lab", // CIE*Lab l2cmsPROOF_LUT_GRID_RASTER, icc_profile_flags & OY_ICC_VERSION_2 ? 
2.4 : 4.2, /* NOTE(review): 4.2 here vs 4.3 in the white point profile creators — confirm which ICC v4 minor version is intended */ "proofing", NULL, "proofing", "", "", ICC_2011_LICENSE, desc, "http://www.oyranos.org", proof_meta, &gmt ); } if(!gmt) { l2cms_msg( oyMSG_WARN, (oyStruct_s*)proof, OY_DBG_FORMAT_ " " "failed to build proof", OY_DBG_ARGS_ ); goto clean; } if(oy_debug && getenv("OY_DEBUG_WRITE")) { char * t = 0; oyStringAddPrintf( &t, 0,0, "%04d-%s-abstract-proof[%d]", ++oy_debug_write_id,CMM_NICK,oyStruct_GetId((oyStruct_s*)proof)); lcm2WriteProfileToFile( gmt, t, NULL,NULL ); oyFree_m_(t); } clean: if(hLab) { l2cmsCloseProfile( hLab ); hLab = 0; } if(tr) { l2cmsDeleteTransform( tr ); tr = 0; } /* NOTE(review): 'hproof' (opened above) and, for LCMS >= 2.6, the context 'tc' are not released on this path — possible handle leak; confirm. */ return gmt; } /** * This function implements oyMOptions_Handle_f. * * @param[in] options expects at least one option * - "proofing_profile": The option shall be a oyProfile_s. * - "icc_profile_flags" ::OY_ICC_VERSION_2 and ::OY_ICC_VERSION_4 let select version 2 and 4 profiles separately. * This option shall be a integer. * @param[in] command "//" OY_TYPE_STD "/create_profile.proofing_profile" * @param[out] result will contain a oyProfile_s in "icc_profile.create_profile.proofing_profile" * * This function uses internally l2cmsAddProofProfile(). 
* * @version Oyranos: 0.3.0 * @since 2011/02/21 (Oyranos: 0.3.0) * @date 2011/02/21 */ int l2cmsMOptions_Handle2 ( oyOptions_s * options, const char * command, oyOptions_s ** result ) { int error = 0; oyProfile_s * prof = 0, * p = 0; if(oyFilterRegistrationMatch(command,"can_handle", 0)) { if(oyFilterRegistrationMatch(command,"create_profile", 0)) { p = (oyProfile_s*) oyOptions_GetType( options,-1, "proofing_profile", oyOBJECT_PROFILE_S ); if(!p) { error = -1; } oyProfile_Release( &p ); return error; } else return -1; } else if(oyFilterRegistrationMatch(command,"create_profile", 0)) { int32_t icc_profile_flags = 0; oyOptions_FindInt( options, "icc_profile_flags", 0, &icc_profile_flags ); p = (oyProfile_s*) oyOptions_GetType( options,-1, "proofing_profile", oyOBJECT_PROFILE_S ); if(p) { int intent = l2cmsIntentFromOptions( options,0 ), intent_proof = l2cmsIntentFromOptions( options,1 ), flags = l2cmsFlagsFromOptions( options ); oyOption_s * o; l2cmsProfileWrap_s * wrap = l2cmsAddProofProfile( p, flags | cmsFLAGS_SOFTPROOFING, intent, intent_proof, icc_profile_flags ); oyProfile_Release( &p ); prof = oyProfile_FromMem( wrap->size, wrap->block, 0, 0 ); o = oyOption_FromRegistration( OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "icc_profile.create_profile.proofing_effect._" CMM_NICK, 0 ); error = oyOption_MoveInStruct( o, (oyStruct_s**) &prof ); if(!*result) *result = oyOptions_New(0); oyOptions_MoveIn( *result, &o, -1 ); } else l2cms_msg( oyMSG_WARN, (oyStruct_s*)options, OY_DBG_FORMAT_ " " "no option \"proofing_effect\" of type oyProfile_s found", OY_DBG_ARGS_ ); } return 0; } /** * This function implements oyCMMinfoGetText_f. 
 *
 *  @version Oyranos: 0.3.0
 *  @since   2011/02/21 (Oyranos: 0.3.0)
 *  @date    2011/02/21
 */
/* UI/help texts for the "create_profile.proofing_effect" node; answers the
 * selectors "can_handle", "create_profile" and "help" at three verbosity
 * levels (nick/name/description). Returns 0 for unknown selectors. */
const char * l2cmsInfoGetTextProfileC2(const char        * select,
                                       oyNAME_e            type,
                                       oyStruct_s        * context OY_UNUSED )
{
         if(strcmp(select, "can_handle")==0)
  {
         if(type == oyNAME_NICK)
      return "check";
    else if(type == oyNAME_NAME)
      return _("check");
    else
      return _("Check if this module can handle a certain command.");
  }
  else if(strcmp(select, "create_profile")==0)
  {
         if(type == oyNAME_NICK)
      return "proofing_effect";
    else if(type == oyNAME_NAME)
      return _("Create a ICC abstract proofing profile.");
    else
      return _("The littleCMS \"create_profile.proofing_effect\" command lets you create ICC abstract profiles from a given ICC profile for proofing. The filter expects a oyOption_s object with name \"proofing_profile\" containing a oyProfile_s as value. The options \"rendering_intent\", \"rendering_intent_proof\", \"rendering_bpc\", \"rendering_gamut_warning\", \"precalculation\", \"precalculation_curves\", \"cmyk_cmyk_black_preservation\", \"adaption_state\" and \"no_white_on_white_fixup\" are honoured. The result will appear in \"icc_profile\" with the additional attributes \"create_profile.proofing_effect\" as a oyProfile_s object.");
  }
  else if(strcmp(select, "help")==0)
  {
         if(type == oyNAME_NICK)
      return "help";
    else if(type == oyNAME_NAME)
      return _("Create a ICC proofing profile.");
    else
      return _("The littleCMS \"create_profile.proofing_effect\" command lets you create ICC abstract profiles from some given ICC profile. See the \"create_profile\" info item.");
  }
  return 0;
}
/* registration path of the proofing effect node */
#define OY_LCM2_CREATE_ABSTRACT_PROOFING_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH \
  "create_profile.proofing_effect.icc._" CMM_NICK "._CPU"

/** l2cms_api10_cmm2
 *  @brief    Node for Creating Proofing Effect Profiles
 *
 *  littleCMS 2 oyCMMapi10_s implementation
 *
 *  For the front end API see oyOptions_Handle(). The backend options
 *  are described in l2cmsMOptions_Handle2().
 *
 *  @version Oyranos: 0.3.0
 *  @since   2011/02/21 (Oyranos: 0.3.0)
 *  @date    2011/02/21
 */
oyCMMapi10_s_    l2cms_api10_cmm2 = {

  oyOBJECT_CMM_API10_S,
  0,0,0,
  (oyCMMapi_s*) & l2cms_api10_cmm3,    /* next api in the module chain */

  l2cmsCMMInit,
  l2cmsCMMMessageFuncSet,

  OY_LCM2_CREATE_ABSTRACT_PROOFING_REGISTRATION,

  CMM_VERSION,
  CMM_API_VERSION,                     /**< int32_t module_api[3] */
  0,                                   /* id_; keep empty */
  0,                                   /* api5_; keep empty */
  0,                                   /* runtime_context */

  l2cmsInfoGetTextProfileC2,           /**< getText */
  (char**)l2cms_texts_profile_create,  /**<texts; list of arguments to getText*/

  l2cmsMOptions_Handle2                /**< oyMOptions_Handle_f oyMOptions_Handle */
};
/* OY_LCM2_CREATE_ABSTRACT_PROOFING_REGISTRATION -------------------------- */

/* OY_LCM2_CREATE_MATRIX_REGISTRATION ------------------------------------- */

/** Function l2cmsCreateICCMatrixProfile
 *  @brief   create a profile from primaries, white point and one gamma value
 *
 *  Used for ICC from EDID, Camera RAW etc. Marti calls these matrix/shaper.
 *
 *  @param         gamma               single tone curve exponent for all channels
 *  @param         rx,ry,gx,gy,bx,by   CIE*xy chromaticities of the primaries
 *  @param         wx,wy               CIE*xy white point
 *  @param         icc_profile_flags   OY_ICC_VERSION_2 downgrades to ICC v2.4
 *  @return                            new profile; ownership passes to caller
 *
 *  NOTE(review): lcm2CreateICCMatrixProfile2() failure is not checked before
 *  l2cmsSetProfileVersion()/lcm2WriteProfileToMem() — presumably those calls
 *  tolerate a NULL handle; verify against the lcm2 helper library.
 *
 *  @version Oyranos: 0.9.6
 *  @date    2014/04/07
 *  @since   2009/10/24 (Oyranos: 0.1.10)
 */
oyProfile_s *      l2cmsCreateICCMatrixProfile (
                                       float             gamma,
                                       float rx, float ry,
                                       float gx, float gy,
                                       float bx, float by,
                                       float wx, float wy,
                                       int icc_profile_flags )
{
  cmsHPROFILE lp = 0;
  size_t size = 0;
  char * data = 0;

  int error = 0;
  oyProfile_s * prof = 0;

  lp = lcm2CreateICCMatrixProfile2( gamma, rx,ry, gx,gy, bx,by, wx,wy );
  /* downgrade to ICC v2.4 on request */
  if(icc_profile_flags & OY_ICC_VERSION_2)
    l2cmsSetProfileVersion(lp, 2.4);

  /* serialise; the oyProfile_s below is built from the memory blob */
  data = lcm2WriteProfileToMem( lp, &size, oyAllocateFunc_ );
  if(!size)
    l2cms_msg( oyMSG_WARN,0, OY_DBG_FORMAT_
               "l2cmsSaveProfileToMem failed for: red: %g %g green: %g %g blue: %g %g white: %g %g gamma: %g",
               OY_DBG_ARGS_,
               rx,ry, gx,gy, bx,by, wx,wy, gamma );
  l2cmsCloseProfile( lp );

  prof = oyProfile_FromMem( size, data, 0,0 );
  error = oyProfile_AddTagText( prof, icSigCopyrightTag, "no copyright; use freely" );
  if(error)
    WARNc2_S("%s %d", _("found issues"),error);
  oyDeAllocateFunc_( data ); size = 0;

  return prof;
}

/**
 *  This function implements
oyMOptions_Handle_f.
 *
 *  @param[in]     options             expects at least one option
 *                                     - "color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma"
 *                                       The option shall contain 9 double values.
 *                                     - "icc_profile_flags" ::OY_ICC_VERSION_2 and ::OY_ICC_VERSION_4 let select version 2 and 4 profiles separately. This option shall be a integer.
 *  @param[in]     command             "//" OY_TYPE_STD "/create_profile.color_matrix.icc"
 *  @param[out]    result              will contain a oyProfile_s in "icc_profile.create_profile.color_matrix"
 *
 *  This function uses internally l2cmsCreateICCMatrixProfile().
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/12/11 (Oyranos: 0.1.10)
 *  @date    2009/12/11
 */
int          l2cmsMOptions_Handle    ( oyOptions_s       * options,
                                       const char        * command,
                                       oyOptions_s      ** result )
{
  oyOption_s * o = 0;
  oyProfile_s * prof = 0;
  int error = 0;

  if(oyFilterRegistrationMatch(command,"can_handle", 0))
  {
    if(oyFilterRegistrationMatch(command,"create_profile", 0))
    {
      double val = 0.0;
      /* capability check: the option must exist and carry 9 doubles
       * (index 8 is the gamma value, probed here) */
      o = oyOptions_Find( options, "color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma", oyNAME_PATTERN );
      error = oyOptions_FindDouble( options,
                      "color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma",
                      8, &val );
      if(!o)
      {
        l2cms_msg( oyMSG_WARN, (oyStruct_s*)options, OY_DBG_FORMAT_ " "
                   "no option \"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma\" found",
                   OY_DBG_ARGS_ );
        error = 1;
      }
      else if( error != 0 )
      {
        l2cms_msg( oyMSG_WARN, (oyStruct_s*)options, OY_DBG_FORMAT_" "
                   "option \"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma\" %s",
                   OY_DBG_ARGS_,
                   (error < 0) ? "contains less than 9 required values" : "access returned with error" );
      }
      oyOption_Release( &o );

      return error;
    }
    else
      return 1;
  }
  else if(oyFilterRegistrationMatch(command,"create_profile", 0))
  {
    o = oyOptions_Find( options, "color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma", oyNAME_PATTERN );
    if(o)
    {
      int32_t icc_profile_flags = 0;
      oyOptions_FindInt( options, "icc_profile_flags", 0, &icc_profile_flags );
      /* value order: r.xy g.xy b.xy w.xy gamma — gamma (index 8) comes first
       * in the helper's signature */
      prof = l2cmsCreateICCMatrixProfile(
                      oyOption_GetValueDouble(o,8),
                      oyOption_GetValueDouble(o,0), oyOption_GetValueDouble(o,1),
                      oyOption_GetValueDouble(o,2), oyOption_GetValueDouble(o,3),
                      oyOption_GetValueDouble(o,4), oyOption_GetValueDouble(o,5),
                      oyOption_GetValueDouble(o,6), oyOption_GetValueDouble(o,7),
                      icc_profile_flags );
      oyOption_Release( &o );

      o = oyOption_FromRegistration( OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "icc_profile.create_profile.color_matrix._" CMM_NICK, 0 );
      error = oyOption_MoveInStruct( o, (oyStruct_s**) &prof );
      if(!*result)
        *result = oyOptions_New(0);
      oyOptions_MoveIn( *result, &o, -1 );
    }
  }

  return 0;
}

/**
 *  This function implements oyCMMinfoGetText_f.
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/12/11 (Oyranos: 0.1.10)
 *  @date    2009/12/11
 */
/* UI/help texts for the "create_profile.color_matrix" node. */
const char * l2cmsInfoGetTextProfileC ( const char        * select,
                                       oyNAME_e            type,
                                       oyStruct_s        * context OY_UNUSED )
{
         if(strcmp(select, "can_handle")==0)
  {
         if(type == oyNAME_NICK)
      return "check";
    else if(type == oyNAME_NAME)
      return _("check");
    else
      return _("Check if this module can handle a certain command.");
  }
  else if(strcmp(select, "create_profile")==0)
  {
         if(type == oyNAME_NICK)
      return "create_profile";
    else if(type == oyNAME_NAME)
      return _("Create a ICC matrix profile.");
    else
      return _("The littleCMS \"create_profile.color_matrix\" command lets you create ICC profiles from some given colorimetric coordinates. The filter expects a oyOption_s object with name \"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma\" containing 9 floats in the order of CIE*x for red, CIE*y for red, CIE*x for green, CIE*y for green, CIE*x for blue, CIE*y for blue, CIE*x for white, CIE*y for white and a gamma value.");
  }
  else if(strcmp(select, "help")==0)
  {
         if(type == oyNAME_NICK)
      return "help";
    else if(type == oyNAME_NAME)
      return _("Create a ICC matrix profile.");
    else
      return _("The littleCMS \"create_profile.color_matrix\" command lets you create ICC profiles from some given colorimetric coordinates. See the \"create_profile\" info item.");
  }
  return 0;
}
/* registration path of the color matrix node */
#define OY_LCM2_CREATE_MATRIX_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH \
  "create_profile.color_matrix.icc._" CMM_NICK "._CPU"

/** l2cms_api10_cmm
 *  @brief    Node for Creating simple Color Matrix Profiles
 *
 *  littleCMS 2 oyCMMapi10_s implementation
 *
 *  For the front end API see oyOptions_Handle(). The backend options
 *  are described in l2cmsMOptions_Handle().
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/12/11 (Oyranos: 0.1.10)
 *  @date    2009/12/11
 */
oyCMMapi10_s_    l2cms_api10_cmm = {

  oyOBJECT_CMM_API10_S,
  0,0,0,
  (oyCMMapi_s*) & l2cms_api10_cmm2,    /* next api in the module chain */

  l2cmsCMMInit,
  l2cmsCMMMessageFuncSet,

  OY_LCM2_CREATE_MATRIX_REGISTRATION,

  CMM_VERSION,
  CMM_API_VERSION,                     /**< int32_t module_api[3] */
  0,                                   /* id_; keep empty */
  0,                                   /* api5_; keep empty */
  0,                                   /* runtime_context */

  l2cmsInfoGetTextProfileC,            /**< getText */
  (char**)l2cms_texts_profile_create,  /**<texts; list of arguments to getText*/

  l2cmsMOptions_Handle                 /**< oyMOptions_Handle_f oyMOptions_Handle */
};
/* OY_LCM2_CREATE_MATRIX_REGISTRATION ------------------------------------- */

/** @} *//* lcm2_misc */
/** @} *//* misc_modules */

/** \addtogroup graph_modules
 *  @{ */
/** \addtogroup lcm2_graph lcm2 Module
 *  @brief      Little CMS 2 ICC style color conversion
 *
 *  The modules provide ICC style color space conversion and data processing.
 *
 *  @{ */

/* registration path of the context converter node below */
#define OY_LCM2_DATA_CONVERT_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH \
  "icc_color._" CMM_NICK "._CPU." oyCOLOR_ICC_DEVICE_LINK "_" l2cmsTRANSFORM

/** l2cms_api6_cmm
 *  @brief    Node for Converting a Device Link into a lcms2 CMM Context
 *
 *  littleCMS oyCMMapi6_s implementation
 *
 *  a filter providing CMM API's
 *
 *  This Node type uses internally l2cmsModuleData_Convert().
 *
 *  @version Oyranos: 0.1.10
 *  @since   2008/12/28 (Oyranos: 0.1.10)
 *  @date    2008/12/28
 */
oyCMMapi6_s_ l2cms_api6_cmm = {

  oyOBJECT_CMM_API6_S,
  0,0,0,
  (oyCMMapi_s*) & l2cms_api10_cmm,     /* next api in the module chain */

  l2cmsCMMInit,
  l2cmsCMMMessageFuncSet,

  OY_LCM2_DATA_CONVERT_REGISTRATION,

  CMM_VERSION,
  CMM_API_VERSION,                     /**< int32_t module_api[3] */
  0,                                   /* id_; keep empty */
  0,                                   /* api5_; keep empty */
  0,                                   /* runtime_context */

  oyCOLOR_ICC_DEVICE_LINK,             /* data_type_in, "oyDL" */
  l2cmsTRANSFORM,                      /* data_type_out, l2cmsTRANSFORM */
  l2cmsModuleData_Convert              /* oyModuleData_Convert_f oyModuleData_Convert */
};

/** l2cms_api7_cmm
 *  @brief    lcms2 ICC CMM Pixel Processor Engine Node
 *
 *  littleCMS oyCMMapi7_s implementation
 *
 *  a filter providing CMM API's
 *
 *  @version Oyranos: 0.1.10
 *  @since   2008/12/27 (Oyranos: 0.1.10)
 *  @date    2008/12/27
 */
oyCMMapi7_s_ l2cms_api7_cmm = {

  oyOBJECT_CMM_API7_S,
  0,0,0,
  (oyCMMapi_s*) & l2cms_api6_cmm,      /* next api in the module chain */

  l2cmsCMMInit,
  l2cmsCMMMessageFuncSet,

  OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "icc_color._" CMM_NICK "._icc_version_2._icc_version_4._CPU._NOACCEL",

  CMM_VERSION,
  CMM_API_VERSION,                     /**< int32_t module_api[3] */
  0,                                   /* id_; keep empty */
  0,                                   /* api5_; keep empty */
  0,                                   /* runtime_context */

  l2cmsFilterPlug_CmmIccRun,           /* oyCMMFilterPlug_Run_f */
  l2cmsTRANSFORM,                      /* data_type, l2cmsTRANSFORM */

  (oyConnector_s**) l2cms_cmmIccPlug_connectors,   /* plugs */
  1,                                   /* plugs_n */
  0,                                   /* plugs_last_add */
  (oyConnector_s**) l2cms_cmmIccSocket_connectors, /* sockets */
  1,                                   /* sockets_n */
  0,                                   /* sockets_last_add */
  NULL                                 /* properties */
};

/**
 *  This function
implements oyCMMGetText_f. * * @version Oyranos: 0.1.10 * @since 2009/12/22 (Oyranos: 0.1.10) * @date 2009/12/22 */ const char * l2cmsApi4UiGetText ( const char * select, oyNAME_e type, oyStruct_s * context ) { static char * category = 0; if(strcmp(select,"name") == 0 || strcmp(select,"help") == 0) { return l2cmsInfoGetText( select, type, context ); } else if(strcmp(select,"category")) { if(!category) { STRING_ADD( category, _("Color") ); STRING_ADD( category, _("/") ); /* CMM: abbreviation for Color Matching Module */ STRING_ADD( category, _("CMM") ); STRING_ADD( category, _("/") ); STRING_ADD( category, _("littleCMS") ); } if(type == oyNAME_NICK) return "category"; else if(type == oyNAME_NAME) return category; else return category; } return 0; } const char * l2cms_api4_ui_texts[] = {"name", "category", "help", 0}; /** l2cms_api4_ui * @brief lcms2 ICC CMM Node UI * * l2cms oyCMMapi4_s::ui implementation * * The UI for l2cms. * * @version Oyranos: 0.1.10 * @since 2009/09/09 (Oyranos: 0.1.10) * @date 2009/09/09 */ oyCMMui_s_ l2cms_api4_ui = { oyOBJECT_CMM_DATA_TYPES_S, /**< oyOBJECT_e type; */ 0,0,0, /* unused oyStruct_s fields; keep to zero */ CMM_VERSION, /**< int32_t version[3] */ CMM_API_VERSION, /**< int32_t module_api[3] */ l2cmsFilter_CmmIccValidateOptions, /* oyCMMFilter_ValidateOptions_f */ l2cmsWidgetEvent, /* oyWidgetEvent_f */ "Color/CMM/littleCMS2", /* category */ l2cms_extra_options, /* const char * options */ l2cmsGetOptionsUI, /* oyCMMuiGet_f oyCMMuiGet */ l2cmsApi4UiGetText, /* oyCMMGetText_f getText */ l2cms_api4_ui_texts,/* const char ** texts */ (oyCMMapiFilter_s*)&l2cms_api4_cmm /* oyCMMapiFilter_s * parent */ }; /** l2cms_api4_cmm * @brief lcms2 ICC CMM Context Setup and UI Node * * littleCMS oyCMMapi4_s implementation for color context setup * * A filter providing CMM API's. It creates specifically a ICC * device link profile for exchange with data processing CMM engines. * * This node type uses internally l2cmsFilterNode_CmmIccContextToMem(). 
 *
 *  @version Oyranos: 0.1.8
 *  @since   2008/07/18 (Oyranos: 0.1.8)
 *  @date    2008/07/18
 */
oyCMMapi4_s_ l2cms_api4_cmm = {

  oyOBJECT_CMM_API4_S,
  0,0,0,
  (oyCMMapi_s*) & l2cms_api7_cmm,      /* next api in the module chain */

  l2cmsCMMInit,
  l2cmsCMMMessageFuncSet,

  OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "icc_color._" CMM_NICK "._icc_version_2._icc_version_4._CPU._NOACCEL._effect",

  CMM_VERSION,
  CMM_API_VERSION,                     /**< int32_t module_api[3] */
  0,                                   /* id_; keep empty */
  0,                                   /* api5_; keep empty */
  0,                                   /* runtime_context */

  l2cmsFilterNode_CmmIccContextToMem,  /* oyCMMFilterNode_ContextToMem_f */
  l2cmsFilterNode_GetText,             /* oyCMMFilterNode_GetText_f */
  oyCOLOR_ICC_DEVICE_LINK,             /* context data_type */

  &l2cms_api4_ui                       /**< oyCMMui_s *ui */
};

/** @} *//* lcm2_graph */
/** @} *//* graph_modules */

/**
 *  This function implements oyCMMinfoGetText_f.
 *
 *  Module level info texts: "name", "manufacturer", "copyright" and "help",
 *  each at three verbosity levels. Returns 0 for unknown selectors.
 *
 *  @version Oyranos: 0.1.10
 *  @since   2008/12/23 (Oyranos: 0.1.10)
 *  @date    2008/12/30
 */
const char * l2cmsInfoGetText        ( const char        * select,
                                       oyNAME_e            type,
                                       oyStruct_s        * context OY_UNUSED )
{
         if(strcmp(select, "name")==0)
  {
         if(type == oyNAME_NICK)
      return CMM_NICK;
    else if(type == oyNAME_NAME)
      return _("Little CMS 2");
    else
      return _("LittleCMS 2 is a CMM, a color management engine; it implements fast transforms between ICC profiles. \"Little\" stands for its small overhead. With a typical footprint of about 100K including C runtime, you can color-enable your application without the pain of ActiveX, OCX, redistributables or binaries of any kind. We are using little cms in several commercial projects, however, we are offering lcms library free for anybody under an extremely liberal open source license.");
  }
  else if(strcmp(select, "manufacturer")==0)
  {
         if(type == oyNAME_NICK)
      return "Marti";
    else if(type == oyNAME_NAME)
      return "Marti Maria";
    else
      return _("littleCMS 2 project; www: http://www.littlecms.com; support/email: support@littlecms.com; sources: http://www.littlecms.com/downloads.htm; Oyranos wrapper: Kai-Uwe Behrmann for the Oyranos project");
  }
  else if(strcmp(select, "copyright")==0)
  {
         if(type == oyNAME_NICK)
      return "MIT";
    else if(type == oyNAME_NAME)
      return _("Copyright (c) 1998-2016 Marti Maria Saguer; MIT");
    else
      return _("MIT license: http://www.opensource.org/licenses/mit-license.php");
  }
  else if(strcmp(select, "help")==0)
  {
         if(type == oyNAME_NICK)
      return "help";
    else if(type == oyNAME_NAME)
      return _("The lcms \"color_icc\" filter is a one dimensional color conversion filter. It can both create a color conversion context, some precalculated for processing speed up, and the color conversion with the help of that context. The adaption part of this filter transforms the Oyranos color context, which is ICC device link based, to the internal lcms format.");
    else
      return _("The following options are available to create color contexts:\n \"profiles_simulation\", a option of type oyProfiles_s, can contain device profiles for proofing.\n \"profiles_effect\", a option of type oyProfiles_s, can contain abstract color profiles.\n The following Oyranos options are supported: \"rendering_gamut_warning\", \"rendering_intent_proof\", \"rendering_bpc\", \"rendering_intent\", \"proof_soft\" and \"proof_hard\".\n The additional lcms option is supported \"cmyk_cmyk_black_preservation\" [0 - none; 1 - LCMS_PRESERVE_PURE_K; 2 - LCMS_PRESERVE_K_PLANE], \"precalculation\": [0 - normal; 1 - cmsFLAGS_NOOPTIMIZE; 2 - cmsFLAGS_HIGHRESPRECALC, 3 - cmsFLAGS_LOWRESPRECALC], \"precalculation_curves\": [0 - none; 1 - cmsFLAGS_CLUT_POST_LINEARIZATION + cmsFLAGS_CLUT_PRE_LINEARIZATION], \"adaption_state\": [0.0 - not adapted to screen, 1.0 - full adapted to screen] and \"no_white_on_white_fixup\": [0 - force white on white, 1 - keep as is]." );
  }
  return 0;
}
/* arguments accepted by l2cmsInfoGetText() */
const char *l2cms_texts[5] = {"name","copyright","manufacturer","help",0};
oyIcon_s l2cms_icon = {oyOBJECT_ICON_S, 0,0,0, 0,0,0, "lcms_logo2.png"};

/** lcm2_cmm_module
 *  @brief    l2cms Module Infos
 *
 *  @version Oyranos: 0.1.10
 *  @since   2007/11/00 (Oyranos: 0.1.8)
 *  @date    2008/12/30
 */
oyCMM_s lcm2_cmm_module = {

  oyOBJECT_CMM_INFO_S,                 /**< type, struct type */
  0,0,0,                               /**< ,dynamic object functions */
  CMM_NICK,                            /**< cmm, ICC signature */
  "0.6",                               /**< backend_version */
  l2cmsInfoGetText,                    /**< getText */
  (char**)l2cms_texts,                 /**<texts; list of arguments to getText*/
  OYRANOS_VERSION,                     /**< oy_compatibility */

  (oyCMMapi_s*) & l2cms_api4_cmm,      /**< api */

  &l2cms_icon,                         /**< icon */
  l2cmsCMMInit                         /**< oyCMMinfoInit_f */
};
GB_unop__identity_int16_uint32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_int16_uint32
// op(A') function:  GB_unop_tran__identity_int16_uint32

// C type:   int16_t
// A type:   uint32_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    uint32_t aij = Ax [pA] ;    \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ; \
    Cx [pC] = z ;        \
}

// true if operator is the identity op with no typecasting
// (false here: uint32_t -> int16_t requires a narrowing cast)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_int16_uint32
(
    int16_t *Cx,            // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    { 
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        { 
            uint32_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    { 
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        { 
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_int16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template consumes the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
grib_bits_fast_big_endian_omp.c
/*
 * (C) Copyright 2005- ECMWF.
 *
 * This software is licensed under the terms of the Apache Licence Version 2.0
 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
 *
 * In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
 * virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
 */

/***************************************************************************
 *   Enrico Fucile  -  19.06.2007                                          *
 *                                                                         *
 ***************************************************************************/

/* Decode 'size' consecutive nbits-wide bit fields starting at bit *bitp of
 * buffer p into val, then advance *bitp past them.
 *
 * Two OpenMP-parallel paths: a fast path when fields never straddle a word
 * (nbits divides max_nbits and *bitp is aligned), and a general path that
 * stitches a straddling field from two words. Each thread lazily computes its
 * own word pointer/bit offset on its first iteration (firstprivate inited/pp),
 * which relies on schedule(static) handing each thread one contiguous chunk.
 * Bit extraction is done by the VALUE macro defined elsewhere in this file.
 *
 * Fix vs. previous revision: removed the unused local `uval` (dead
 * declaration, compiler warning) — same in the two decode functions below. */
int grib_decode_long_array(const unsigned char* p, long* bitp, long nbits, size_t size,
                           long* val)
{
    long i                   = 0;
    long countOfLeftmostBits = 0, leftmostBits = 0;
    long startBit;
    long remainingBits = nbits;
    long* pp           = (long*)p;
    int inited         = 0;

    if ((max_nbits % nbits == 0) && (*bitp % nbits == 0)) {
        /* aligned fast path: each field lives entirely inside one word */
#pragma omp parallel for schedule(static) firstprivate(inited, pp) private(startBit, countOfLeftmostBits, remainingBits, leftmostBits)
        for (i = 0; i < size; i++) {
            if (!inited) {
                /* per-thread setup: locate word and bit offset of field i */
                startBit      = *bitp + i * nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp += startBit / max_nbits;
                    startBit %= max_nbits;
                }
                inited = 1;
            }
            if (startBit == max_nbits) {
                startBit = 0;
                pp++;
            }
            val[i] = VALUE(*pp, startBit, remainingBits);
            startBit += remainingBits;
            remainingBits = nbits;
        }
    }
    else {
        /* general path: a field may straddle two consecutive words */
#pragma omp parallel for schedule(static) firstprivate(inited, pp) private(startBit, countOfLeftmostBits, remainingBits, leftmostBits)
        for (i = 0; i < size; i++) {
            if (!inited) {
                startBit      = *bitp + i * nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp += startBit / max_nbits;
                    startBit %= max_nbits;
                }
                inited = 1;
            }
            countOfLeftmostBits = startBit + remainingBits;
            if (countOfLeftmostBits > max_nbits) {
                /* take the leading bits from the current word ... */
                countOfLeftmostBits = max_nbits - startBit;
                remainingBits -= countOfLeftmostBits;
                leftmostBits = (VALUE(*(pp++), startBit, countOfLeftmostBits)) << remainingBits;
                startBit     = 0;
            }
            else
                leftmostBits = 0;
            /* ... and the rest from the (possibly next) word */
            val[i] = leftmostBits + (VALUE(*pp, startBit, remainingBits));
            startBit += remainingBits;
            remainingBits = nbits;
        }
    }

    *bitp += size * nbits;
    return GRIB_SUCCESS;
}

/* Same bit layout as grib_decode_long_array, but each decoded integer is
 * mapped to a double as val[i]*s*d + reference_value*d (GRIB simple packing).
 * Advances *bitp past the consumed fields. */
int grib_decode_double_array(const unsigned char* p, long* bitp, long nbits,
                             double reference_value, double s, double d,
                             size_t size, double* val)
{
    long i                   = 0;
    long countOfLeftmostBits = 0, leftmostBits = 0;
    long startBit;
    long remainingBits = nbits;
    long* pp           = (long*)p;
    int inited         = 0;
    double fact = s * d;                /* combined scale factor */
    double bias = reference_value * d;  /* combined offset */

    if ((max_nbits % nbits == 0) && (*bitp % nbits == 0)) {
#pragma omp parallel for schedule(static) firstprivate(inited, pp) private(startBit, countOfLeftmostBits, remainingBits, leftmostBits)
        for (i = 0; i < size; i++) {
            if (!inited) {
                startBit      = *bitp + i * nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp += startBit / max_nbits;
                    startBit %= max_nbits;
                }
                inited = 1;
            }
            if (startBit == max_nbits) {
                startBit = 0;
                pp++;
            }
            val[i] = VALUE(*pp, startBit, remainingBits);
            val[i] = val[i] * fact + bias;
            startBit += remainingBits;
            remainingBits = nbits;
        }
    }
    else {
#pragma omp parallel for schedule(static) firstprivate(inited, pp) private(startBit, countOfLeftmostBits, remainingBits, leftmostBits)
        for (i = 0; i < size; i++) {
            if (!inited) {
                startBit      = *bitp + i * nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp += startBit / max_nbits;
                    startBit %= max_nbits;
                }
                inited = 1;
            }
            countOfLeftmostBits = startBit + remainingBits;
            if (countOfLeftmostBits > max_nbits) {
                countOfLeftmostBits = max_nbits - startBit;
                remainingBits -= countOfLeftmostBits;
                leftmostBits = (VALUE(*(pp++), startBit, countOfLeftmostBits)) << remainingBits;
                startBit     = 0;
            }
            else
                leftmostBits = 0;
            val[i] = leftmostBits + (VALUE(*pp, startBit, remainingBits));
            val[i] = val[i] * fact + bias;
            startBit += remainingBits;
            remainingBits = nbits;
        }
    }

    *bitp += size * nbits;
    return GRIB_SUCCESS;
}

/* As grib_decode_double_array, but with a per-pair scale: element i uses
 * d[i/2], i.e. consecutive (real, imaginary) coefficient pairs share one
 * scale factor (GRIB complex/spectral packing). Advances *bitp. */
int grib_decode_double_array_complex(const unsigned char* p, long* bitp, long nbits,
                                     double reference_value, double s, double* d,
                                     size_t size, double* val)
{
    long i                   = 0;
    long countOfLeftmostBits = 0, leftmostBits = 0;
    long startBit;
    long remainingBits = nbits;
    long* pp           = (long*)p;
    int inited         = 0;

    if ((max_nbits % nbits == 0) && (*bitp % nbits == 0)) {
#pragma omp parallel for schedule(static) firstprivate(inited, pp) private(startBit, countOfLeftmostBits, remainingBits, leftmostBits)
        for (i = 0; i < size; i++) {
            if (!inited) {
                startBit      = *bitp + i * nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp += startBit / max_nbits;
                    startBit %= max_nbits;
                }
                inited = 1;
            }
            if (startBit == max_nbits) {
                startBit = 0;
                pp++;
            }
            val[i] = VALUE(*pp, startBit, remainingBits);
            val[i] = ((((val[i]) * s) + reference_value) * d[i / 2]);
            startBit += remainingBits;
            remainingBits = nbits;
        }
    }
    else {
#pragma omp parallel for schedule(static) firstprivate(inited, pp) private(startBit, countOfLeftmostBits, remainingBits, leftmostBits)
        for (i = 0; i < size; i++) {
            if (!inited) {
                startBit      = *bitp + i * nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp += startBit / max_nbits;
                    startBit %= max_nbits;
                }
                inited = 1;
            }
            countOfLeftmostBits = startBit + remainingBits;
            if (countOfLeftmostBits > max_nbits) {
                countOfLeftmostBits = max_nbits - startBit;
                remainingBits -= countOfLeftmostBits;
                leftmostBits = (VALUE(*pp, startBit, countOfLeftmostBits)) << remainingBits;
                startBit     = 0;
                pp++;
            }
            else
                leftmostBits = 0;
            val[i] = leftmostBits + (VALUE(*pp, startBit, remainingBits));
            val[i] = ((((val[i]) * s) + reference_value) * d[i / 2]);
            startBit += remainingBits;
            remainingBits = nbits;
        }
    }

    *bitp += size * nbits;
    return GRIB_SUCCESS;
}

/* Quantize n_vals doubles as round((val*d - reference_value) * divisor) and
 * pack them as nbits-wide fields into p starting at bit *bitp; advances *bitp.
 * Serial (not OpenMP): successive fields read-modify-write shared words.
 * Note: `a << max_nbits - b` parses as `a << (max_nbits - b)` — intentional. */
int grib_encode_double_array(size_t n_vals, const double* val, long nbits,
                             double reference_value, double d, double divisor,
                             unsigned char* p, long* bitp)
{
    long* destination = (long*)p;
    double* v         = (double*)val; /* cast away const; values are only read */

    long countOfLeftmostBits = 0, startBit = 0, remainingBits = 0, rightmostBits = 0;
    unsigned long uval = 0;
    size_t i           = 0;

    startBit      = *bitp;
    remainingBits = nbits;

    if (startBit >= max_nbits) {
        destination += startBit / max_nbits;
        startBit %= max_nbits;
    }

    if ((max_nbits % nbits == 0) && (*bitp % nbits == 0)) {
        /* aligned fast path */
        for (i = 0; i < n_vals; i++) {
            uval = (unsigned long)(((((*v) * d) - reference_value) * divisor) + 0.5);
            if (startBit == max_nbits) {
                startBit = 0;
                destination++;
            }
            rightmostBits = VALUE(uval, max_nbits - remainingBits, remainingBits);
            *destination  = ((*destination) & ~MASKVALUE(startBit, remainingBits)) +
                            (rightmostBits << max_nbits - (remainingBits + startBit));
            startBit += remainingBits;
            remainingBits = nbits;
            v++;
        }
    }
    else {
        /* general path: a field may straddle two consecutive words */
        for (i = 0; i < n_vals; i++) {
            countOfLeftmostBits = startBit + remainingBits;
            uval                = (unsigned long)(((((*v) * d) - reference_value) * divisor) + 0.5);
            if (countOfLeftmostBits > max_nbits) {
                /* write the leading bits into the tail of the current word */
                countOfLeftmostBits = max_nbits - startBit;
                startBit            = max_nbits - remainingBits;
                remainingBits -= countOfLeftmostBits;
                *destination = (((*destination) >> countOfLeftmostBits) << countOfLeftmostBits) +
                               (VALUE(uval, startBit, countOfLeftmostBits));
                startBit = 0;
                destination++;
            }
            rightmostBits = VALUE(uval, max_nbits - remainingBits, remainingBits);
            *destination  = ((*destination) & ~MASKVALUE(startBit, remainingBits)) +
                            (rightmostBits << max_nbits - (remainingBits + startBit));
            startBit += remainingBits;
            remainingBits = nbits;
            v++;
        }
    }

    *bitp += n_vals * nbits;
    return GRIB_SUCCESS;
}

/* As grib_encode_double_array, but each value is additionally multiplied by
 * the per-pair scale scal[i/2] before quantization (complex packing).
 * Fix vs. previous revision: return GRIB_SUCCESS (== 0) instead of a bare 0,
 * for consistency with the sibling functions above. */
int grib_encode_double_array_complex(size_t n_vals, double* val, long nbits,
                                     double reference_value, double* scal,
                                     double d, double divisor,
                                     unsigned char* p, long* bitp)
{
    long* destination = (long*)p;
    double* v         = val;

    long countOfLeftmostBits = 0, startBit = 0, remainingBits = 0, rightmostBits = 0;
    unsigned long uval = 0;
    size_t i           = 0;

    startBit      = *bitp;
    remainingBits = nbits;

    if (startBit >= max_nbits) {
        destination += startBit / max_nbits;
        startBit %= max_nbits;
    }

    if ((max_nbits % nbits == 0) && (*bitp % nbits == 0)) {
        for (i = 0; i < n_vals; i++) {
            uval = (unsigned long)(((((*v) * d * scal[i / 2]) - reference_value) * divisor) + 0.5);
            if (startBit == max_nbits) {
                startBit = 0;
                destination++;
            }
            rightmostBits = VALUE(uval, max_nbits - remainingBits, remainingBits);
            *destination  = ((*destination) & ~MASKVALUE(startBit, remainingBits)) +
                            (rightmostBits << max_nbits - (remainingBits + startBit));
            startBit += remainingBits;
            remainingBits = nbits;
            v++;
        }
    }
    else {
        for (i = 0; i < n_vals; i++) {
            countOfLeftmostBits = startBit + remainingBits;
            uval                = (unsigned long)(((((*v) * d * scal[i / 2]) - reference_value) * divisor) + 0.5);
            if (countOfLeftmostBits > max_nbits) {
                countOfLeftmostBits = max_nbits - startBit;
                startBit            = max_nbits - remainingBits;
                remainingBits -= countOfLeftmostBits;
                *destination = (((*destination) >> countOfLeftmostBits) << countOfLeftmostBits) +
                               (VALUE(uval, startBit, countOfLeftmostBits));
                startBit = 0;
                destination++;
            }
            rightmostBits = VALUE(uval, max_nbits - remainingBits, remainingBits);
            *destination  = ((*destination) & ~MASKVALUE(startBit, remainingBits)) +
                            (rightmostBits << max_nbits - (remainingBits + startBit));
            startBit += remainingBits;
            remainingBits = nbits;
            v++;
        }
    }

    *bitp += n_vals * nbits;
    return GRIB_SUCCESS;
}
DRB114-if-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/* When if() evaluates to true, this program has data races due to true
 * dependence within the loop at 65. Data race pair: a[i+1]@66:5 vs. a[i]@66:12 */
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <omp.h>

/* Loop-body kernel with a loop-carried true dependence: reads a[i], writes
 * a[i+1]. This is the intended race site of the benchmark when the enclosing
 * loop runs in parallel. */
void task(int *a, int i)
{
  a[i + 1] = a[i] + 1;
}

int main(int argc, char *argv[])
{
  int i;
  int len = 100;
  int a[100];

  /* Race-free initialization: each iteration writes a distinct a[i]. */
#pragma omp parallel for private (i)
  for (i = 0; i <= len - 1; i += 1) {
    a[i] = i;
  }

  /* Seed the RNG; note rand() is never called afterwards in this program. */
  srand((time(((void *)0))));

  /* NOTE(review): the header comment refers to an if() clause, but no
   * conditional OpenMP pragma is visible on this loop in this copy of the
   * file -- confirm against the upstream DRB114 source. As written, this
   * loop executes sequentially. */
  for (i = 0; i <= len - 1 - 1; i += 1) {
    task(&a[0], i);
  }

  printf("a[50]=%d\n", a[50]);
  return 0;
}
SolverLamg.h
/*
 * SolverLamg.h
 *
 *  Created on: 12.01.2015
 *      Author: Michael
 */
#ifndef SOLVERLAMG_H_
#define SOLVERLAMG_H_

#include "LevelHierarchy.h"
#include "../Smoother.h"
#include "../../algebraic/DenseMatrix.h"

namespace NetworKit {

/**
 * Status parameters of the solver.
 */
struct LAMGSolverStatus {
    // in
    count maxIters = std::numeric_limits<count>::max(); // maximum number of iterations
    count maxConvergenceTime = std::numeric_limits<count>::max(); // maximum time in milliseconds spent to solve the system
    double desiredResidualReduction = 1e-8; // desired reduction of the initial residual (finalResidual <= desiredResReduction * initialResidual)
    count numPreSmoothIters = 1; // number of pre smoothing iterations
    count numPostSmoothIters = 2; // number of post smoothing iterations

    // out
    count numIters; // number of iterations needed during solve phase
    double residual; // absolute final residual
    bool converged; // flag of conversion status
    std::vector<double> residualHistory; // history of absolute residuals
};

/**
 * @ingroup numerics
 * Implements the solve phase of LAMG (Lean Algebraic Multigrid by Livne et al.).
 */
template<class Matrix>
class SolverLamg {
private:
    // NOTE(review): because of these reference members, the defaulted copy and
    // move *assignment* operators declared below are implicitly deleted by the
    // language -- confirm whether assignability is actually needed by callers.
    LevelHierarchy<Matrix> &hierarchy;
    const Smoother<Matrix> &smoother;

    // data structures for iterate recombination
    std::vector<std::vector<Vector>> history;  // per level: ring buffer of recent iterates
    std::vector<std::vector<Vector>> rHistory; // per level: residuals matching history
    std::vector<index> latestIterate;          // per level: next write slot in the ring buffer
    std::vector<count> numActiveIterates;      // per level: number of valid ring-buffer entries

    // bStages for Elimination Levels
    std::vector<std::vector<Vector>> bStages;

    // Runs multigrid cycles until convergence/stall/limits (wrapped by solve()).
    void solveCycle(Vector& x, const Vector& b, int finest, LAMGSolverStatus& status);
    // One adaptive multigrid cycle over the levels [finest..coarsest].
    void cycle(Vector& x, const Vector& b, int finest, int coarsest, std::vector<count>& numVisits, std::vector<Vector>& X, std::vector<Vector>& B, const LAMGSolverStatus& status);
    void multigridCycle(index level, Vector& xf, const Vector& bf);
    // Stores (x, r) into the level's iterate-recombination ring buffer.
    void saveIterate(index level, const Vector& x, const Vector& r);
    // Invalidates the ring buffer of @a level.
    void clearHistory(index level);
    // Minimal-residual recombination of saved iterates; updates x in place.
    void minRes(index level, Vector& x, const Vector& r);

public:
    /**
     * Constructs a new solver instance for the specified @a hierarchy. The @a smoother will be used for relaxing and
     * solving the coarser solutions.
     * @param hierarchy Reference to the LevelHierarchy constructed by MultiLevelSetup.
     * @param smoother Reference to a smoother.
     */
    SolverLamg(LevelHierarchy<Matrix>& hierarchy, const Smoother<Matrix>& smoother) : hierarchy(hierarchy), smoother(smoother), bStages(hierarchy.size(), std::vector<Vector>()) {}

    SolverLamg (const SolverLamg<Matrix>& other) = default;

    SolverLamg (SolverLamg<Matrix>&& other) = default;

    virtual ~SolverLamg() = default;

    SolverLamg& operator=(SolverLamg<Matrix>&& other) = default;

    SolverLamg& operator=(const SolverLamg<Matrix>& other) = default;

    /**
     * Solves the system A*x = b for the given initial @a x and right-hand side @a b. More parameters can be specified
     * in @a status and additional output is also stored in @a status. After the solver finished, the approximate
     * solution is stored in @a x.
     * @param x[out] Reference to the initial guess to the solution and the approximation after the solver finished.
     * @param b The right-hand side vector.
     * @param status Reference to an LAMGSolverStatus.
     */
    void solve(Vector& x, const Vector& b, LAMGSolverStatus& status);
};

template<class Matrix>
void SolverLamg<Matrix>::solve(Vector& x, const Vector& b, LAMGSolverStatus& status) {
    bStages = std::vector<std::vector<Vector>>(hierarchy.size(), std::vector<Vector>());
    if (hierarchy.size() >= 2) {
        Vector bc = b;
        Vector xc = x;
        int finest = 0;

        // If the first coarse level is an elimination level, restrict the
        // problem once up front and remember to interpolate back afterwards.
        if (hierarchy.getType(1) == ELIMINATION) {
            hierarchy.at(1).restrict(b, bc, bStages[1]);
            if (hierarchy.at(1).getLaplacian().numberOfRows() == 1) {
                // problem collapsed to a single unknown; the zero vector solves it
                x = 0.0;
                return;
            } else {
                hierarchy.at(1).coarseType(x, xc);
                finest = 1;
            }
        }

        solveCycle(xc, bc, finest, status);

        if (finest == 1) { // interpolate from finest == ELIMINATION level back to actual finest level
            hierarchy.at(1).interpolate(xc, x, bStages[1]);
        } else {
            x = xc;
        }
    } else {
        solveCycle(x, b, 0, status);
    }

    double residual = (b - hierarchy.at(0).getLaplacian() * x).length();
    status.residual = residual;
}

template<class Matrix>
void SolverLamg<Matrix>::solveCycle(Vector& x, const Vector& b, int finest, LAMGSolverStatus& status) {
    Aux::Timer timer;
    timer.start();

    // data structures for iterate recombination
    // (MAX_COMBINED_ITERATES is defined elsewhere -- presumably in an included
    // header; it bounds the ring-buffer size per level.)
    history = std::vector<std::vector<Vector>>(hierarchy.size());
    rHistory = std::vector<std::vector<Vector>>(hierarchy.size());
    latestIterate = std::vector<index>(hierarchy.size(), 0);
    numActiveIterates = std::vector<count>(hierarchy.size(), 0);
    int coarsest = hierarchy.size() - 1;
    std::vector<count> numVisits(coarsest);
    std::vector<Vector> X(hierarchy.size());
    std::vector<Vector> B(hierarchy.size());
    for (index i = 0; i < hierarchy.size(); ++i) {
        history[i] = std::vector<Vector>(MAX_COMBINED_ITERATES, Vector(hierarchy.at(i).getNumberOfNodes()));
        rHistory[i] = std::vector<Vector>(MAX_COMBINED_ITERATES, Vector(hierarchy.at(i).getNumberOfNodes()));
    }

    Vector r = b - hierarchy.at(finest).getLaplacian() * x;
    double residual = r.length();
    double finalResidual = residual * status.desiredResidualReduction;
    double bestResidual = std::numeric_limits<double>::max();

    count iterations = 0;
    status.residualHistory.emplace_back(residual);
    count noResReduction = 0; // consecutive cycles without residual improvement
    // Cycle until converged, stalled (5 cycles without any improvement over the
    // best residual seen), out of iterations, or out of time.
    while (residual > finalResidual && noResReduction < 5 && iterations < status.maxIters && timer.elapsedMilliseconds() <= status.maxConvergenceTime) {
        cycle(x, b, finest, coarsest, numVisits, X, B, status);
        r = b - hierarchy.at(finest).getLaplacian() * x;
        residual = r.length();
        status.residualHistory.emplace_back(residual);
        if (residual < bestResidual) {
            noResReduction = 0;
            bestResidual = residual;
        } else {
            ++noResReduction;
        }
        iterations++;
    }

    timer.stop();
    status.numIters = iterations;
    status.residual = r.length();
    status.converged = r.length() <= finalResidual;
}

template<class Matrix>
void SolverLamg<Matrix>::cycle(Vector& x, const Vector& b, int finest, int coarsest, std::vector<count>& numVisits, std::vector<Vector>& X, std::vector<Vector>& B, const LAMGSolverStatus& status) {
    std::fill(numVisits.begin(), numVisits.end(), 0);
    X[finest] = x;
    B[finest] = b;

    int currLvl = finest;
    int nextLvl = finest;
    double maxVisits = 0.0; // visit budget for the current level (cycle index based)

    saveIterate(currLvl, X[currLvl], B[currLvl] - hierarchy.at(currLvl).getLaplacian() * X[currLvl]);
    while (true) {
        if (currLvl == coarsest) {
            nextLvl = currLvl - 1;
            if (currLvl == finest) { // finest level
                X[currLvl] = smoother.relax(hierarchy.at(currLvl).getLaplacian(), B[currLvl], X[currLvl], status.numPreSmoothIters);
            } else {
                // Coarsest level: direct LU solve on the (augmented) dense system.
                Vector bCoarse(B[currLvl].getDimension()+1, 0.0);
                for (index i = 0; i < B[currLvl].getDimension(); ++i) {
                    bCoarse[i] = B[currLvl][i];
                }
                Vector xCoarse = DenseMatrix::LUSolve(hierarchy.getCoarseMatrix(), bCoarse);
                for (index i = 0; i < X[currLvl].getDimension(); ++i) {
                    X[currLvl][i] = xCoarse[i];
                }
            }
        } else {
            // Decide whether to descend (coarser) or ascend (finer), bounded by
            // the per-level visit budget.
            if (currLvl == finest) {
                maxVisits = 1.0;
            } else {
                maxVisits = hierarchy.cycleIndex(currLvl) * numVisits[currLvl-1];
            }
            if (numVisits[currLvl] < maxVisits) {
                nextLvl = currLvl + 1;
            } else {
                nextLvl = currLvl - 1;
            }
        }

        if (nextLvl < finest) break;

        if (nextLvl > currLvl) { // preProcess: restrict problem to the coarser level
            numVisits[currLvl]++;

            if (hierarchy.getType(nextLvl) != ELIMINATION) {
                // pre-smoothing before restriction (aggregation levels only)
                X[currLvl] = smoother.relax(hierarchy.at(currLvl).getLaplacian(), B[currLvl], X[currLvl], status.numPreSmoothIters);
            }

            if (hierarchy.getType(nextLvl) == ELIMINATION) {
                hierarchy.at(nextLvl).restrict(B[currLvl], B[nextLvl], bStages[nextLvl]);
            } else {
                // restrict the residual, not the right-hand side
                hierarchy.at(nextLvl).restrict(B[currLvl] - hierarchy.at(currLvl).getLaplacian() * X[currLvl], B[nextLvl]);
            }

            hierarchy.at(nextLvl).coarseType(X[currLvl], X[nextLvl]);

            clearHistory(nextLvl);
        } else { // postProcess: interpolate the correction back to the finer level
            if (currLvl == coarsest || hierarchy.getType(currLvl+1) != ELIMINATION) {
                minRes(currLvl, X[currLvl], B[currLvl] - hierarchy.at(currLvl).getLaplacian() * X[currLvl]);
            }

            if (nextLvl > finest) {
                saveIterate(nextLvl, X[nextLvl], B[nextLvl] - hierarchy.at(nextLvl).getLaplacian() * X[nextLvl]);
            }

            if (hierarchy.getType(currLvl) == ELIMINATION) {
                hierarchy.at(currLvl).interpolate(X[currLvl], X[nextLvl], bStages[currLvl]);
            } else {
                // additive correction for aggregation levels
                Vector xf = X[nextLvl];
                hierarchy.at(currLvl).interpolate(X[currLvl], xf);
                X[nextLvl] += xf;
            }

            if (hierarchy.getType(currLvl) != ELIMINATION) {
                // post-smoothing after the correction
                X[nextLvl] = smoother.relax(hierarchy.at(nextLvl).getLaplacian(), B[nextLvl], X[nextLvl], status.numPostSmoothIters);
            }
        }

        currLvl = nextLvl;
    } // while

    // post-cycle finest
    if ((int64_t) hierarchy.size() > finest + 1 && hierarchy.getType(finest+1) != ELIMINATION) {
        // do an iterate recombination on calculated solutions
        minRes(finest, X[finest], B[finest] - hierarchy.at(finest).getLaplacian() * X[finest]);
    }

    // Laplacian systems are singular w.r.t. constant shifts; remove the mean.
    X[finest] -= X[finest].mean();
    x = X[finest];
}

template<class Matrix>
void SolverLamg<Matrix>::saveIterate(index level, const Vector& x, const Vector& r) {
    // update latest pointer (ring buffer of MAX_COMBINED_ITERATES slots)
    index i = latestIterate[level];
    latestIterate[level] = (i+1) % MAX_COMBINED_ITERATES;

    // update numIterates
    if (numActiveIterates[level] < MAX_COMBINED_ITERATES) {
        numActiveIterates[level]++;
    }

    // update history array
    history[level][i] = x;
    rHistory[level][i] = r;
}

template<class Matrix>
void SolverLamg<Matrix>::clearHistory(index level) {
    latestIterate[level] = 0;
    numActiveIterates[level] = 0;
}

template<class Matrix>
void SolverLamg<Matrix>::minRes(index level, Vector& x, const Vector& r) {
    if (numActiveIterates[level] > 0) {
        count n = numActiveIterates[level];

        // Build sparse matrices AE (residual differences) and E (iterate
        // differences) in CSR form. Pass 1 counts non-zeros per row; each
        // thread writes only ARowIdx[i+1]/ERowIdx[i+1] for its own i.
        std::vector<index> ARowIdx(r.getDimension()+1);
        std::vector<index> ERowIdx(r.getDimension()+1);

#pragma omp parallel for
        for (omp_index i = 0; i < static_cast<omp_index>(r.getDimension()); ++i) {
            for (index k = 0; k < n; ++k) {
                double AEvalue = r[i] - rHistory[level][k][i];
                if (std::fabs(AEvalue) > 1e-25) {
                    ++ARowIdx[i+1];
                }

                double Eval = history[level][k][i] - x[i];
                if (std::fabs(Eval) > 1e-25) {
                    ++ERowIdx[i+1];
                }
            }
        }

        // prefix sums turn the per-row counts into CSR row pointers
        for (index i = 0; i < r.getDimension(); ++i) {
            ARowIdx[i+1] += ARowIdx[i];
            ERowIdx[i+1] += ERowIdx[i];
        }

        std::vector<index> AColumnIdx(ARowIdx[r.getDimension()]);
        std::vector<double> ANonZeros(ARowIdx[r.getDimension()]);
        std::vector<index> EColumnIdx(ERowIdx[r.getDimension()]);
        std::vector<double> ENonZeros(ERowIdx[r.getDimension()]);

        // Pass 2 fills the column indices and values per row.
#pragma omp parallel for
        for (omp_index i = 0; i < static_cast<omp_index>(r.getDimension()); ++i) {
            for (index k = 0, aIdx = ARowIdx[i], eIdx = ERowIdx[i]; k < n; ++k) {
                double AEvalue = r[i] - rHistory[level][k][i];
                if (std::fabs(AEvalue) > 1e-25) {
                    AColumnIdx[aIdx] = k;
                    ANonZeros[aIdx] = AEvalue;
                    ++aIdx;
                }

                double Eval = history[level][k][i] - x[i];
                if (std::fabs(Eval) > 1e-25) {
                    EColumnIdx[eIdx] = k;
                    ENonZeros[eIdx] = Eval;
                    ++eIdx;
                }
            }
        }

        CSRMatrix AE(r.getDimension(), n, ARowIdx, AColumnIdx, ANonZeros, 0.0, true);
        CSRMatrix E(r.getDimension(), n, ERowIdx, EColumnIdx, ENonZeros, 0.0, true);

        // Solve the small normal-equations system (AE^T AE) alpha = AE^T r
        // approximately with 10 relaxation sweeps, then apply the correction.
        Vector alpha = smoother.relax(CSRMatrix::mTmMultiply(AE, AE), CSRMatrix::mTvMultiply(AE, r), Vector(n, 0.0), 10);
        x += E * alpha;
    }
}

} /* namespace NetworKit */

#endif /* SOLVERLAMG_H_ */
2Dfold.c
/* * minimum free energy * RNA secondary structure with * basepair distance d_1 to reference structure 1 and distance d_2 to reference structure 2 * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ctype.h> #include <string.h> #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/fold.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/params/basic.h" #ifdef _OPENMP #include <omp.h> #endif #include "ViennaRNA/2Dfold.h" /* ################################# # GLOBAL VARIABLES # ################################# */ int compute_2Dfold_F3 = 0; /* ################################# # PRIVATE VARIABLES # ################################# */ /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE void mfe_linear(vrna_fold_compound_t *vc); PRIVATE void mfe_circ(vrna_fold_compound_t *vc); PUBLIC void update_TwoDfold_params(TwoDfold_vars *vars); PRIVATE void backtrack_f5(unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void backtrack_c(unsigned int i, unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void backtrack_m(unsigned int i, unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void backtrack_m1(unsigned int i, unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void backtrack_fc(int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void backtrack_m2(unsigned int i, int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void adjustArrayBoundaries(int ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_real, int k_max_real, int *l_min_real, int *l_max_real); INLINE PRIVATE void preparePosteriorBoundaries(int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void 
updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void prepareArray(int ***array, int min_k, int max_k, int *min_l, int *max_l); INLINE PRIVATE void prepareArray2(unsigned long ***array, int min_k, int max_k, int *min_l, int *max_l); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ #if 0 PRIVATE void initialize_TwoDfold_vars(TwoDfold_vars *vars) { update_TwoDfold_params(vars); /* this call updates the params in the ViennaRNA fold.o which is a global, so be careful * whith calling it parallel... need a workarround or fix of ViennaRNA fold stuff */ update_fold_params(); } PUBLIC TwoDfold_solution ** TwoDfold(TwoDfold_vars *vars, int distance1, int distance2) { unsigned int i, d1, d2; unsigned int maxD1; unsigned int maxD2; unsigned int length; TwoDfold_solution **output; initialize_TwoDfold_vars(vars); if (fabs(vars->P->temperature - temperature) > 1e-6) update_TwoDfold_params(vars); vars->S = encode_sequence(vars->sequence, 0); vars->S1 = encode_sequence(vars->sequence, 1); make_ptypes(vars); maxD1 = vars->maxD1; maxD2 = vars->maxD2; if (distance1 >= 0) { if ((unsigned int)distance1 > maxD1) fprintf(stderr, "limiting maximum basepair distance 1 to %u\n", maxD1); else maxD1 = (unsigned int)distance1; } if (distance2 >= 0) { if ((unsigned int)distance2 > maxD2) fprintf(stderr, "limiting maximum basepair distance 2 to %u\n", maxD2); else maxD2 = (unsigned int)distance2; } vars->maxD1 = maxD1; vars->maxD2 = maxD2; output = (TwoDfold_solution **)vrna_alloc((vars->maxD1 + 1) * sizeof(TwoDfold_solution *)); mfe_linear(vars); if (vars->circ) mfe_circ(vars); length = vars->seq_length; for (d1 = 0; d1 <= maxD1; d1++) { output[d1] = (TwoDfold_solution *)vrna_alloc((vars->maxD2 + 1) * 
sizeof(TwoDfold_solution)); /* (continuation of the #if 0'd legacy TwoDfold() above) */
#ifdef _OPENMP
#pragma omp parallel for private(d2)
#endif
    for (d2 = 0; d2 <= maxD2; d2++) {
      output[d1][d2].en = (float)INF / (float)100.;
      output[d1][d2].s = NULL;
    }
    if ((d1 >= ((vars->circ) ? vars->k_min_values_fc : vars->k_min_values_f[length])) && (d1 <= ((vars->circ) ? vars->k_max_values_fc : vars->k_max_values_f[length]))) {
#ifdef _OPENMP
#pragma omp parallel for private(d2, i)
#endif
      for (d2 = ((vars->circ) ? vars->l_min_values_fc[d1] : vars->l_min_values_f[length][d1]); d2 <= ((vars->circ) ? vars->l_max_values_fc[d1] : vars->l_max_values_f[length][d1]); d2 += 2) {
        output[d1][d2].en = (float)((vars->circ) ? vars->E_Fc[d1][d2 / 2] : vars->E_F5[length][d1][d2 / 2]) / (float)100.;
        if (vars->do_backtrack && (output[d1][d2].en != (float)INF / (float)100.)) {
          char *mfe_structure = (char *)vrna_alloc(length + 1);
          for (i = 0; i < length; i++)
            mfe_structure[i] = '.';
          mfe_structure[i] = '\0';
          (vars->circ) ? backtrack_fc(d1, d2, mfe_structure, vars) : backtrack_f5(length, d1, d2, mfe_structure, vars);
          output[d1][d2].s = mfe_structure;
        }
      }
    }
  }
  return output;
}
#endif

/* Compute the MFE representative for every (k,l) base-pair-distance class of
 * the structure space of vars, optionally bounded by distance1/distance2
 * (negative = unbounded). Returns a vrna_alloc'ed list of vrna_sol_TwoD_t
 * terminated by an entry with k == l == INF; an entry with k == l == -1
 * collects the "remaining" partition that exceeded maxD1/maxD2. */
PUBLIC vrna_sol_TwoD_t *
vrna_mfe_TwoD(vrna_fold_compound_t *vars, int distance1, int distance2) {
  unsigned int i, d1, d2;
  unsigned int maxD1;
  unsigned int maxD2;
  unsigned int length;
  unsigned int counter = 0;
  int en = 0;
  vrna_sol_TwoD_t *output;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;
  maxD1 = vars->maxD1;
  maxD2 = vars->maxD2;
  matrices = vars->matrices;
  md = &(vars->params->model_details);
  /* clamp the user-supplied distance bounds to what the matrices can hold */
  if (distance1 >= 0) {
    if ((unsigned int)distance1 > maxD1)
      vrna_message_warning("vrna_mfe_TwoD@2Dfold.c: limiting maximum basepair distance 1 to %u\n", maxD1);
    else
      maxD1 = (unsigned int)distance1;
  }
  if (distance2 >= 0) {
    if ((unsigned int)distance2 > maxD2)
      vrna_message_warning("vrna_mfe_TwoD@2Dfold.c: limiting maximum basepair distance 2 to %u\n", maxD2);
    else
      maxD2 = (unsigned int)distance2;
  }
  vars->maxD1 = maxD1;
  vars->maxD2 = maxD2;
  /* upper bound on the number of (k,l) classes, plus the "rem" and end-marker entries */
  output = (vrna_sol_TwoD_t *)vrna_alloc((((vars->maxD1 + 1) * (vars->maxD2 + 2)) / 2 + 2) * sizeof(vrna_sol_TwoD_t));
  mfe_linear(vars);
  if (md->circ)
    mfe_circ(vars);
  length = vars->length;
  for (d1 = 0; d1 <= maxD1; d1++) {
    if ((d1 >= ((md->circ) ? matrices->k_min_Fc : matrices->k_min_F5[length])) && (d1 <= ((md->circ) ? matrices->k_max_Fc : matrices->k_max_F5[length]))) {
      /* d2 advances in steps of 2: only distance classes of matching parity exist */
      for (d2 = ((md->circ) ? matrices->l_min_Fc[d1] : matrices->l_min_F5[length][d1]); d2 <= ((md->circ) ? matrices->l_max_Fc[d1] : matrices->l_max_F5[length][d1]); d2 += 2) {
        en = ((md->circ) ? matrices->E_Fc[d1][d2 / 2] : matrices->E_F5[length][d1][d2 / 2]);
        if (en == INF)
          continue;
        output[counter].k = d1;
        output[counter].l = d2;
        output[counter].en = (float)en / (float)100.; /* dcal/mol -> kcal/mol scaling factor of 100 */
        if (md->backtrack) {
          char *mfe_structure = (char *)vrna_alloc(length + 1);
          for (i = 0; i < length; i++)
            mfe_structure[i] = '.';
          mfe_structure[i] = '\0';
          (md->circ) ? backtrack_fc((int)d1, (int)d2, mfe_structure, vars) : backtrack_f5(length, (int)d1, (int)d2, mfe_structure, vars);
          output[counter].s = mfe_structure;
        } else {
          output[counter].s = NULL;
        }
        counter++;
      }
    }
  }
  /* store entry for remaining partition if it exists */
  en = ((md->circ) ? matrices->E_Fc_rem : matrices->E_F5_rem[length]);
  if (en != INF) {
    output[counter].k = -1;
    output[counter].l = -1;
    output[counter].en = (float)en / (float)100.;
    if (md->backtrack) {
      char *mfe_structure = (char *)vrna_alloc(length + 1);
      for (i = 0; i < length; i++)
        mfe_structure[i] = '.';
      mfe_structure[i] = '\0';
      (md->circ) ? backtrack_fc(-1, -1, mfe_structure, vars) : backtrack_f5(length, -1, -1, mfe_structure, vars);
      output[counter].s = mfe_structure;
    } else {
      output[counter].s = NULL;
    }
    counter++;
  }
  /* insert end-marker entry */
  output[counter].k = output[counter].l = INF;
  counter++;
  /* resize to actual dataset amount */
  output = (vrna_sol_TwoD_t *)vrna_realloc(output, sizeof(vrna_sol_TwoD_t) * counter);
  return output;
}

/* Backtrack one secondary structure for the external-loop interval [1..j] in
 * distance class (k,l). Returns a vrna_alloc'ed dot-bracket string, or NULL
 * when j is too short to form any structure (j < TURN + 2). */
PUBLIC char *
vrna_backtrack5_TwoD(vrna_fold_compound_t *vc, int k, int l, unsigned int j) {
  unsigned int i;
  char *mfe_structure = (char *)vrna_alloc(j + 1);
  /* NOTE(review): on this early return the freshly allocated buffer is not
   * released -- looks like a small leak; confirm against upstream ViennaRNA. */
  if (j < TURN + 2)
    return NULL;
  for (i = 0; i < j; i++)
    mfe_structure[i] = '.';
  mfe_structure[i] = '\0';
  backtrack_f5(j, k, l, mfe_structure, vc);
  return mfe_structure;
}

/* Fill the 2D MFE dynamic-programming matrices (E_C, E_M, E_M1, ... and their
 * *_rem overflow entries) for linear RNA. (Definition continues past the end
 * of this chunk.) */
PRIVATE void
mfe_linear(vrna_fold_compound_t *vc) {
  unsigned int d, i, j, ij, maxD1, maxD2, seq_length, dia, dib, dja, djb, *referenceBPs1, *referenceBPs2, *mm1, *mm2, *bpdist;
  int cnt1, cnt2, cnt3, cnt4, d1, d2, energy, dangles, temp2, type, additional_en, *my_iindx, *jindx, circ, *rtype;
  short *S1, *reference_pt1, *reference_pt2;
  char *sequence, *ptype;
  vrna_param_t *P;
  vrna_mx_mfe_t *matrices;
  vrna_md_t *md;
  /* dereference things we often need */
  P = vc->params;
  md = &(P->model_details);
  matrices = vc->matrices;
  sequence = vc->sequence;
  seq_length = vc->length;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;
  S1 = vc->sequence_encoding;
  ptype = vc->ptype;
  rtype = &(md->rtype[0]);
  reference_pt1 = vc->reference_pt1;
  reference_pt2 = vc->reference_pt2;
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  mm1 = vc->mm1;
  mm2 = vc->mm2;
  bpdist = vc->bpdist;
  dangles = md->dangles;
  circ = md->circ;
  for (d = TURN + 2; d <= seq_length; d++) { /* i,j in [1..length] */
#ifdef _OPENMP
#pragma omp parallel for private(additional_en, j, energy, temp2, i, ij, dia,dib,dja,djb,cnt1,cnt2,cnt3,cnt4, d1, d2)
#endif
    for (j = d; j <= seq_length; j++) {
      unsigned int p, q, pq, u, maxp, dij;
      int type_2, type, tt,
no_close, base_d1, base_d2; i = j - d + 1; dij = j - i - 1; ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; no_close = (((type == 3) || (type == 4)) && no_closingGU); if (type) { /* we have a pair */ /* increase or decrease distance-to-reference value depending whether (i,j) is included in * reference or has to be introduced */ base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1; /* HAIRPIN STRUCTURES */ /* get distance to reference if closing the hairpin * d = dbp(T_{i,j}, {i,j}) */ d1 = base_d1 + referenceBPs1[ij]; d2 = base_d2 + referenceBPs2[ij]; int min_k, max_k, min_l, max_l; int real_min_k, real_max_k, *min_l_real, *max_l_real; min_l = min_k = 0; max_k = mm1[ij] + referenceBPs1[ij]; max_l = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[ij], &matrices->k_min_C[ij], &matrices->k_max_C[ij], &matrices->l_min_C[ij], &matrices->l_max_C[ij] ); preparePosteriorBoundaries(matrices->k_max_C[ij] - matrices->k_min_C[ij] + 1, matrices->k_min_C[ij], &real_min_k, &real_max_k, &min_l_real, &max_l_real ); prepareArray(&matrices->E_C[ij], matrices->k_min_C[ij], matrices->k_max_C[ij], matrices->l_min_C[ij], matrices->l_max_C[ij] ); #ifdef COUNT_STATES prepareArray2(&matrices->N_C[ij], matrices->k_min_C[ij], matrices->k_max_C[ij], matrices->l_min_C[ij], matrices->l_max_C[ij] ); #endif /* d1 and d2 are the distancies to both references introduced by closing a hairpin structure at (i,j) */ if ((d1 >= 0) && (d2 >= 0)) { if (((unsigned int)d1 <= maxD1) && ((unsigned int)d2 <= maxD2)) { matrices->E_C[ij][d1][d2 / 2] = (no_close) ? FORBIDDEN : E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, P); updatePosteriorBoundaries(d1, d2, &real_min_k, &real_max_k, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_C[ij][d1][d2 / 2] = 1; #endif } else { matrices->E_C_rem[ij] = (no_close) ? 
FORBIDDEN : E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, P); } } /* INTERIOR LOOP STRUCTURES */ maxp = MIN2(j - 2 - TURN, i + MAXLOOP + 1); for (p = i + 1; p <= maxp; p++) { unsigned int minq = p + TURN + 1; unsigned int ln_pre = dij + p; if (ln_pre > minq + MAXLOOP) minq = ln_pre - MAXLOOP - 1; for (q = minq; q < j; q++) { pq = my_iindx[p] - q; /* set distance to reference structure... */ type_2 = ptype[jindx[q] + p]; if (type_2 == 0) continue; type_2 = rtype[type_2]; /* get distance to reference if closing the interior loop * d2 = dbp(S_{i,j}, S_{p.q} + {i,j}) */ d1 = base_d1 + referenceBPs1[ij] - referenceBPs1[pq]; d2 = base_d2 + referenceBPs2[ij] - referenceBPs2[pq]; if (no_closingGU) if (no_close || (type_2 == 3) || (type_2 == 4)) if ((p > i + 1) || (q < j - 1)) continue; /* continue unless stack */ energy = E_IntLoop(p - i - 1, j - q - 1, type, type_2, S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P); if (matrices->E_C[pq] != NULL) { for (cnt1 = matrices->k_min_C[pq]; cnt1 <= matrices->k_max_C[pq]; cnt1++) { for (cnt2 = matrices->l_min_C[pq][cnt1]; cnt2 <= matrices->l_max_C[pq][cnt1]; cnt2 += 2) { if (matrices->E_C[pq][cnt1][cnt2 / 2] != INF) { if (((cnt1 + d1) <= maxD1) && ((cnt2 + d2) <= maxD2)) { matrices->E_C[ij][cnt1 + d1][(cnt2 + d2) / 2] = MIN2(matrices->E_C[ij][cnt1 + d1][(cnt2 + d2) / 2], matrices->E_C[pq][cnt1][cnt2 / 2] + energy ); updatePosteriorBoundaries(cnt1 + d1, cnt2 + d2, &real_min_k, &real_max_k, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_C[ij][cnt1 + d1][(cnt2 + d2) / 2] += matrices->N_C[pq][cnt1][cnt2 / 2]; #endif } /* collect all cases where d1+cnt1 or d2+cnt2 exceeds maxD1, maxD2, respectively */ else { matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_C[pq][cnt1][cnt2 / 2] + energy); } } } } } /* collect all contributions where C[pq] already lies outside k_max, l_max boundary */ if (matrices->E_C_rem[pq] != INF) matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_C_rem[pq] + 
energy); } /* end q-loop */ } /* end p-loop */ /* MULTI LOOP STRUCTURES */ if (!no_close) { /* dangle energies for multiloop closing stem */ tt = rtype[type]; temp2 = P->MLclosing; if (dangles == 2) temp2 += E_MLstem(tt, S1[j - 1], S1[i + 1], P); else temp2 += E_MLstem(tt, -1, -1, P); for (u = i + TURN + 2; u < j - TURN - 2; u++) { int i1u = my_iindx[i + 1] - u; int u1j1 = my_iindx[u + 1] - j + 1; /* check all cases where either M or M1 are already out of scope of maxD1 and/or maxD2 */ if (matrices->E_M_rem[i1u] != INF) { for (cnt3 = matrices->k_min_M1[u1j1]; cnt3 <= matrices->k_max_M1[u1j1]; cnt3++) for (cnt4 = matrices->l_min_M1[u1j1][cnt3]; cnt4 <= matrices->l_max_M1[u1j1][cnt3]; cnt4 += 2) { if (matrices->E_M1[u1j1][cnt3][cnt4 / 2] != INF) { matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_M_rem[i1u] + matrices->E_M1[u1j1][cnt3][cnt4 / 2] + temp2 ); } } if (matrices->E_M1_rem[u1j1] != INF) { matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_M_rem[i1u] + matrices->E_M1_rem[u1j1] + temp2 ); } } if (matrices->E_M1_rem[u1j1] != INF) { for (cnt1 = matrices->k_min_M[i1u]; cnt1 <= matrices->k_max_M[i1u]; cnt1++) for (cnt2 = matrices->l_min_M[i1u][cnt1]; cnt2 <= matrices->l_max_M[i1u][cnt1]; cnt2 += 2) if (matrices->E_M[i1u][cnt1][cnt2 / 2] != INF) { matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_M[i1u][cnt1][cnt2 / 2] + matrices->E_M1_rem[u1j1] + temp2 ); } } /* get distance to reference if closing the multiloop * d = dbp(S_{i,j}, {i,j} + S_{i+1,u} + S_{u+1,j-1}) */ if (!matrices->E_M[i1u]) continue; if (!matrices->E_M1[u1j1]) continue; d1 = base_d1 + referenceBPs1[ij] - referenceBPs1[i1u] - referenceBPs1[u1j1]; d2 = base_d2 + referenceBPs2[ij] - referenceBPs2[i1u] - referenceBPs2[u1j1]; for (cnt1 = matrices->k_min_M[i1u]; cnt1 <= matrices->k_max_M[i1u]; cnt1++) for (cnt2 = matrices->l_min_M[i1u][cnt1]; cnt2 <= matrices->l_max_M[i1u][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_M1[u1j1]; cnt3 <= matrices->k_max_M1[u1j1]; 
cnt3++) for (cnt4 = matrices->l_min_M1[u1j1][cnt3]; cnt4 <= matrices->l_max_M1[u1j1][cnt3]; cnt4 += 2) { if ((matrices->E_M[i1u][cnt1][cnt2 / 2] != INF) && (matrices->E_M1[u1j1][cnt3][cnt4 / 2] != INF)) { if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) { matrices->E_C[ij][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2(matrices->E_C[ij][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2], matrices->E_M[i1u][cnt1][cnt2 / 2] + matrices->E_M1[u1j1][cnt3][cnt4 / 2] + temp2 ); updatePosteriorBoundaries(cnt1 + cnt3 + d1, cnt2 + cnt4 + d2, &real_min_k, &real_max_k, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_C[ij][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] += matrices->N_M[i1u][cnt1][cnt2 / 2] * matrices->N_M1[u1j1][cnt3][cnt4 / 2]; #endif } /* collect all cases where d1+cnt1+cnt3 or d2+cnt2+cnt4 exceeds maxD1, maxD2, respectively */ else { matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_M[i1u][cnt1][cnt2 / 2] + matrices->E_M1[u1j1][cnt3][cnt4 / 2] + temp2 ); } } } } } /* resize and move memory portions of energy matrix E_C */ adjustArrayBoundaries(&matrices->E_C[ij], &matrices->k_min_C[ij], &matrices->k_max_C[ij], &matrices->l_min_C[ij], &matrices->l_max_C[ij], real_min_k, real_max_k, min_l_real, max_l_real ); #ifdef COUNT_STATES /* actually we should adjust the array boundaries here but we might never use the count states option more than once so what....*/ #endif } /* end >> if (pair) << */ /* done with c[i,j], now compute fML[i,j] */ /* free ends ? -----------------------------------------*/ dia = referenceBPs1[ij] - referenceBPs1[my_iindx[i + 1] - j]; dib = referenceBPs2[ij] - referenceBPs2[my_iindx[i + 1] - j]; dja = referenceBPs1[ij] - referenceBPs1[ij + 1]; djb = referenceBPs2[ij] - referenceBPs2[ij + 1]; if (dangles == 2) temp2 = E_MLstem(type, ((i > 1) || circ) ? S1[i - 1] : -1, ((j < seq_length) || circ) ? 
S1[j + 1] : -1, P); else temp2 = E_MLstem(type, -1, -1, P); int min_k_guess, max_k_guess, min_l_guess, max_l_guess; int min_k_real_m, max_k_real_m, *min_l_real_m, *max_l_real_m; int min_k_real_m1, max_k_real_m1, *min_l_real_m1, *max_l_real_m1; min_k_guess = min_l_guess = 0; max_k_guess = mm1[ij] + referenceBPs1[ij]; max_l_guess = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(min_k_guess, max_k_guess, min_l_guess, max_l_guess, bpdist[ij], &matrices->k_min_M[ij], &matrices->k_max_M[ij], &matrices->l_min_M[ij], &matrices->l_max_M[ij] ); prepareBoundaries(min_k_guess, max_k_guess, min_l_guess, max_l_guess, bpdist[ij], &matrices->k_min_M1[ij], &matrices->k_max_M1[ij], &matrices->l_min_M1[ij], &matrices->l_max_M1[ij] ); preparePosteriorBoundaries(matrices->k_max_M[ij] - matrices->k_min_M[ij] + 1, matrices->k_min_M[ij], &min_k_real_m, &max_k_real_m, &min_l_real_m, &max_l_real_m ); preparePosteriorBoundaries(matrices->k_max_M1[ij] - matrices->k_min_M1[ij] + 1, matrices->k_min_M1[ij], &min_k_real_m1, &max_k_real_m1, &min_l_real_m1, &max_l_real_m1 ); prepareArray(&matrices->E_M[ij], matrices->k_min_M[ij], matrices->k_max_M[ij], matrices->l_min_M[ij], matrices->l_max_M[ij] ); prepareArray(&matrices->E_M1[ij], matrices->k_min_M1[ij], matrices->k_max_M1[ij], matrices->l_min_M1[ij], matrices->l_max_M1[ij] ); #ifdef COUNT_STATES prepareArray2(&matrices->N_M[ij], matrices->k_min_M[ij], matrices->k_max_M[ij], matrices->l_min_M[ij], matrices->l_max_M[ij] ); prepareArray2(&matrices->N_M1[ij], matrices->k_min_M1[ij], matrices->k_max_M1[ij], matrices->l_min_M1[ij], matrices->l_max_M1[ij] ); #endif /* now to the actual computations... 
*/ /* 1st E_M[ij] = E_M1[ij] = E_C[ij] + b */ if (matrices->E_C_rem[ij] != INF) matrices->E_M_rem[ij] = matrices->E_M1_rem[ij] = temp2 + matrices->E_C_rem[ij]; if (matrices->E_C[ij]) { for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++) { for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2) { if (matrices->E_C[ij][cnt1][cnt2 / 2] != INF) { matrices->E_M[ij][cnt1][cnt2 / 2] = matrices->E_M1[ij][cnt1][cnt2 / 2] = temp2 + matrices->E_C[ij][cnt1][cnt2 / 2]; updatePosteriorBoundaries(cnt1, cnt2, &min_k_real_m, &max_k_real_m, &min_l_real_m, &max_l_real_m ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real_m1, &max_k_real_m1, &min_l_real_m1, &max_l_real_m1 ); #ifdef COUNT_STATES matrices->N_M[ij][cnt1][cnt2 / 2] = matrices->N_M1[ij][cnt1][cnt2 / 2] = matrices->N_C[ij][cnt1][cnt2 / 2]; #endif } } } } /* 2nd E_M[ij] = MIN(E_M[ij], E_M[i+1,j] + c) */ if (matrices->E_M_rem[my_iindx[i + 1] - j] != INF) { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M_rem[my_iindx[i + 1] - j] + P->MLbase ); } if (matrices->E_M[my_iindx[i + 1] - j]) { for (cnt1 = matrices->k_min_M[my_iindx[i + 1] - j]; cnt1 <= matrices->k_max_M[my_iindx[i + 1] - j]; cnt1++) { for (cnt2 = matrices->l_min_M[my_iindx[i + 1] - j][cnt1]; cnt2 <= matrices->l_max_M[my_iindx[i + 1] - j][cnt1]; cnt2 += 2) { if (matrices->E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] != INF) { if (((cnt1 + dia) <= maxD1) && ((cnt2 + dib) <= maxD2)) { matrices->E_M[ij][cnt1 + dia][(cnt2 + dib) / 2] = MIN2(matrices->E_M[ij][cnt1 + dia][(cnt2 + dib) / 2], matrices->E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] + P->MLbase ); updatePosteriorBoundaries(cnt1 + dia, cnt2 + dib, &min_k_real_m, &max_k_real_m, &min_l_real_m, &max_l_real_m ); #ifdef COUNT_STATES matrices->N_M[ij][cnt1 + dia][(cnt2 + dib) / 2] += matrices->N_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2]; #endif } /* collect all cases where dia+cnt1 or dib+cnt2 exceeds maxD1, maxD2, respectively */ else { 
matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] + P->MLbase ); } } } } } /* 3rd E_M[ij] = MIN(E_M[ij], E_M[i,j-1] + c) */ if (matrices->E_M_rem[ij + 1] != INF) { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M_rem[ij + 1] + P->MLbase ); } if (matrices->E_M[ij + 1]) { for (cnt1 = matrices->k_min_M[ij + 1]; cnt1 <= matrices->k_max_M[ij + 1]; cnt1++) { for (cnt2 = matrices->l_min_M[ij + 1][cnt1]; cnt2 <= matrices->l_max_M[ij + 1][cnt1]; cnt2 += 2) { if (matrices->E_M[ij + 1][cnt1][cnt2 / 2] != INF) { if (((cnt1 + dja) <= maxD1) && ((cnt2 + djb) <= maxD2)) { matrices->E_M[ij][cnt1 + dja][(cnt2 + djb) / 2] = MIN2(matrices->E_M[ij][cnt1 + dja][(cnt2 + djb) / 2], matrices->E_M[ij + 1][cnt1][cnt2 / 2] + P->MLbase ); updatePosteriorBoundaries(cnt1 + dja, cnt2 + djb, &min_k_real_m, &max_k_real_m, &min_l_real_m, &max_l_real_m ); #ifdef COUNT_STATES matrices->N_M[ij][cnt1 + dja][(cnt2 + djb) / 2] += matrices->N_M[ij + 1][cnt1][cnt2 / 2]; #endif } /* collect all cases where dja+cnt1 or djb+cnt2 exceeds maxD1, maxD2, respectively */ else { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M[ij + 1][cnt1][cnt2 / 2] + P->MLbase ); } } } } } /* 4th E_M1[ij] = MIN(E_M1[ij], E_M1[i,j-1] + c) */ if (matrices->E_M1_rem[ij + 1] != INF) { matrices->E_M1_rem[ij] = MIN2(matrices->E_M1_rem[ij], matrices->E_M1_rem[ij + 1] + P->MLbase ); } if (matrices->E_M1[ij + 1]) { for (cnt1 = matrices->k_min_M1[ij + 1]; cnt1 <= matrices->k_max_M1[ij + 1]; cnt1++) { for (cnt2 = matrices->l_min_M1[ij + 1][cnt1]; cnt2 <= matrices->l_max_M1[ij + 1][cnt1]; cnt2 += 2) { if (matrices->E_M1[ij + 1][cnt1][cnt2 / 2] != INF) { if (((cnt1 + dja) <= maxD1) && ((cnt2 + djb) <= maxD2)) { matrices->E_M1[ij][cnt1 + dja][(cnt2 + djb) / 2] = MIN2(matrices->E_M1[ij][cnt1 + dja][(cnt2 + djb) / 2], matrices->E_M1[ij + 1][cnt1][cnt2 / 2] + P->MLbase ); updatePosteriorBoundaries(cnt1 + dja, cnt2 + djb, &min_k_real_m1, &max_k_real_m1, 
&min_l_real_m1, &max_l_real_m1 ); #ifdef COUNT_STATES matrices->N_M1[ij][cnt1 + dja][(cnt2 + djb) / 2] += matrices->N_M1[ij + 1][cnt1][cnt2 / 2]; #endif } /* collect all cases where dja+cnt1 or djb+cnt2 exceeds maxD1, maxD2, respectively */ else { matrices->E_M1_rem[ij] = MIN2(matrices->E_M1_rem[ij], matrices->E_M1[ij + 1][cnt1][cnt2 / 2] + P->MLbase ); } } } } } /* 5th E_M[ij] = MIN(E_M[ij], min(E_M[i,k] + E_M[k+1,j])) */ if (j > TURN + 2) { for (u = i + 1 + TURN; u <= j - 2 - TURN; u++) { /* check all cases where M(i,u) and/or M(u+1,j) are already out of scope of maxD1 and/or maxD2 */ if (matrices->E_M_rem[my_iindx[i] - u] != INF) { for (cnt3 = matrices->k_min_M[my_iindx[u + 1] - j]; cnt3 <= matrices->k_max_M[my_iindx[u + 1] - j]; cnt3++) { for (cnt4 = matrices->l_min_M[my_iindx[u + 1] - j][cnt3]; cnt4 <= matrices->l_max_M[my_iindx[u + 1] - j][cnt3]; cnt4 += 2) { if (matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] != INF) { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M_rem[my_iindx[i] - u] + matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] ); } } } if (matrices->E_M_rem[my_iindx[u + 1] - j] != INF) { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M_rem[my_iindx[i] - u] + matrices->E_M_rem[my_iindx[u + 1] - j] ); } } if (matrices->E_M_rem[my_iindx[u + 1] - j] != INF) { for (cnt1 = matrices->k_min_M[my_iindx[i] - u]; cnt1 <= matrices->k_max_M[my_iindx[i] - u]; cnt1++) { for (cnt2 = matrices->l_min_M[my_iindx[i] - u][cnt1]; cnt2 <= matrices->l_max_M[my_iindx[i] - u][cnt1]; cnt2 += 2) { if (matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] != INF) { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + matrices->E_M_rem[my_iindx[u + 1] - j] ); } } } } if (!matrices->E_M[my_iindx[i] - u]) continue; if (!matrices->E_M[my_iindx[u + 1] - j]) continue; dia = referenceBPs1[ij] - referenceBPs1[my_iindx[i] - u] - referenceBPs1[my_iindx[u + 1] - j]; dib = referenceBPs2[ij] - 
referenceBPs2[my_iindx[i] - u] - referenceBPs2[my_iindx[u + 1] - j]; for (cnt1 = matrices->k_min_M[my_iindx[i] - u]; cnt1 <= matrices->k_max_M[my_iindx[i] - u]; cnt1++) { for (cnt2 = matrices->l_min_M[my_iindx[i] - u][cnt1]; cnt2 <= matrices->l_max_M[my_iindx[i] - u][cnt1]; cnt2 += 2) { for (cnt3 = matrices->k_min_M[my_iindx[u + 1] - j]; cnt3 <= matrices->k_max_M[my_iindx[u + 1] - j]; cnt3++) { for (cnt4 = matrices->l_min_M[my_iindx[u + 1] - j][cnt3]; cnt4 <= matrices->l_max_M[my_iindx[u + 1] - j][cnt3]; cnt4 += 2) { if ((matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] != INF) && (matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] != INF)) { if (((cnt1 + cnt3 + dia) <= maxD1) && ((cnt2 + cnt4 + dib) <= maxD2)) { matrices->E_M[ij][cnt1 + cnt3 + dia][(cnt2 + cnt4 + dib) / 2] = MIN2(matrices->E_M[ij][cnt1 + cnt3 + dia][(cnt2 + cnt4 + dib) / 2], matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] ); updatePosteriorBoundaries(cnt1 + cnt3 + dia, cnt2 + cnt4 + dib, &min_k_real_m, &max_k_real_m, &min_l_real_m, &max_l_real_m ); #ifdef COUNT_STATES matrices->N_M[ij][cnt1 + cnt3 + dia][(cnt2 + cnt4 + dib) / 2] += matrices->N_M[my_iindx[i] - u][cnt1][cnt2 / 2] * matrices->N_M1[my_iindx[u + 1] - j][cnt3][cnt4 / 2]; #endif } /* collect all cases where dia+cnt1+cnt3 or dib+cnt2+cnt4 exceeds maxD1, maxD2, respectively */ else { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] ); } } } } } } } } /* thats all folks for the multiloop decomposition... 
*/ adjustArrayBoundaries(&matrices->E_M[ij], &matrices->k_min_M[ij], &matrices->k_max_M[ij], &matrices->l_min_M[ij], &matrices->l_max_M[ij], min_k_real_m, max_k_real_m, min_l_real_m, max_l_real_m ); adjustArrayBoundaries(&matrices->E_M1[ij], &matrices->k_min_M1[ij], &matrices->k_max_M1[ij], &matrices->l_min_M1[ij], &matrices->l_max_M1[ij], min_k_real_m1, max_k_real_m1, min_l_real_m1, max_l_real_m1 ); #ifdef COUNT_STATES /* actually we should adjust the array boundaries here but we might never use the count states option more than once so what....*/ #endif } /* end of j-loop */ } /* calculate energies of 5' and 3' fragments */ /* prepare first entries in E_F5 */ for (cnt1 = 1; cnt1 <= TURN + 1; cnt1++) { matrices->E_F5[cnt1] = (int **)vrna_alloc(sizeof(int *)); matrices->E_F5[cnt1][0] = (int *)vrna_alloc(sizeof(int)); matrices->E_F5[cnt1][0][0] = 0; matrices->E_F5_rem[cnt1] = INF; matrices->k_min_F5[cnt1] = matrices->k_max_F5[cnt1] = 0; matrices->l_min_F5[cnt1] = (int *)vrna_alloc(sizeof(int)); matrices->l_max_F5[cnt1] = (int *)vrna_alloc(sizeof(int)); matrices->l_min_F5[cnt1][0] = matrices->l_max_F5[cnt1][0] = 0; #ifdef COUNT_STATES matrices->N_F5[cnt1] = (unsigned long **)vrna_alloc(sizeof(unsigned long *)); matrices->N_F5[cnt1][0] = (unsigned long *)vrna_alloc(sizeof(unsigned long)); matrices->N_F5[cnt1][0][0] = 1; #endif } for (j = TURN + 2; j <= seq_length; j++) { unsigned int da = referenceBPs1[my_iindx[1] - j] - referenceBPs1[my_iindx[1] - j + 1]; unsigned int db = referenceBPs2[my_iindx[1] - j] - referenceBPs2[my_iindx[1] - j + 1]; type = ptype[jindx[j] + 1]; additional_en = 0; if (type) { if (dangles == 2) additional_en += vrna_E_ext_stem(type, -1, j < seq_length ? 
S1[j + 1] : -1, P); else additional_en += vrna_E_ext_stem(type, -1, -1, P); } /* make min and max k guess for memory allocation */ int min_k_guess, max_k_guess, min_l_guess, max_l_guess; int *min_l_real, *max_l_real, min_k_real, max_k_real; min_k_guess = min_l_guess = 0; max_k_guess = referenceBPs1[my_iindx[1] - j] + mm1[my_iindx[1] - j]; max_l_guess = referenceBPs2[my_iindx[1] - j] + mm2[my_iindx[1] - j]; prepareBoundaries(min_k_guess, max_k_guess, min_l_guess, max_l_guess, bpdist[my_iindx[1] - j], &matrices->k_min_F5[j], &matrices->k_max_F5[j], &matrices->l_min_F5[j], &matrices->l_max_F5[j] ); preparePosteriorBoundaries(matrices->k_max_F5[j] - matrices->k_min_F5[j] + 1, matrices->k_min_F5[j], &min_k_real, &max_k_real, &min_l_real, &max_l_real ); prepareArray(&matrices->E_F5[j], matrices->k_min_F5[j], matrices->k_max_F5[j], matrices->l_min_F5[j], matrices->l_max_F5[j] ); #ifdef COUNT_STATES prepareArray2(&matrices->N_F5[j], matrices->k_min_F5[j], matrices->k_max_F5[j], matrices->l_min_F5[j], matrices->l_max_F5[j] ); #endif /* begin the actual computation of 5' end energies */ /* j-1 is unpaired ... 
*/ matrices->E_F5_rem[j] = matrices->E_F5_rem[j - 1]; for (cnt1 = matrices->k_min_F5[j - 1]; cnt1 <= matrices->k_max_F5[j - 1]; cnt1++) { for (cnt2 = matrices->l_min_F5[j - 1][cnt1]; cnt2 <= matrices->l_max_F5[j - 1][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->E_F5[j][cnt1 + da][(cnt2 + db) / 2] = MIN2(matrices->E_F5[j][cnt1 + da][(cnt2 + db) / 2], matrices->E_F5[j - 1][cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_F5[j][cnt1 + da][(cnt2 + db) / 2] += matrices->N_F5[j - 1][cnt1][cnt2 / 2]; #endif } /* collect all cases where da+cnt1 or db+cnt2 exceeds maxD1, maxD2, respectively */ else { matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5[j - 1][cnt1][cnt2 / 2]); } } } /* j pairs with 1 */ if (matrices->E_C_rem[my_iindx[1] - j] != INF) matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_C_rem[my_iindx[1] - j] + additional_en); if (matrices->E_C[my_iindx[1] - j]) { for (cnt1 = matrices->k_min_C[my_iindx[1] - j]; cnt1 <= matrices->k_max_C[my_iindx[1] - j]; cnt1++) for (cnt2 = matrices->l_min_C[my_iindx[1] - j][cnt1]; cnt2 <= matrices->l_max_C[my_iindx[1] - j][cnt1]; cnt2 += 2) { if (matrices->E_C[my_iindx[1] - j][cnt1][cnt2 / 2] != INF) { matrices->E_F5[j][cnt1][cnt2 / 2] = MIN2(matrices->E_F5[j][cnt1][cnt2 / 2], matrices->E_C[my_iindx[1] - j][cnt1][cnt2 / 2] + additional_en ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_F5[j][cnt1][cnt2 / 2] += matrices->N_C[my_iindx[1] - j][cnt1][cnt2 / 2]; #endif } } } /* j pairs with some other nucleotide -> see below */ for (i = j - TURN - 1; i > 1; i--) { ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; if (type) { if (dangles == 2) additional_en = vrna_E_ext_stem(type, S1[i - 1], j < seq_length ? 
S1[j + 1] : -1, P); else additional_en = vrna_E_ext_stem(type, -1, -1, P); if (matrices->E_C_rem[ij] != INF) { for (cnt3 = matrices->k_min_F5[i - 1]; cnt3 <= matrices->k_max_F5[i - 1]; cnt3++) for (cnt4 = matrices->l_min_F5[i - 1][cnt3]; cnt4 <= matrices->l_max_F5[i - 1][cnt3]; cnt4 += 2) { if (matrices->E_F5[i - 1][cnt3][cnt4 / 2] != INF) { matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5[i - 1][cnt3][cnt4 / 2] + matrices->E_C_rem[ij] + additional_en ); } } if (matrices->E_F5_rem[i - 1] != INF) { matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5_rem[i - 1] + matrices->E_C_rem[ij] + additional_en ); } } if ((matrices->E_F5_rem[i - 1] != INF) && (matrices->E_C[ij])) { for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++) for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2) if (matrices->E_C[ij][cnt1][cnt2 / 2] != INF) { matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5_rem[i - 1] + matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en ); } } if (!matrices->E_C[ij]) continue; unsigned int d1a = referenceBPs1[my_iindx[1] - j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1] - i + 1]; unsigned int d1b = referenceBPs2[my_iindx[1] - j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1] - i + 1]; for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++) for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_F5[i - 1]; cnt3 <= matrices->k_max_F5[i - 1]; cnt3++) for (cnt4 = matrices->l_min_F5[i - 1][cnt3]; cnt4 <= matrices->l_max_F5[i - 1][cnt3]; cnt4 += 2) { if (matrices->E_F5[i - 1][cnt3][cnt4 / 2] != INF && matrices->E_C[ij][cnt1][cnt2 / 2] != INF) { if (((cnt1 + cnt3 + d1a) <= maxD1) && ((cnt2 + cnt4 + d1b) <= maxD2)) { matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] = MIN2(matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2], matrices->E_F5[i - 1][cnt3][cnt4 / 2] + 
matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en ); updatePosteriorBoundaries(cnt1 + cnt3 + d1a, cnt2 + cnt4 + d1b, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] += matrices->N_F5[i - 1][cnt3][cnt4 / 2] * matrices->N_C[ij][cnt1][cnt2 / 2]; #endif } /* collect all cases where d1a+cnt1+cnt3 or d1b+cnt2+cnt4 exceeds maxD1, maxD2, respectively */ else { matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5[i - 1][cnt3][cnt4 / 2] + matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en ); } } } } } /* resize and move memory portions of energy matrix E_F5 */ adjustArrayBoundaries(&matrices->E_F5[j], &matrices->k_min_F5[j], &matrices->k_max_F5[j], &matrices->l_min_F5[j], &matrices->l_max_F5[j], min_k_real, max_k_real, min_l_real, max_l_real ); } /* end of j-loop */ if (compute_2Dfold_F3) { /* prepare first entries in E_F3 */ for (cnt1 = seq_length; cnt1 >= seq_length - TURN - 1; cnt1--) { matrices->E_F3[cnt1] = (int **)vrna_alloc(sizeof(int *)); matrices->E_F3[cnt1][0] = (int *)vrna_alloc(sizeof(int)); matrices->E_F3[cnt1][0][0] = 0; matrices->k_min_F3[cnt1] = matrices->k_max_F3[cnt1] = 0; matrices->l_min_F3[cnt1] = (int *)vrna_alloc(sizeof(int)); matrices->l_max_F3[cnt1] = (int *)vrna_alloc(sizeof(int)); matrices->l_min_F3[cnt1][0] = matrices->l_max_F3[cnt1][0] = 0; } /* begin calculations */ for (j = seq_length - TURN - 2; j >= 1; j--) { unsigned int da = referenceBPs1[my_iindx[j] - seq_length] - referenceBPs1[my_iindx[j + 1] - seq_length]; unsigned int db = referenceBPs2[my_iindx[j] - seq_length] - referenceBPs2[my_iindx[j + 1] - seq_length]; type = ptype[jindx[seq_length] + j]; additional_en = 0; if (type) { if (dangles == 2) additional_en += vrna_E_ext_stem(type, j > 1 ? 
S1[j - 1] : -1, -1, P); else additional_en += vrna_E_ext_stem(type, -1, -1, P); } /* make min and max k guess for memory allocation */ int min_k_guess, max_k_guess, min_l_guess, max_l_guess; int *min_l_real, *max_l_real, min_k_real, max_k_real; min_k_guess = min_l_guess = 0; max_k_guess = referenceBPs1[my_iindx[j] - seq_length] + mm1[my_iindx[j] - seq_length]; max_l_guess = referenceBPs2[my_iindx[j] - seq_length] + mm2[my_iindx[j] - seq_length]; prepareBoundaries(min_k_guess, max_k_guess, min_l_guess, max_l_guess, bpdist[my_iindx[j] - seq_length], &matrices->k_min_F3[j], &matrices->k_max_F3[j], &matrices->l_min_F3[j], &matrices->l_max_F3[j] ); preparePosteriorBoundaries(matrices->k_max_F3[j] - matrices->k_min_F3[j] + 1, matrices->k_min_F3[j], &min_k_real, &max_k_real, &min_l_real, &max_l_real ); prepareArray(&matrices->E_F3[j], matrices->k_min_F3[j], matrices->k_max_F3[j], matrices->l_min_F3[j], matrices->l_max_F3[j] ); /* begin the actual computation of 5' end energies */ /* j is unpaired ... 
*/ for (cnt1 = matrices->k_min_F3[j + 1]; cnt1 <= matrices->k_max_F3[j + 1]; cnt1++) { for (cnt2 = matrices->l_min_F3[j + 1][cnt1]; cnt2 <= matrices->l_max_F3[j + 1][cnt1]; cnt2 += 2) { matrices->E_F3[j][cnt1 + da][(cnt2 + db) / 2] = MIN2(matrices->E_F3[j][cnt1 + da][(cnt2 + db) / 2], matrices->E_F3[j + 1][cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } } /* j pairs with n */ if (matrices->E_C[my_iindx[j] - seq_length]) { for (cnt1 = matrices->k_min_C[my_iindx[j] - seq_length]; cnt1 <= matrices->k_max_C[my_iindx[j] - seq_length]; cnt1++) for (cnt2 = matrices->l_min_C[my_iindx[j] - seq_length][cnt1]; cnt2 <= matrices->l_max_C[my_iindx[j] - seq_length][cnt1]; cnt2 += 2) { if (matrices->E_C[my_iindx[j] - seq_length][cnt1][cnt2 / 2] != INF) { matrices->E_F3[j][cnt1][cnt2 / 2] = MIN2(matrices->E_F3[j][cnt1][cnt2 / 2], matrices->E_C[my_iindx[j] - seq_length][cnt1][cnt2 / 2] + additional_en ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } } } /* j pairs with some other nucleotide -> see below */ for (i = j - TURN - 1; i > 1; i--) { ij = my_iindx[i] - j; if (!matrices->E_C[ij]) continue; type = ptype[jindx[j] + i]; if (type) { unsigned int d1a = referenceBPs1[my_iindx[1] - j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1] - i + 1]; unsigned int d1b = referenceBPs2[my_iindx[1] - j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1] - i + 1]; if (dangles == 2) additional_en = vrna_E_ext_stem(type, S1[i - 1], j < seq_length ? 
S1[j + 1] : -1, P); else additional_en = vrna_E_ext_stem(type, -1, -1, P); for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++) for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_F5[i - 1]; cnt3 <= matrices->k_max_F5[i - 1]; cnt3++) for (cnt4 = matrices->l_min_F5[i - 1][cnt3]; cnt4 <= matrices->l_max_F5[i - 1][cnt3]; cnt4 += 2) { if (matrices->E_F5[i - 1][cnt3][cnt4 / 2] != INF && matrices->E_C[ij][cnt1][cnt2 / 2] != INF) { matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] = MIN2(matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2], matrices->E_F5[i - 1][cnt3][cnt4 / 2] + matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en ); updatePosteriorBoundaries(cnt1 + cnt3 + d1a, cnt2 + cnt4 + d1b, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] += matrices->N_F5[i - 1][cnt3][cnt4 / 2] * matrices->N_C[ij][cnt1][cnt2 / 2]; #endif } } } } /* resize and move memory portions of energy matrix E_F5 */ adjustArrayBoundaries(&matrices->E_F5[j], &matrices->k_min_F5[j], &matrices->k_max_F5[j], &matrices->l_min_F5[j], &matrices->l_max_F5[j], min_k_real, max_k_real, min_l_real, max_l_real ); } /* end of j-loop */ } } /*---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------*/ /* backtrack_f5: recover one optimal 5'-fragment decomposition for the interval [1..j]. (j, k, l) addresses the distance-class entry E_F5[j][k][l/2] to be traced back; the sentinel k == -1 instead selects the "remainder" entry E_F5_rem[j], which collects all cases whose base-pair distances to the two reference structures exceed maxD1/maxD2. The function mirrors the forward recursion case by case (j unpaired; pair (1,j); pair (i,j) combined with the 5' fragment [1..i-1]) and, on the first match, writes the recovered pairs into `structure` via backtrack_c() and recurses; it errors out if no case reproduces the stored energy. */ PRIVATE void backtrack_f5(unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc) { int *my_iindx, *jindx, energy, type, dangles, cnt1, cnt2, cnt3, cnt4; int **l_min_C, **l_max_C, **l_min_F5, **l_max_F5; int *k_min_C, *k_max_C, *k_min_F5, *k_max_F5; int ***E_C, ***E_F5; int *E_C_rem, *E_F5_rem; unsigned int i, ij, seq_length, maxD1, maxD2; short *S1; unsigned int *referenceBPs1, *referenceBPs2; char *ptype; vrna_param_t *P; vrna_md_t *md; vrna_mx_mfe_t *matrices; unsigned int da, db; P = vc->params; md =
&(P->model_details); matrices = vc->matrices; seq_length = vc->length; S1 = vc->sequence_encoding; ptype = vc->ptype; my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; dangles = md->dangles; E_F5 = matrices->E_F5; l_min_F5 = matrices->l_min_F5; l_max_F5 = matrices->l_max_F5; k_min_F5 = matrices->k_min_F5; k_max_F5 = matrices->k_max_F5; E_C = matrices->E_C; l_min_C = matrices->l_min_C; l_max_C = matrices->l_max_C; k_min_C = matrices->k_min_C; k_max_C = matrices->k_max_C; E_F5_rem = matrices->E_F5_rem; E_C_rem = matrices->E_C_rem; maxD1 = vc->maxD1; maxD2 = vc->maxD2; /* da/db: change in distance to reference 1/2 caused by leaving j unpaired */ da = referenceBPs1[my_iindx[1] - j] - referenceBPs1[my_iindx[1] - j + 1]; db = referenceBPs2[my_iindx[1] - j] - referenceBPs2[my_iindx[1] - j + 1]; if (j < TURN + 2) return; /* F5[j] == F5[j-1] ? */ if (k == -1) { if (E_F5_rem[j] == INF) { return; } else if (E_F5_rem[j] == E_F5_rem[j - 1]) { backtrack_f5(j - 1, k, l, structure, vc); return; } else if (E_F5[j - 1]) { for (cnt1 = k_min_F5[j - 1]; cnt1 <= k_max_F5[j - 1]; cnt1++) { for (cnt2 = l_min_F5[j - 1][cnt1]; cnt2 <= l_max_F5[j - 1][cnt1]; cnt2 += 2) { if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) { if (E_F5_rem[j] == E_F5[j - 1][cnt1][cnt2 / 2]) { backtrack_f5(j - 1, cnt1, cnt2, structure, vc); return; } } } } } } else if ((k >= da) && (l >= db)) { if (E_F5[j - 1]) { if ((k - da >= k_min_F5[j - 1]) && (k - da <= k_max_F5[j - 1])) { if ((l - db >= l_min_F5[j - 1][k - da]) && (l - db <= l_max_F5[j - 1][k - da])) { if (E_F5[j - 1][k - da][(l - db) / 2] == E_F5[j][k][l / 2]) { backtrack_f5(j - 1, k - da, l - db, structure, vc); return; } } } } } /* j pairs with nucleotide 1 */ type = ptype[jindx[j] + 1]; if (type) { if (dangles == 2) energy = vrna_E_ext_stem(type, -1, j < seq_length ?
S1[j + 1] : -1, P); else energy = vrna_E_ext_stem(type, -1, -1, P); if (k == -1) { if (E_C_rem[my_iindx[1] - j] + energy == E_F5_rem[j]) { backtrack_c(1, j, -1, -1, structure, vc); return; } } else if (k >= k_min_C[my_iindx[1] - j] && (k <= k_max_C[my_iindx[1] - j])) { if ((l >= l_min_C[my_iindx[1] - j][k]) && (l <= l_max_C[my_iindx[1] - j][k])) { if (E_C[my_iindx[1] - j][k][l / 2] + energy == E_F5[j][k][l / 2]) { backtrack_c(1, j, k, l, structure, vc); return; } } } } /* j pairs with some i, 1 < i <= j - TURN - 1: combine C(i,j) with the 5' fragment [1..i-1] */ for (i = j - TURN - 1; i > 1; i--) { ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; if (type) { unsigned int d1a = referenceBPs1[my_iindx[1] - j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1] - i + 1]; unsigned int d1b = referenceBPs2[my_iindx[1] - j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1] - i + 1]; if (dangles == 2) energy = vrna_E_ext_stem(type, S1[i - 1], j < seq_length ? S1[j + 1] : -1, P); else energy = vrna_E_ext_stem(type, -1, -1, P); if (k == -1) { if (E_C_rem[ij] != INF) { for (cnt1 = k_min_F5[i - 1]; cnt1 <= k_max_F5[i - 1]; cnt1++) { for (cnt2 = l_min_F5[i - 1][cnt1]; cnt2 <= l_max_F5[i - 1][cnt1]; cnt2 += 2) { if (E_F5_rem[j] == (E_F5[i - 1][cnt1][cnt2 / 2] + E_C_rem[ij] + energy)) { backtrack_f5(i - 1, cnt1, cnt2, structure, vc); backtrack_c(i, j, -1, -1, structure, vc); return; } } } if (E_F5_rem[j] == (E_F5_rem[i - 1] + E_C_rem[ij] + energy)) { backtrack_f5(i - 1, -1, -1, structure, vc); backtrack_c(i, j, -1, -1, structure, vc); return; } } if (E_F5_rem[i - 1] != INF) { for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++) { for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2) { if (E_F5_rem[j] == (E_F5_rem[i - 1] + E_C[ij][cnt1][cnt2 / 2] + energy)) { backtrack_f5(i - 1, -1, -1, structure, vc); backtrack_c(i, j, cnt1, cnt2, structure, vc); return; } } } } for (cnt1 = k_min_F5[i - 1]; cnt1 <= k_max_F5[i - 1]; cnt1++) for (cnt2 = l_min_F5[i - 1][cnt1]; cnt2 <= l_max_F5[i - 1][cnt1]; cnt2 += 2) for (cnt3 = k_min_C[ij]; cnt3 <= k_max_C[ij]; cnt3++) for (cnt4
= l_min_C[ij][cnt3]; cnt4 <= l_max_C[ij][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + d1a) > maxD1) || ((cnt2 + cnt4 + d1b) > maxD2)) { if (E_F5_rem[j] == (E_F5[i - 1][cnt1][cnt2 / 2] + E_C[ij][cnt3][cnt4 / 2] + energy)) { backtrack_f5(i - 1, cnt1, cnt2, structure, vc); backtrack_c(i, j, cnt3, cnt4, structure, vc); return; } } } } else if ((k >= d1a) && (l >= d1b)) { int k_f_max = MIN2(k - d1a, k_max_F5[i - 1]); for (cnt1 = k_min_F5[i - 1]; cnt1 <= k_f_max; cnt1++) { int l_f_max = MIN2(l - d1b, l_max_F5[i - 1][cnt1]); for (cnt2 = l_min_F5[i - 1][cnt1]; cnt2 <= l_f_max; cnt2 += 2) { int k_c = k - d1a - cnt1; if ((k_c >= k_min_C[ij]) && (k_c <= k_max_C[ij])) { int l_c = l - d1b - cnt2; if ((l_c >= l_min_C[ij][k_c]) && (l_c <= l_max_C[ij][k_c])) { if (E_F5[j][k][l / 2] == (E_F5[i - 1][cnt1][cnt2 / 2] + E_C[ij][k_c][l_c / 2] + energy)) { backtrack_f5(i - 1, cnt1, cnt2, structure, vc); backtrack_c(i, j, k_c, l_c, structure, vc); return; } } } } } } } } vrna_message_error("backtracking failed in f5"); } /* backtrack_c: trace back the structure enclosed by the base pair (i,j) from the distance-class entry E_C[ij][k][l/2] (k == -1 selects the remainder entry E_C_rem[ij]). Tries hairpin, interior-loop and multiloop decompositions in the same order as the forward recursion, writing '(' / ')' for (i,j) into `structure` and recursing into backtrack_c()/backtrack_m()/backtrack_m1(). */ PRIVATE void backtrack_c(unsigned int i, unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc) { unsigned int p, q, pq, ij, maxp, maxD1, maxD2; int *my_iindx, *jindx, type, type_2, energy, no_close, dangles, base_d1, base_d2, d1, d2, cnt1, cnt2, cnt3, cnt4, *rtype; int **l_min_C, **l_max_C, **l_min_M, **l_max_M, **l_min_M1, **l_max_M1; int *k_min_C, *k_max_C, *k_min_M, *k_max_M, *k_min_M1, *k_max_M1; int ***E_C, ***E_M, ***E_M1, *E_C_rem, *E_M_rem, *E_M1_rem; short *S1; unsigned int *referenceBPs1, *referenceBPs2; char *ptype, *sequence; vrna_param_t *P; vrna_md_t *md; vrna_mx_mfe_t *matrices; P = vc->params; md = &(P->model_details); matrices = vc->matrices; sequence = vc->sequence; S1 = vc->sequence_encoding; ptype = vc->ptype; rtype = &(md->rtype[0]); my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; dangles = md->dangles; E_C = matrices->E_C; l_min_C = matrices->l_min_C; l_max_C =
matrices->l_max_C; k_min_C = matrices->k_min_C; k_max_C = matrices->k_max_C; E_M = matrices->E_M; l_min_M = matrices->l_min_M; l_max_M = matrices->l_max_M; k_min_M = matrices->k_min_M; k_max_M = matrices->k_max_M; E_M1 = matrices->E_M1; l_min_M1 = matrices->l_min_M1; l_max_M1 = matrices->l_max_M1; k_min_M1 = matrices->k_min_M1; k_max_M1 = matrices->k_max_M1; E_C_rem = matrices->E_C_rem; E_M_rem = matrices->E_M_rem; E_M1_rem = matrices->E_M1_rem; maxD1 = vc->maxD1; maxD2 = vc->maxD2; ij = my_iindx[i] - j; int e = (k == -1) ? E_C_rem[ij] : E_C[ij][k][l / 2]; type = ptype[jindx[j] + i]; no_close = (((type == 3) || (type == 4)) && no_closingGU); structure[i - 1] = '('; structure[j - 1] = ')'; base_d1 = ((unsigned int)vc->reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)vc->reference_pt2[i] != j) ? 1 : -1; base_d1 += referenceBPs1[ij]; base_d2 += referenceBPs2[ij]; if (k == -1) { if (((unsigned int)base_d1 > maxD1) || ((unsigned int)base_d2 > maxD2)) if (e == E_Hairpin(j - i - 1, type, S1[i + 1], S1[j - 1], sequence + i - 1, P)) return; } else { if ((unsigned int)base_d1 == k) if ((unsigned int)base_d2 == l) if (E_Hairpin(j - i - 1, type, S1[i + 1], S1[j - 1], sequence + i - 1, P) == e) return; } maxp = MIN2(j - 2 - TURN, i + MAXLOOP + 1); for (p = i + 1; p <= maxp; p++) { unsigned int minq, ln_pre; minq = p + TURN + 1; ln_pre = j - i - 1; if (ln_pre > minq + MAXLOOP) minq = ln_pre - MAXLOOP - 1; for (q = minq; q < j; q++) { pq = my_iindx[p] - q; type_2 = ptype[jindx[q] + p]; if (type_2 == 0) continue; type_2 = rtype[type_2]; /* d2 = dbp(S_{i,j}, S_{p.q} + {i,j}) */ d1 = base_d1 - referenceBPs1[pq]; d2 = base_d2 - referenceBPs2[pq]; energy = E_IntLoop(p - i - 1, j - q - 1, type, type_2, S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P); if (k == -1) { if (E_C_rem[pq] != INF) { if (e == (E_C_rem[pq] + energy)) { backtrack_c(p, q, -1, -1, structure, vc); return; } } if (E_C[pq]) { for (cnt1 = k_min_C[pq]; cnt1 <= k_max_C[pq]; cnt1++) for (cnt2 = l_min_C[pq][cnt1]; 
cnt2 <= l_max_C[pq][cnt1]; cnt2 += 2) { if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) { if (e == (E_C[pq][cnt1][cnt2 / 2] + energy)) { backtrack_c(p, q, cnt1, cnt2, structure, vc); return; } } } } } else { if (!E_C[pq]) continue; if (d1 <= k && d2 <= l) { if ((k - d1 >= k_min_C[pq]) && (k - d1) <= k_max_C[pq]) { if ((l - d2 >= l_min_C[pq][k - d1]) && (l - d2 <= l_max_C[pq][k - d1])) { if (E_C[pq][k - d1][(l - d2) / 2] + energy == e) { backtrack_c(p, q, k - d1, l - d2, structure, vc); return; } } } } } } /* end q-loop */ } /* end p-loop */ /* multi-loop decomposition ------------------------*/ if (!no_close) { unsigned int u; int tt; if (k == -1) { for (u = i + TURN + 2; u < j - TURN - 2; u++) { int i1u, u1j1; i1u = my_iindx[i + 1] - u; u1j1 = my_iindx[u + 1] - j + 1; tt = rtype[type]; energy = P->MLclosing; if (dangles == 2) energy += E_MLstem(tt, S1[j - 1], S1[i + 1], P); else energy += E_MLstem(tt, -1, -1, P); if (E_M_rem[i1u] != INF) { if (E_M1[u1j1]) { for (cnt1 = k_min_M1[u1j1]; cnt1 <= k_max_M1[u1j1]; cnt1++) for (cnt2 = l_min_M1[u1j1][cnt1]; cnt2 <= l_max_M1[u1j1][cnt1]; cnt2 += 2) { if (e == (E_M_rem[i1u] + E_M1[u1j1][cnt1][cnt2 / 2] + energy)) { backtrack_m(i + 1, u, -1, -1, structure, vc); backtrack_m1(u + 1, j - 1, cnt1, cnt2, structure, vc); return; } } } if (E_M1_rem[u1j1] != INF) { if (e == (E_M_rem[i1u] + E_M1_rem[u1j1] + energy)) { backtrack_m(i + 1, u, -1, -1, structure, vc); backtrack_m1(u + 1, j - 1, -1, -1, structure, vc); return; } } } if (E_M1_rem[u1j1] != INF) { if (E_M[i1u]) { for (cnt1 = k_min_M[i1u]; cnt1 <= k_max_M[i1u]; cnt1++) for (cnt2 = l_min_M[i1u][cnt1]; cnt2 <= l_max_M[i1u][cnt1]; cnt2 += 2) if (e == (E_M[i1u][cnt1][cnt2 / 2] + E_M1_rem[u1j1] + energy)) { backtrack_m(i + 1, u, cnt1, cnt2, structure, vc); backtrack_m1(u + 1, j - 1, -1, -1, structure, vc); return; } } } /* now all cases where we exceed the maxD1/D2 scope by combination of E_M and E_M1 */ if (!E_M[i1u]) continue; if (!E_M1[u1j1]) continue; /* get distance to 
reference if closing this multiloop * dist3 = dbp(S_{i,j}, {i,j} + S_{i+1.u} + S_{u+1,j-1}) */ d1 = base_d1 - referenceBPs1[i1u] - referenceBPs1[u1j1]; d2 = base_d2 - referenceBPs2[i1u] - referenceBPs2[u1j1]; for (cnt1 = matrices->k_min_M[i1u]; cnt1 <= matrices->k_max_M[i1u]; cnt1++) for (cnt2 = matrices->l_min_M[i1u][cnt1]; cnt2 <= matrices->l_max_M[i1u][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_M1[u1j1]; cnt3 <= matrices->k_max_M1[u1j1]; cnt3++) for (cnt4 = matrices->l_min_M1[u1j1][cnt3]; cnt4 <= matrices->l_max_M1[u1j1][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) { if (e == (E_M[i1u][cnt1][cnt2 / 2] + E_M1[u1j1][cnt3][cnt4 / 2] + energy)) { backtrack_m(i + 1, u, cnt1, cnt2, structure, vc); backtrack_m1(u + 1, j - 1, cnt3, cnt4, structure, vc); return; } } } } } else { for (u = i + TURN + 2; u < j - TURN - 2; u++) { int i1u, u1j1; i1u = my_iindx[i + 1] - u; u1j1 = my_iindx[u + 1] - j + 1; if (!E_M[i1u]) continue; if (!E_M1[u1j1]) continue; /* get distance to reference if closing this multiloop * dist3 = dbp(S_{i,j}, {i,j} + S_{i+1.u} + S_{u+1,j-1}) */ d1 = base_d1 - referenceBPs1[i1u] - referenceBPs1[u1j1]; d2 = base_d2 - referenceBPs2[i1u] - referenceBPs2[u1j1]; tt = rtype[type]; energy = P->MLclosing; if (dangles == 2) energy += E_MLstem(tt, S1[j - 1], S1[i + 1], P); else energy += E_MLstem(tt, -1, -1, P); if ((d1 <= k) && (d2 <= l)) { for (cnt1 = k_min_M[i1u]; cnt1 <= MIN2(k - d1, k_max_M[i1u]); cnt1++) for (cnt2 = l_min_M[i1u][cnt1]; cnt2 <= MIN2(l - d2, l_max_M[i1u][cnt1]); cnt2 += 2) if (((k - d1 - cnt1) >= k_min_M1[u1j1]) && ((k - d1 - cnt1) <= k_max_M1[u1j1])) { if (((l - d2 - cnt2) >= l_min_M1[u1j1][k - d1 - cnt1]) && ((l - d2 - cnt2) <= l_max_M1[u1j1][k - d1 - cnt1])) { if (e == (energy + E_M[i1u][cnt1][cnt2 / 2] + E_M1[u1j1][k - d1 - cnt1][(l - d2 - cnt2) / 2])) { backtrack_m(i + 1, u, cnt1, cnt2, structure, vc); backtrack_m1(u + 1, j - 1, k - d1 - cnt1, l - d2 - cnt2, structure, vc); return; } } } } } } } 
/* no decomposition reproduced the stored pair energy: hard failure */
vrna_message_error("backtracking failed in c");
}


/*
 * Backtrace one interval [i,j] of the multiloop-component matrix E_M ("fML").
 * (k,l) select the distance-class entry to trace; k == -1 selects the
 * "remainder" entry E_M_rem[ij] instead, which collects states whose class
 * exceeds maxD1/maxD2 (see the (cnt + d) > maxD tests below).
 * Recurses into backtrack_m / backtrack_c and returns on the first match;
 * aborts via vrna_message_error() when no case reproduces the energy e.
 */
PRIVATE void
backtrack_m(unsigned int i, unsigned int j, int k, int l,
            char *structure, vrna_fold_compound_t *vc)
{
  unsigned int  u, ij, seq_length, base_d1, base_d2, d1, d2, maxD1, maxD2;
  int           *my_iindx, *jindx, type, energy, dangles, circ, cnt1, cnt2, cnt3, cnt4;
  int           **l_min_C, **l_max_C, **l_min_M, **l_max_M;
  int           *k_min_C, *k_max_C, *k_min_M, *k_max_M;
  int           ***E_C, ***E_M, *E_C_rem, *E_M_rem;
  short         *S1;
  unsigned int  *referenceBPs1, *referenceBPs2;
  char          *ptype;
  vrna_param_t  *P;
  vrna_md_t     *md;
  vrna_mx_mfe_t *matrices;

  /* local aliases into the fold compound and its DP matrices */
  P             = vc->params;
  md            = &(P->model_details);
  matrices      = vc->matrices;
  seq_length    = vc->length;
  S1            = vc->sequence_encoding;
  circ          = md->circ;
  ptype         = vc->ptype;
  my_iindx      = vc->iindx;
  jindx         = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  dangles       = md->dangles;
  E_C           = matrices->E_C;
  l_min_C       = matrices->l_min_C;
  l_max_C       = matrices->l_max_C;
  k_min_C       = matrices->k_min_C;
  k_max_C       = matrices->k_max_C;
  E_M           = matrices->E_M;
  l_min_M       = matrices->l_min_M;
  l_max_M       = matrices->l_max_M;
  k_min_M       = matrices->k_min_M;
  k_max_M       = matrices->k_max_M;
  E_C_rem       = matrices->E_C_rem;
  E_M_rem       = matrices->E_M_rem;
  maxD1         = vc->maxD1;
  maxD2         = vc->maxD2;
  ij            = my_iindx[i] - j;

  /* the energy value one of the decompositions below must reproduce */
  int e = (k == -1) ?
          E_M_rem[ij] : E_M[ij][k][l / 2];
  base_d1 = referenceBPs1[ij];
  base_d2 = referenceBPs2[ij];
  if (k == -1) {
    /* new_fML = ML(i+1,j)+c  (i unpaired) */
    d1 = base_d1 - referenceBPs1[my_iindx[i + 1] - j];
    d2 = base_d2 - referenceBPs2[my_iindx[i + 1] - j];
    if (E_M_rem[my_iindx[i + 1] - j] != INF) {
      if (e == (E_M_rem[my_iindx[i + 1] - j] + P->MLbase)) {
        backtrack_m(i + 1, j, -1, -1, structure, vc);
        return;
      }
    }

    /* regular classes of ML(i+1,j) that the shift (d1,d2) pushes beyond maxD */
    if (E_M[my_iindx[i + 1] - j]) {
      for (cnt1 = k_min_M[my_iindx[i + 1] - j]; cnt1 <= k_max_M[my_iindx[i + 1] - j]; cnt1++)
        for (cnt2 = l_min_M[my_iindx[i + 1] - j][cnt1]; cnt2 <= l_max_M[my_iindx[i + 1] - j][cnt1]; cnt2 += 2)
          if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
            if (e == (E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] + P->MLbase)) {
              backtrack_m(i + 1, j, cnt1, cnt2, structure, vc);
              return;
            }
          }
    }

    /* new_fML = min(ML(i,j-1) + c, new_fML)  (j unpaired) */
    d1 = base_d1 - referenceBPs1[ij + 1];
    d2 = base_d2 - referenceBPs2[ij + 1];
    if (E_M_rem[ij + 1] != INF) {
      if (e == (E_M_rem[ij + 1] + P->MLbase)) {
        backtrack_m(i, j - 1, -1, -1, structure, vc);
        return;
      }
    }

    if (E_M[ij + 1]) {
      for (cnt1 = k_min_M[ij + 1]; cnt1 <= k_max_M[ij + 1]; cnt1++)
        for (cnt2 = l_min_M[ij + 1][cnt1]; cnt2 <= l_max_M[ij + 1][cnt1]; cnt2 += 2)
          if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
            if (e == (E_M[ij + 1][cnt1][cnt2 / 2] + P->MLbase)) {
              backtrack_m(i, j - 1, cnt1, cnt2, structure, vc);
              return;
            }
          }
    }

    /* new_fML = min(new_fML, C(i,j)+b)  ([i,j] is a single stem) */
    if (E_C_rem[ij] != INF) {
      type = ptype[jindx[j] + i];
      if (dangles == 2)
        energy = E_MLstem(type, ((i > 1) || circ) ? S1[i - 1] : -1, ((j < seq_length) || circ) ?
                          S1[j + 1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);

      if (e == (E_C_rem[ij] + energy)) {
        backtrack_c(i, j, -1, -1, structure, vc);
        return;
      }
    }

    /* modular decomposition -------------------------------*/
    for (u = i + 1 + TURN; u <= j - 2 - TURN; u++) {
      int iu, uj;
      iu   = my_iindx[i] - u;
      uj   = my_iindx[u + 1] - j;
      type = ptype[jindx[j] + u + 1];
      d1   = base_d1 - referenceBPs1[iu] - referenceBPs1[uj];
      d2   = base_d2 - referenceBPs2[iu] - referenceBPs2[uj];
      if (dangles == 2)
        energy = E_MLstem(type, S1[u], (j < seq_length) || circ ? S1[j + 1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);

      /* remainder(M[i,u]) combined with regular or remainder C(u+1,j) */
      if (E_M_rem[iu] != INF) {
        if (E_C[uj]) {
          for (cnt1 = k_min_C[uj]; cnt1 <= k_max_C[uj]; cnt1++)
            for (cnt2 = l_min_C[uj][cnt1]; cnt2 <= l_max_C[uj][cnt1]; cnt2 += 2)
              if (e == (E_M_rem[iu] + E_C[uj][cnt1][cnt2 / 2] + energy)) {
                backtrack_m(i, u, -1, -1, structure, vc);
                backtrack_c(u + 1, j, cnt1, cnt2, structure, vc);
                return;
              }
        }

        if (E_C_rem[uj] != INF) {
          if (e == (E_M_rem[iu] + E_C_rem[uj] + energy)) {
            backtrack_m(i, u, -1, -1, structure, vc);
            backtrack_c(u + 1, j, -1, -1, structure, vc);
            return;
          }
        }
      }

      /* regular M[i,u] combined with remainder C(u+1,j) */
      if (E_C_rem[uj] != INF) {
        if (E_M[iu]) {
          for (cnt1 = k_min_M[iu]; cnt1 <= k_max_M[iu]; cnt1++)
            for (cnt2 = l_min_M[iu][cnt1]; cnt2 <= l_max_M[iu][cnt1]; cnt2 += 2)
              if (e == (E_M[iu][cnt1][cnt2 / 2] + E_C_rem[uj] + energy)) {
                backtrack_m(i, u, cnt1, cnt2, structure, vc);
                backtrack_c(u + 1, j, -1, -1, structure, vc);
                return;
              }
        }
      }

      if (!E_M[iu])
        continue;

      if (!E_C[uj])
        continue;

      /* regular + regular, but only class combos that exceed maxD1/maxD2 */
      for (cnt1 = k_min_M[iu]; cnt1 <= k_max_M[iu]; cnt1++)
        for (cnt2 = l_min_M[iu][cnt1]; cnt2 <= l_max_M[iu][cnt1]; cnt2 += 2)
          for (cnt3 = k_min_C[uj]; cnt3 <= k_max_C[uj]; cnt3++) {
            for (cnt4 = l_min_C[uj][cnt3]; cnt4 <= l_max_C[uj][cnt3]; cnt4 += 2)
              if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                if (e == (E_M[iu][cnt1][cnt2 / 2] + E_C[uj][cnt3][cnt4 / 2] + energy)) {
                  backtrack_m(i, u, cnt1, cnt2, structure, vc);
                  backtrack_c(u + 1, j, cnt3, cnt4, structure, vc);
                  return;
                }
              }
          }
    }
  } /* end if (k ==
-1) */
else {
  /* regular class (k,l): each candidate must land exactly in class (k,l)
   * after accounting for the base-pair-distance shift (d1,d2) */
  d1 = base_d1 - referenceBPs1[my_iindx[i + 1] - j];
  d2 = base_d2 - referenceBPs2[my_iindx[i + 1] - j];
  /* new_fML = ML(i+1,j)+c */
  if (d1 <= k && d2 <= l) {
    if ((k - d1 >= k_min_M[my_iindx[i + 1] - j]) && (k - d1 <= k_max_M[my_iindx[i + 1] - j])) {
      if ((l - d2 >= l_min_M[my_iindx[i + 1] - j][k - d1]) && (l - d2 <= l_max_M[my_iindx[i + 1] - j][k - d1])) {
        if (E_M[my_iindx[i + 1] - j][k - d1][(l - d2) / 2] + P->MLbase == e) {
          backtrack_m(i + 1, j, k - d1, l - d2, structure, vc);
          return;
        }
      }
    }
  }

  d1 = base_d1 - referenceBPs1[ij + 1];
  d2 = base_d2 - referenceBPs2[ij + 1];
  /* new_fML = min(ML(i,j-1) + c, new_fML) */
  if (E_M[ij + 1]) {
    if (d1 <= k && d2 <= l) {
      if ((k - d1 >= k_min_M[ij + 1]) && (k - d1 <= k_max_M[ij + 1])) {
        if ((l - d2 >= l_min_M[ij + 1][k - d1]) && (l - d2 <= l_max_M[ij + 1][k - d1])) {
          if (E_M[ij + 1][k - d1][(l - d2) / 2] + P->MLbase == e) {
            backtrack_m(i, j - 1, k - d1, l - d2, structure, vc);
            return;
          }
        }
      }
    }
  }

  /* new_fML = min(new_fML, C(i,j)+b) */
  if (E_C[ij]) {
    type = ptype[jindx[j] + i];
    if (dangles == 2)
      energy = E_MLstem(type, ((i > 1) || circ) ? S1[i - 1] : -1, ((j < seq_length) || circ) ? S1[j + 1] : -1, P);
    else
      energy = E_MLstem(type, -1, -1, P);

    if ((k >= k_min_C[ij]) && (k <= k_max_C[ij])) {
      if ((l >= l_min_C[ij][k]) && (l <= l_max_C[ij][k])) {
        if (E_C[ij][k][l / 2] + energy == e) {
          backtrack_c(i, j, k, l, structure, vc);
          return;
        }
      }
    }
  }

  /* modular decomposition -------------------------------*/
  for (u = i + 1 + TURN; u <= j - 2 - TURN; u++) {
    if (!E_M[my_iindx[i] - u])
      continue;

    if (!E_C[my_iindx[u + 1] - j])
      continue;

    type = ptype[jindx[j] + u + 1];
    d1   = base_d1 - referenceBPs1[my_iindx[i] - u] - referenceBPs1[my_iindx[u + 1] - j];
    d2   = base_d2 - referenceBPs2[my_iindx[i] - u] - referenceBPs2[my_iindx[u + 1] - j];
    if (dangles == 2)
      energy = E_MLstem(type, S1[u], ((j < seq_length) || circ) ?
                        S1[j + 1] : -1, P);
    else
      energy = E_MLstem(type, -1, -1, P);

    /* split classes between M[i,u] (cnt1,cnt2) and C(u+1,j) (the rest) */
    if (d1 <= k && d2 <= l) {
      for (cnt1 = k_min_M[my_iindx[i] - u]; cnt1 <= MIN2(k - d1, k_max_M[my_iindx[i] - u]); cnt1++)
        for (cnt2 = l_min_M[my_iindx[i] - u][cnt1]; cnt2 <= MIN2(l - d2, l_max_M[my_iindx[i] - u][cnt1]); cnt2 += 2)
          if ((k - d1 - cnt1 >= k_min_C[my_iindx[u + 1] - j]) && (k - d1 - cnt1 <= k_max_C[my_iindx[u + 1] - j])) {
            if ((l - d2 - cnt2 >= l_min_C[my_iindx[u + 1] - j][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_C[my_iindx[u + 1] - j][k - d1 - cnt1])) {
              if (E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + E_C[my_iindx[u + 1] - j][k - d1 - cnt1][(l - d2 - cnt2) / 2] + energy == e) {
                backtrack_m(i, u, cnt1, cnt2, structure, vc);
                backtrack_c(u + 1, j, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
                return;
              }
            }
          }
    }
  }
}
vrna_message_error("backtracking failed in fML\n");
}


/*
 * Backtrace an interval [i,j] of the M1 matrix (multiloop component that
 * ends in a stem at its 3' side).  (k,l) select the distance-class entry;
 * k == -1 selects the remainder entry E_M1_rem[ij].
 */
PRIVATE void
backtrack_m1(unsigned int i, unsigned int j, int k, int l,
             char *structure, vrna_fold_compound_t *vc)
{
  unsigned int  ij, seq_length, d1, d2, *referenceBPs1, *referenceBPs2, maxD1, maxD2;
  int           *my_iindx, *jindx, **l_min_C, **l_max_C, **l_min_M1, **l_max_M1;
  int           *k_min_C, *k_max_C, *k_min_M1, *k_max_M1, cnt1, cnt2;
  int           ***E_C, ***E_M1, *E_C_rem, *E_M1_rem, type, dangles, circ, energy, e_m1;
  short         *S1;
  char          *ptype;
  vrna_param_t  *P;
  vrna_md_t     *md;
  vrna_mx_mfe_t *matrices;

  /* local aliases into the fold compound and its DP matrices */
  P             = vc->params;
  md            = &(P->model_details);
  matrices      = vc->matrices;
  seq_length    = vc->length;
  S1            = vc->sequence_encoding;
  ptype         = vc->ptype;
  circ          = md->circ;
  my_iindx      = vc->iindx;
  jindx         = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  dangles       = md->dangles;
  E_C           = matrices->E_C;
  l_min_C       = matrices->l_min_C;
  l_max_C       = matrices->l_max_C;
  k_min_C       = matrices->k_min_C;
  k_max_C       = matrices->k_max_C;
  E_M1          = matrices->E_M1;
  l_min_M1      = matrices->l_min_M1;
  l_max_M1      = matrices->l_max_M1;
  k_min_M1      = matrices->k_min_M1;
  k_max_M1      = matrices->k_max_M1;
  E_C_rem       = matrices->E_C_rem;
  E_M1_rem      = matrices->E_M1_rem;
  maxD1         = vc->maxD1;
  maxD2         = vc->maxD2;
ij = my_iindx[i] - j; e_m1 = (k == -1) ? E_M1_rem[ij] : E_M1[ij][k][l / 2]; type = ptype[jindx[j] + i]; d1 = referenceBPs1[ij] - referenceBPs1[ij + 1]; d2 = referenceBPs2[ij] - referenceBPs2[ij + 1]; if (dangles == 2) energy = E_MLstem(type, (i > 1) || circ ? S1[i - 1] : -1, (j < seq_length) || circ ? S1[j + 1] : -1, P); else energy = E_MLstem(type, -1, -1, P); if (k == -1) { if (E_C_rem[ij] != INF) { if (e_m1 == (E_C_rem[ij] + energy)) { backtrack_c(i, j, -1, -1, structure, vc); return; } } if (E_M1_rem[ij + 1] != INF) { if (e_m1 == (E_M1_rem[ij + 1] + P->MLbase)) { backtrack_m1(i, j - 1, -1, -1, structure, vc); return; } } for (cnt1 = k_min_M1[ij + 1]; cnt1 <= k_max_M1[ij + 1]; cnt1++) for (cnt2 = l_min_M1[ij + 1][cnt1]; cnt2 <= l_max_M1[ij + 1][cnt1]; cnt2 += 2) if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) { if (e_m1 == (E_M1[ij + 1][cnt1][cnt2 / 2] + P->MLbase)) { backtrack_m1(i, j - 1, cnt1, cnt2, structure, vc); return; } } } else { if (E_C[ij]) { if ((k >= k_min_C[ij]) && (k <= k_max_C[ij])) { if ((l >= l_min_C[ij][k]) && (l <= l_max_C[ij][k])) { if (E_C[ij][k][l / 2] + energy == e_m1) { backtrack_c(i, j, k, l, structure, vc); return; } } } } if (d1 <= k && d2 <= l) { if ((k - d1 >= k_min_M1[ij + 1]) && (k - d1 <= k_max_M1[ij + 1])) { if ((l - d2 >= l_min_M1[ij + 1][k - d1]) && (l - d2 <= l_max_M1[ij + 1][k - d1])) { if (E_M1[ij + 1][k - d1][(l - d2) / 2] + P->MLbase == e_m1) { backtrack_m1(i, j - 1, k - d1, l - d2, structure, vc); return; } } } } } vrna_message_error("backtack failed in m1\n"); } PRIVATE void backtrack_fc(int k, int l, char *structure, vrna_fold_compound_t *vc) { unsigned int d, i, j, seq_length, base_d1, base_d2, d1, d2, maxD1, maxD2; int *my_iindx, *jindx, energy, cnt1, cnt2, cnt3, cnt4, *rtype; short *S1; unsigned int *referenceBPs1, *referenceBPs2; char *sequence, *ptype; int **E_Fc, **E_FcH, **E_FcI, **E_FcM, ***E_C, ***E_M, ***E_M2; int *E_C_rem, *E_M_rem, *E_M2_rem, E_Fc_rem, E_FcH_rem, E_FcI_rem, E_FcM_rem; int **l_min_C, 
**l_max_C, *k_min_C, *k_max_C; int **l_min_M, **l_max_M, *k_min_M, *k_max_M; int **l_min_M2, **l_max_M2, *k_min_M2, *k_max_M2; int *l_min_FcH, *l_max_FcH, k_min_FcH, k_max_FcH; int *l_min_FcI, *l_max_FcI, k_min_FcI, k_max_FcI; int *l_min_FcM, *l_max_FcM, k_min_FcM, k_max_FcM; vrna_param_t *P; vrna_md_t *md; vrna_mx_mfe_t *matrices; P = vc->params; md = &(P->model_details); matrices = vc->matrices; sequence = vc->sequence; seq_length = vc->length; S1 = vc->sequence_encoding; ptype = vc->ptype; rtype = &(md->rtype[0]); my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; base_d1 = referenceBPs1[my_iindx[1] - seq_length]; base_d2 = referenceBPs2[my_iindx[1] - seq_length]; E_C = matrices->E_C; l_min_C = matrices->l_min_C; l_max_C = matrices->l_max_C; k_min_C = matrices->k_min_C; k_max_C = matrices->k_max_C; E_M = matrices->E_M; l_min_M = matrices->l_min_M; l_max_M = matrices->l_max_M; k_min_M = matrices->k_min_M; k_max_M = matrices->k_max_M; E_M2 = matrices->E_M2; l_min_M2 = matrices->l_min_M2; l_max_M2 = matrices->l_max_M2; k_min_M2 = matrices->k_min_M2; k_max_M2 = matrices->k_max_M2; E_Fc = matrices->E_Fc; E_FcI = matrices->E_FcI; l_min_FcI = matrices->l_min_FcI; l_max_FcI = matrices->l_max_FcI; k_min_FcI = matrices->k_min_FcI; k_max_FcI = matrices->k_max_FcI; E_FcH = matrices->E_FcH; l_min_FcH = matrices->l_min_FcH; l_max_FcH = matrices->l_max_FcH; k_min_FcH = matrices->k_min_FcH; k_max_FcH = matrices->k_max_FcH; E_FcM = matrices->E_FcM; l_min_FcM = matrices->l_min_FcM; l_max_FcM = matrices->l_max_FcM; k_min_FcM = matrices->k_min_FcM; k_max_FcM = matrices->k_max_FcM; E_C_rem = matrices->E_C_rem; E_M_rem = matrices->E_M_rem; E_M2_rem = matrices->E_M2_rem; E_Fc_rem = matrices->E_Fc_rem; E_FcH_rem = matrices->E_FcH_rem; E_FcI_rem = matrices->E_FcI_rem; E_FcM_rem = matrices->E_FcM_rem; maxD1 = vc->maxD1; maxD2 = vc->maxD2; if (k == -1) { /* check if mfe might be open chain */ if (E_Fc_rem == 0) if 
/* (condition continues the open-chain check opened on the previous line) */
((referenceBPs1[my_iindx[1] - seq_length] > maxD1) || (referenceBPs2[my_iindx[1] - seq_length] > maxD2))
      return;

  /* check for hairpin configurations */
  if (E_Fc_rem == E_FcH_rem) {
    for (d = TURN + 2; d <= seq_length; d++) /* i,j in [1..length] */
      for (j = d; j <= seq_length; j++) {
        unsigned int  u, ij;
        int           type, no_close;
        char          loopseq[10];
        i  = j - d + 1;
        ij = my_iindx[i] - j;
        u  = seq_length - j + i - 1;          /* exterior (wrap-around) loop size */
        if (u < TURN)
          continue;

        type     = ptype[jindx[j] + i];
        no_close = (((type == 3) || (type == 4)) && no_closingGU);
        type     = rtype[type];
        if (!type)
          continue;

        if (no_close)
          continue;

        d1 = base_d1 - referenceBPs1[ij];
        d2 = base_d2 - referenceBPs2[ij];
        /* NOTE(review): loopseq is only filled for u < 7 but is passed to
         * E_Hairpin unconditionally; presumably E_Hairpin reads the loop
         * sequence only for short loops -- verify against its API. */
        if (u < 7) {
          strcpy(loopseq, sequence + j - 1);
          strncat(loopseq, sequence, i);
        }

        energy = E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, P);
        if (E_C_rem[ij] != INF) {
          if (E_Fc_rem == (E_C_rem[ij] + energy)) {
            backtrack_c(i, j, -1, -1, structure, vc);
            return;
          }
        }

        if (E_C[ij]) {
          for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++)
            for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2)
              if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
                if (E_Fc_rem == (E_C[ij][cnt1][cnt2 / 2] + energy)) {
                  backtrack_c(i, j, cnt1, cnt2, structure, vc);
                  return;
                }
              }
        }
      }
  }

  /* check for interior loop configurations */
  if (E_Fc_rem == E_FcI_rem) {
    for (d = TURN + 2; d <= seq_length; d++) /* i,j in [1..length] */
      for (j = d; j <= seq_length; j++) {
        unsigned int  u, ij, p, q, pq;
        int           type, type_2;
        i  = j - d + 1;
        ij = my_iindx[i] - j;
        u  = seq_length - j + i - 1;
        if (u < TURN)
          continue;

        type = rtype[(unsigned int)ptype[jindx[j] + i]];
        if (!type)
          continue;

        /* enumerate the second stem (p,q) of the exterior interior loop */
        for (p = j + 1; p < seq_length; p++) {
          unsigned int u1, qmin, ln_pre;
          u1 = p - j - 1;
          if (u1 + i - 1 > MAXLOOP)
            break;

          qmin   = p + TURN + 1;
          ln_pre = u1 + i + seq_length;
          if (ln_pre > qmin + MAXLOOP)
            qmin = ln_pre - MAXLOOP - 1;

          for (q = qmin; q <= seq_length; q++) {
            unsigned int u2;
            pq     = my_iindx[p] - q;
            type_2 = rtype[(unsigned int)ptype[jindx[q] + p]];
            if (type_2 == 0)
              continue;

            u2 = i - 1 + seq_length - q;
            if (u1 + u2 > MAXLOOP)
              continue;

            energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P);
            /* remainder(C[ij]) combined with regular or remainder C[pq] */
            if (E_C_rem[ij] != INF) {
              if (E_C[pq]) {
                for (cnt1 = k_min_C[pq]; cnt1 <= k_max_C[pq]; cnt1++)
                  for (cnt2 = l_min_C[pq][cnt1]; cnt2 <= l_max_C[pq][cnt1]; cnt2 += 2)
                    if (E_Fc_rem == (E_C_rem[ij] + E_C[pq][cnt1][cnt2 / 2] + energy)) {
                      backtrack_c(i, j, -1, -1, structure, vc);
                      backtrack_c(p, q, cnt1, cnt2, structure, vc);
                      return;
                    }
              }

              if (E_C_rem[pq] != INF) {
                if (E_Fc_rem == (E_C_rem[ij] + E_C_rem[pq] + energy)) {
                  backtrack_c(i, j, -1, -1, structure, vc);
                  backtrack_c(p, q, -1, -1, structure, vc);
                  return;
                }
              }
            }

            /* regular C[ij] combined with remainder C[pq] */
            if (E_C_rem[pq] != INF) {
              if (E_C[ij]) {
                for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++)
                  for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2)
                    if (E_Fc_rem == (E_C[ij][cnt1][cnt2 / 2] + E_C_rem[pq] + energy)) {
                      backtrack_c(i, j, cnt1, cnt2, structure, vc);
                      backtrack_c(p, q, -1, -1, structure, vc);
                      return;
                    }
              }
            }

            if (!(E_C[ij]))
              continue;

            if (!(E_C[pq]))
              continue;

            /* get distance to reference if closing the interior loop
             * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
             * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j}) */
            d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
            d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
            /* regular + regular, only combos beyond maxD1/maxD2 */
            for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++)
              for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2)
                for (cnt3 = k_min_C[pq]; cnt3 <= k_max_C[pq]; cnt3++)
                  for (cnt4 = l_min_C[pq][cnt3]; cnt4 <= l_max_C[pq][cnt3]; cnt4 += 2)
                    if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                      if (E_Fc_rem == (E_C[ij][cnt1][cnt2 / 2] + E_C[pq][cnt3][cnt4 / 2] + energy)) {
                        backtrack_c(i, j, cnt1, cnt2, structure, vc);
                        backtrack_c(p, q, cnt3, cnt4, structure, vc);
                        return;
                      }
                    }
          } /* end for q */
        } /* end for p */
      }
  }

  /* check for multi loop configurations */
  if (E_Fc_rem == E_FcM_rem) {
    if (seq_length > 2 * TURN) {
      for (i = TURN + 1; i < seq_length - 2 * TURN; i++) {
        /* get distancies to references
         * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
         * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n}) */
        if (E_M_rem[my_iindx[1] - i] != INF) {
          if (E_M2[i + 1]) {
            for (cnt1 = k_min_M2[i + 1]; cnt1 <= k_max_M2[i + 1]; cnt1++)
              for (cnt2 = l_min_M2[i + 1][cnt1]; cnt2 <= l_max_M2[i + 1][cnt1]; cnt2 += 2)
                if (E_Fc_rem == (E_M_rem[my_iindx[1] - i] + E_M2[i + 1][cnt1][cnt2 / 2] + P->MLclosing)) {
                  backtrack_m(1, i, -1, -1, structure, vc);
                  backtrack_m2(i + 1, cnt1, cnt2, structure, vc);
                  return;
                }
          }

          if (E_M2_rem[i + 1] != INF) {
            if (E_Fc_rem == (E_M_rem[my_iindx[1] - i] + E_M2_rem[i + 1] + P->MLclosing)) {
              backtrack_m(1, i, -1, -1, structure, vc);
              backtrack_m2(i + 1, -1, -1, structure, vc);
              return;
            }
          }
        }

        if (E_M2_rem[i + 1] != INF) {
          if (E_M[my_iindx[1] - i]) {
            for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= k_max_M[my_iindx[1] - i]; cnt1++)
              for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= l_max_M[my_iindx[1] - i][cnt1]; cnt2 += 2)
                if (E_Fc_rem == (E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + E_M2_rem[i + 1] + P->MLclosing)) {
                  backtrack_m(1, i, cnt1, cnt2, structure, vc);
                  backtrack_m2(i + 1, -1, -1, structure, vc);
                  return;
                }
          }
        }

        if (!(E_M[my_iindx[1] - i]))
          continue;

        if (!(E_M2[i + 1]))
          continue;

        d1 = base_d1 - referenceBPs1[my_iindx[1] - i] - referenceBPs1[my_iindx[i + 1] - seq_length];
        d2 = base_d2 - referenceBPs2[my_iindx[1] - i] - referenceBPs2[my_iindx[i + 1] - seq_length];
        for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= k_max_M[my_iindx[1] - i]; cnt1++)
          for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= l_max_M[my_iindx[1] - i][cnt1]; cnt2 += 2)
            for (cnt3 = k_min_M2[i + 1]; cnt3 <= k_max_M2[i + 1]; cnt3++)
              for (cnt4 = l_min_M2[i + 1][cnt3]; cnt4 <= l_max_M2[i + 1][cnt3]; cnt4 += 2)
                if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                  if (E_Fc_rem == (E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + E_M2[i + 1][cnt3][cnt4 / 2] + P->MLclosing)) {
                    backtrack_m(1, i, cnt1, cnt2, structure, vc);
                    backtrack_m2(i + 1, cnt3, cnt4, structure, vc);
                    return;
                  }
                }
      }
    }
  }
} else {
  /* regular class (k,l) requested */
  /* open chain ? */
  if (E_Fc[k][l / 2] == 0)
    if ((k == referenceBPs1[my_iindx[1] - seq_length]) && (l == referenceBPs2[my_iindx[1] - seq_length]))
      return;

  /* hairpin (exterior loop) configurations */
  if ((k >= k_min_FcH) && (k <= k_max_FcH)) {
    if ((l >= l_min_FcH[k]) && (l <= l_max_FcH[k])) {
      if (E_Fc[k][l / 2] == E_FcH[k][l / 2]) {
        for (d = TURN + 2; d <= seq_length; d++) /* i,j in [1..length] */
          for (j = d; j <= seq_length; j++) {
            unsigned int  u, ij;
            int           type, no_close;
            char          loopseq[10];
            i  = j - d + 1;
            ij = my_iindx[i] - j;
            if (!E_C[ij])
              continue;

            u = seq_length - j + i - 1;
            if (u < TURN)
              continue;

            type     = ptype[jindx[j] + i];
            no_close = (((type == 3) || (type == 4)) && no_closingGU);
            type     = rtype[type];
            if (!type)
              continue;

            if (no_close)
              continue;

            d1 = base_d1 - referenceBPs1[ij];
            d2 = base_d2 - referenceBPs2[ij];
            /* NOTE(review): loopseq only filled for u < 7, see remark in the
             * k == -1 branch above -- verify E_Hairpin's contract */
            if (u < 7) {
              strcpy(loopseq, sequence + j - 1);
              strncat(loopseq, sequence, i);
            }

            energy = E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, P);
            if ((k >= d1) && (l >= d2)) {
              if ((k - d1 >= k_min_C[ij]) && (k - d1 <= k_max_C[ij])) {
                if ((l - d2 >= l_min_C[ij][k - d1]) && (l - d2 <= l_max_C[ij][k - d1])) {
                  if (E_Fc[k][l / 2] == E_C[ij][k - d1][(l - d2) / 2] + energy) {
                    backtrack_c(i, j, k - d1, l - d2, structure, vc);
                    return;
                  }
                }
              }
            }
          }
      }
    }
  }

  /* interior loop configurations */
  if ((k >= k_min_FcI) && (k <= k_max_FcI)) {
    if ((l >= l_min_FcI[k]) && (l <= l_max_FcI[k])) {
      if (E_Fc[k][l / 2] == E_FcI[k][l / 2]) {
        for (d = TURN + 2; d <= seq_length; d++) /* i,j in [1..length] */
          for (j = d; j <= seq_length; j++) {
            unsigned int  u, ij, p, q, pq;
            int           type, type_2;
            i  = j - d + 1;
            ij = my_iindx[i] - j;
            if (!E_C[ij])
              continue;

            u = seq_length - j + i - 1;
            if (u < TURN)
              continue;

            type = ptype[jindx[j] + i];
            type = rtype[type];
            if (!type)
              continue;

            for (p = j + 1; p < seq_length; p++) {
              unsigned int u1, qmin, ln_pre;
              u1 = p - j - 1;
              if (u1 + i - 1 > MAXLOOP)
                break;

              qmin   = p + TURN + 1;
              ln_pre = u1 + i + seq_length;
              if (ln_pre > qmin + MAXLOOP)
                qmin = ln_pre - MAXLOOP - 1;

              for (q = qmin; q <= seq_length; q++) {
                unsigned int u2;
                pq = my_iindx[p] - q;
                if (!E_C[pq])
                  continue;

                type_2 = rtype[(unsigned int)ptype[jindx[q] + p]];
                if (type_2 == 0)
                  continue;

                u2 = i - 1 + seq_length - q;
                if (u1 + u2 > MAXLOOP)
                  continue;

                /* get distance to reference if closing the interior loop
                 * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
                 * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j}) */
                d1     = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
                d2     = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
                energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P);
                /* split remaining classes between C[ij] and C[pq] */
                if ((k >= d1) && (l >= d2)) {
                  for (cnt1 = k_min_C[ij]; cnt1 <= MIN2(k_max_C[ij], k - d1); cnt1++)
                    for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= MIN2(l_max_C[ij][cnt1], l - d2); cnt2 += 2)
                      if ((k - d1 - cnt1 >= k_min_C[pq]) && (k - d1 - cnt1 <= k_max_C[pq])) {
                        if ((l - d2 - cnt2 >= l_min_C[pq][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_C[pq][k - d1 - cnt1])) {
                          if ((E_C[ij][cnt1][cnt2 / 2] + E_C[pq][k - d1 - cnt1][(l - d2 - cnt2) / 2] + energy) == E_Fc[k][l / 2]) {
                            backtrack_c(i, j, cnt1, cnt2, structure, vc);
                            backtrack_c(p, q, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
                            return;
                          }
                        }
                      }
                }
              }
            }
          }
      }
    }
  }

  /* multi loop configurations */
  if ((k >= k_min_FcM) && (k <= k_max_FcM)) {
    if ((l >= l_min_FcM[k]) && (l <= l_max_FcM[k])) {
      if (E_Fc[k][l / 2] == E_FcM[k][l / 2]) {
        if (seq_length > 2 * TURN) {
          for (i = TURN + 1; i < seq_length - 2 * TURN; i++) {
            /* get distancies to references
             * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
             * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n}) */
            if (!E_M[my_iindx[1] - i])
              continue;

            if (!E_M2[i + 1])
              continue;

            d1 = base_d1 - referenceBPs1[my_iindx[1] - i] - referenceBPs1[my_iindx[i + 1] - seq_length];
            d2 = base_d2 - referenceBPs2[my_iindx[1] - i] - referenceBPs2[my_iindx[i + 1] - seq_length];
            if ((k >= d1) && (l >= d2)) {
              for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= MIN2(k_max_M[my_iindx[1] - i], k - d1); cnt1++)
                for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= MIN2(l_max_M[my_iindx[1] - i][cnt1], l - d2); cnt2 += 2)
                  if ((k - d1 - cnt1 >= k_min_M2[i + 1]) && (k -
/* (condition continues the FcM class-range check from the previous line) */
d1 - cnt1 <= k_max_M2[i + 1])) {
                    if ((l - d2 - cnt2 >= l_min_M2[i + 1][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_M2[i + 1][k - d1 - cnt1])) {
                      if ((E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + E_M2[i + 1][k - d1 - cnt1][(l - d2 - cnt2) / 2] + P->MLclosing) == E_FcM[k][l / 2]) {
                        backtrack_m(1, i, cnt1, cnt2, structure, vc);
                        backtrack_m2(i + 1, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
                        return;
                      }
                    }
                  }
            }
          }
        }
      }
    }
  }
}
vrna_message_error("backtack failed in fc\n");
}


/*
 * Backtrace the circular multiloop helper matrix M2 for the suffix starting
 * at position i (up to sequence length n): a decomposition into two M1
 * parts, M1(i,j) + M1(j+1,n).  (k,l) select the distance class; k == -1
 * selects the remainder entry E_M2_rem[i].
 */
PRIVATE void
backtrack_m2(unsigned int i, int k, int l,
             char *structure, vrna_fold_compound_t *vc)
{
  unsigned int  j, ij, j3, n;
  unsigned int  *referenceBPs1, *referenceBPs2;
  unsigned int  d1, d2, base_d1, base_d2, maxD1, maxD2;
  int           *my_iindx, cnt1, cnt2, cnt3, cnt4;
  int           ***E_M1, ***E_M2, *E_M2_rem, *E_M1_rem, e;
  int           **l_min_M1, **l_max_M1, *k_min_M1, *k_max_M1;
  vrna_mx_mfe_t *matrices;

  /* local aliases into the fold compound and its DP matrices */
  matrices      = vc->matrices;
  n             = vc->length;
  my_iindx      = vc->iindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  E_M1          = matrices->E_M1;
  l_min_M1      = matrices->l_min_M1;
  l_max_M1      = matrices->l_max_M1;
  k_min_M1      = matrices->k_min_M1;
  k_max_M1      = matrices->k_max_M1;
  E_M1_rem      = matrices->E_M1_rem;
  E_M2          = matrices->E_M2;
  E_M2_rem      = matrices->E_M2_rem;
  maxD1         = vc->maxD1;
  maxD2         = vc->maxD2;
  base_d1       = referenceBPs1[my_iindx[i] - n];
  base_d2       = referenceBPs2[my_iindx[i] - n];
  if (k == -1) {
    /* remainder entry: try all splits j with at least one remainder part */
    e = E_M2_rem[i];
    for (j = i + TURN + 1; j < n - TURN - 1; j++) {
      if (E_M1_rem[my_iindx[i] - j] != INF) {
        if (E_M1[my_iindx[j + 1] - n]) {
          for (cnt1 = k_min_M1[my_iindx[j + 1] - n]; cnt1 <= k_max_M1[my_iindx[j + 1] - n]; cnt1++)
            /* NOTE(review): this l-loop advances cnt2 by 1, whereas every
             * sibling loop over an l-range in this file advances by 2; odd
             * cnt2 values alias the cnt2 - 1 entry via cnt2 / 2.  Looks
             * like `cnt2 += 2` was intended -- confirm against upstream. */
            for (cnt2 = l_min_M1[my_iindx[j + 1] - n][cnt1]; cnt2 <= l_max_M1[my_iindx[j + 1] - n][cnt1]; cnt2++)
              if (e == E_M1_rem[my_iindx[i] - j] + E_M1[my_iindx[j + 1] - n][cnt1][cnt2 / 2]) {
                /* k and l are both -1 here, i.e. the remainder entry */
                backtrack_m1(i, j, k, l, structure, vc);
                backtrack_m1(j + 1, n, cnt1, cnt2, structure, vc);
                return;
              }
        }

        if (E_M1_rem[my_iindx[j + 1] - n] != INF) {
          if (e == E_M1_rem[my_iindx[i] - j] + E_M1_rem[my_iindx[j + 1] - n]) {
            backtrack_m1(i, j, k, l, structure, vc);
            backtrack_m1(j + 1, n, k, l, structure, vc);
            return;
          }
        }
      }

      if (E_M1_rem[my_iindx[j + 1] - n] != INF) {
        if (E_M1[my_iindx[i] - j]) {
          for (cnt1 = k_min_M1[my_iindx[i] - j]; cnt1 <= k_max_M1[my_iindx[i] - j]; cnt1++)
            for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1]; cnt2 <= l_max_M1[my_iindx[i] - j][cnt1]; cnt2 += 2)
              if (e == E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1_rem[my_iindx[j + 1] - n]) {
                backtrack_m1(i, j, cnt1, cnt2, structure, vc);
                backtrack_m1(j + 1, n, k, l, structure, vc);
                return;
              }
        }
      }

      if (!E_M1[my_iindx[i] - j])
        continue;

      if (!E_M1[my_iindx[j + 1] - n])
        continue;

      d1 = referenceBPs1[my_iindx[i] - n] - referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[j + 1] - n];
      d2 = referenceBPs2[my_iindx[i] - n] - referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[j + 1] - n];
      /* regular + regular, but only class combinations beyond maxD1/maxD2 */
      for (cnt1 = k_min_M1[my_iindx[i] - j]; cnt1 <= k_max_M1[my_iindx[i] - j]; cnt1++)
        for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1]; cnt2 <= l_max_M1[my_iindx[i] - j][cnt1]; cnt2 += 2) {
          for (cnt3 = k_min_M1[my_iindx[j + 1] - n]; cnt3 <= k_max_M1[my_iindx[j + 1] - n]; cnt3++)
            for (cnt4 = l_min_M1[my_iindx[j + 1] - n][cnt3]; cnt4 <= l_max_M1[my_iindx[j + 1] - n][cnt3]; cnt4 += 2) {
              if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                if (e == E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1[my_iindx[j + 1] - n][cnt3][cnt4 / 2]) {
                  backtrack_m1(i, j, cnt1, cnt2, structure, vc);
                  backtrack_m1(j + 1, n, cnt3, cnt4, structure, vc);
                  return;
                }
              }
            }
        }
    }
  } else {
    /* regular class (k,l): split it between the two M1 parts */
    for (j = i + TURN + 1; j < n - TURN - 1; j++) {
      if (!E_M1[my_iindx[i] - j])
        continue;

      if (!E_M1[my_iindx[j + 1] - n])
        continue;

      ij = my_iindx[i] - j;
      j3 = my_iindx[j + 1] - n;
      d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[j3];
      d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[j3];
      for (cnt1 = k_min_M1[ij]; cnt1 <= MIN2(k_max_M1[ij], k - d1); cnt1++)
        for (cnt2 = l_min_M1[ij][cnt1]; cnt2 <= MIN2(l_max_M1[ij][cnt1], l - d2); cnt2 += 2)
          if ((k - d1 - cnt1 >= k_min_M1[j3]) && (k - d1 - cnt1 <= k_max_M1[j3])) {
            if ((l
- d2 - cnt2 >= l_min_M1[j3][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_M1[j3][k - d1 - cnt1])) { if (E_M1[ij][cnt1][cnt2 / 2] + E_M1[j3][k - d1 - cnt1][(l - d2 - cnt2) / 2] == E_M2[i][k][l / 2]) { backtrack_m1(i, j, cnt1, cnt2, structure, vc); backtrack_m1(j + 1, n, k - d1 - cnt1, l - d2 - cnt2, structure, vc); return; } } } } } vrna_message_error("backtack failed in m2\n"); } PRIVATE void mfe_circ(vrna_fold_compound_t *vc) { unsigned int d, i, j, maxD1, maxD2, seq_length, *referenceBPs1, *referenceBPs2, d1, d2, base_d1, base_d2, *mm1, *mm2, *bpdist; int *my_iindx, *jindx, energy, cnt1, cnt2, cnt3, cnt4, *rtype; short *S1; char *sequence, *ptype; int ***E_C, ***E_M, ***E_M1; int *E_C_rem, *E_M_rem, *E_M1_rem; int **l_min_C, **l_max_C, **l_min_M, **l_max_M, **l_min_M1, **l_max_M1; int *k_min_C, *k_max_C, *k_min_M, *k_max_M, *k_min_M1, *k_max_M1; vrna_param_t *P; vrna_md_t *md; vrna_mx_mfe_t *matrices; P = vc->params; md = &(P->model_details); matrices = vc->matrices; sequence = vc->sequence; seq_length = vc->length; maxD1 = vc->maxD1; maxD2 = vc->maxD2; S1 = vc->sequence_encoding; ptype = vc->ptype; rtype = &(md->rtype[0]); my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; mm1 = vc->mm1; mm2 = vc->mm2; bpdist = vc->bpdist; E_C = matrices->E_C; l_min_C = matrices->l_min_C; l_max_C = matrices->l_max_C; k_min_C = matrices->k_min_C; k_max_C = matrices->k_max_C; E_M = matrices->E_M; l_min_M = matrices->l_min_M; l_max_M = matrices->l_max_M; k_min_M = matrices->k_min_M; k_max_M = matrices->k_max_M; E_M1 = matrices->E_M1; l_min_M1 = matrices->l_min_M1; l_max_M1 = matrices->l_max_M1; k_min_M1 = matrices->k_min_M1; k_max_M1 = matrices->k_max_M1; E_C_rem = matrices->E_C_rem; E_M_rem = matrices->E_M_rem; E_M1_rem = matrices->E_M1_rem; #ifdef _OPENMP #pragma omp parallel for private(d1,d2,cnt1,cnt2,cnt3,cnt4,j, i) #endif for (i = 1; i < seq_length - TURN - 1; i++) { /* guess memory requirements for M2 */ int min_k, 
max_k, max_l, min_l; int min_k_real, max_k_real, *min_l_real, *max_l_real; min_k = min_l = 0; max_k = mm1[my_iindx[i] - seq_length] + referenceBPs1[my_iindx[i] - seq_length]; max_l = mm2[my_iindx[i] - seq_length] + referenceBPs2[my_iindx[i] - seq_length]; prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[i] - seq_length], &matrices->k_min_M2[i], &matrices->k_max_M2[i], &matrices->l_min_M2[i], &matrices->l_max_M2[i] ); prepareArray(&matrices->E_M2[i], matrices->k_min_M2[i], matrices->k_max_M2[i], matrices->l_min_M2[i], matrices->l_max_M2[i] ); preparePosteriorBoundaries(matrices->k_max_M2[i] - matrices->k_min_M2[i] + 1, matrices->k_min_M2[i], &min_k_real, &max_k_real, &min_l_real, &max_l_real ); /* begin filling of M2 array */ for (j = i + TURN + 1; j < seq_length - TURN - 1; j++) { if (E_M1_rem[my_iindx[i] - j] != INF) { if (E_M1[my_iindx[j + 1] - seq_length]) { for (cnt1 = k_min_M1[my_iindx[j + 1] - seq_length]; cnt1 <= k_max_M1[my_iindx[j + 1] - seq_length]; cnt1++) for (cnt2 = l_min_M1[my_iindx[j + 1] - seq_length][cnt1]; cnt2 <= l_max_M1[my_iindx[j + 1] - seq_length][cnt1]; cnt2++) matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i], E_M1_rem[my_iindx[i] - j] + E_M1[my_iindx[j + 1] - seq_length][cnt1][cnt2 / 2] ); } if (E_M1_rem[my_iindx[j + 1] - seq_length] != INF) matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i], E_M1_rem[my_iindx[i] - j] + E_M1_rem[my_iindx[j + 1] - seq_length]); } if (E_M1_rem[my_iindx[j + 1] - seq_length] != INF) { if (E_M1[my_iindx[i] - j]) { for (cnt1 = k_min_M1[my_iindx[i] - j]; cnt1 <= k_max_M1[my_iindx[i] - j]; cnt1++) for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1]; cnt2 <= l_max_M1[my_iindx[i] - j][cnt1]; cnt2 += 2) matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i], E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1_rem[my_iindx[j + 1] - seq_length] ); } } if (!E_M1[my_iindx[i] - j]) continue; if (!E_M1[my_iindx[j + 1] - seq_length]) continue; d1 = referenceBPs1[my_iindx[i] - seq_length] - referenceBPs1[my_iindx[i] - j] - 
referenceBPs1[my_iindx[j + 1] - seq_length]; d2 = referenceBPs2[my_iindx[i] - seq_length] - referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[j + 1] - seq_length]; for (cnt1 = k_min_M1[my_iindx[i] - j]; cnt1 <= k_max_M1[my_iindx[i] - j]; cnt1++) for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1]; cnt2 <= l_max_M1[my_iindx[i] - j][cnt1]; cnt2 += 2) { for (cnt3 = k_min_M1[my_iindx[j + 1] - seq_length]; cnt3 <= k_max_M1[my_iindx[j + 1] - seq_length]; cnt3++) for (cnt4 = l_min_M1[my_iindx[j + 1] - seq_length][cnt3]; cnt4 <= l_max_M1[my_iindx[j + 1] - seq_length][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) { matrices->E_M2[i][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2(matrices->E_M2[i][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2], E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1[my_iindx[j + 1] - seq_length][cnt3][cnt4 / 2] ); updatePosteriorBoundaries(cnt1 + cnt3 + d1, cnt2 + cnt4 + d2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } else { matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i], E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1[my_iindx[j + 1] - seq_length][cnt3][cnt4 / 2] ); } } } } /* resize and move memory portions of energy matrix E_M2 */ adjustArrayBoundaries(&matrices->E_M2[i], &matrices->k_min_M2[i], &matrices->k_max_M2[i], &matrices->l_min_M2[i], &matrices->l_max_M2[i], min_k_real, max_k_real, min_l_real, max_l_real ); } /* end for i */ base_d1 = referenceBPs1[my_iindx[1] - seq_length]; base_d2 = referenceBPs2[my_iindx[1] - seq_length]; /* guess memory requirements for E_FcH, E_FcI and E_FcM */ int min_k, max_k, max_l, min_l; int min_k_real, max_k_real, min_k_real_fcH, max_k_real_fcH, min_k_real_fcI, max_k_real_fcI, min_k_real_fcM, max_k_real_fcM; int *min_l_real, *max_l_real, *min_l_real_fcH, *max_l_real_fcH, *min_l_real_fcI, *max_l_real_fcI, *min_l_real_fcM, *max_l_real_fcM; max_l_real_fcM = min_l_real_fcM = NULL; max_l_real_fcI = min_l_real_fcI = NULL; max_l_real_fcH = min_l_real_fcH = NULL; 
max_l_real = min_l_real = NULL; min_k = min_l = 0; max_k = mm1[my_iindx[1] - seq_length] + referenceBPs1[my_iindx[1] - seq_length]; max_l = mm2[my_iindx[1] - seq_length] + referenceBPs2[my_iindx[1] - seq_length]; #ifdef _OPENMP #pragma omp sections { #pragma omp section { #endif prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_Fc, &matrices->k_max_Fc, &matrices->l_min_Fc, &matrices->l_max_Fc ); prepareArray(&matrices->E_Fc, matrices->k_min_Fc, matrices->k_max_Fc, matrices->l_min_Fc, matrices->l_max_Fc ); #ifdef _OPENMP } #pragma omp section { #endif prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_FcH, &matrices->k_max_FcH, &matrices->l_min_FcH, &matrices->l_max_FcH ); prepareArray(&matrices->E_FcH, matrices->k_min_FcH, matrices->k_max_FcH, matrices->l_min_FcH, matrices->l_max_FcH ); #ifdef _OPENMP } #pragma omp section { #endif prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_FcI, &matrices->k_max_FcI, &matrices->l_min_FcI, &matrices->l_max_FcI ); prepareArray(&matrices->E_FcI, matrices->k_min_FcI, matrices->k_max_FcI, matrices->l_min_FcI, matrices->l_max_FcI ); #ifdef _OPENMP } #pragma omp section { #endif prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_FcM, &matrices->k_max_FcM, &matrices->l_min_FcM, &matrices->l_max_FcM ); prepareArray(&matrices->E_FcM, matrices->k_min_FcM, matrices->k_max_FcM, matrices->l_min_FcM, matrices->l_max_FcM ); #ifdef _OPENMP } #pragma omp section { #endif preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); #ifdef _OPENMP } #pragma omp section { #endif preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real_fcH, &max_k_real_fcH, &min_l_real_fcH, &max_l_real_fcH ); #ifdef _OPENMP } #pragma omp section { #endif preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real_fcI, 
&max_k_real_fcI, &min_l_real_fcI, &max_l_real_fcI ); #ifdef _OPENMP } #pragma omp section { #endif preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real_fcM, &max_k_real_fcM, &min_l_real_fcM, &max_l_real_fcM ); #ifdef _OPENMP } } #endif /* begin actual energy calculations */ #ifdef _OPENMP #pragma omp sections private(d, d1,d2,cnt1,cnt2,cnt3,cnt4,j, i, energy) { #pragma omp section { #endif for (d = TURN + 2; d <= seq_length; d++) /* i,j in [1..length] */ for (j = d; j <= seq_length; j++) { unsigned int u, ij; int type, no_close; char loopseq[10]; i = j - d + 1; ij = my_iindx[i] - j; u = seq_length - j + i - 1; if (u < TURN) continue; type = ptype[jindx[j] + i]; no_close = (((type == 3) || (type == 4)) && no_closingGU); type = rtype[type]; if (!type) continue; if (no_close) continue; d1 = base_d1 - referenceBPs1[ij]; d2 = base_d2 - referenceBPs2[ij]; if (u < 7) { strcpy(loopseq, sequence + j - 1); strncat(loopseq, sequence, i); } energy = E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, P); if (E_C_rem[ij] != INF) matrices->E_FcH_rem = MIN2(matrices->E_FcH_rem, E_C_rem[ij] + energy); if (!E_C[ij]) continue; for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++) for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2) { if (((cnt1 + d1) <= maxD1) && ((cnt2 + d2) <= maxD2)) { matrices->E_FcH[cnt1 + d1][(cnt2 + d2) / 2] = MIN2(matrices->E_FcH[cnt1 + d1][(cnt2 + d2) / 2], energy + E_C[ij][cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1 + d1, cnt2 + d2, &min_k_real_fcH, &max_k_real_fcH, &min_l_real_fcH, &max_l_real_fcH ); } else { matrices->E_FcH_rem = MIN2(matrices->E_FcH_rem, energy + E_C[ij][cnt1][cnt2 / 2]); } } } /* end of i-j loop */ /* resize and move memory portions of energy matrix E_FcH */ adjustArrayBoundaries(&matrices->E_FcH, &matrices->k_min_FcH, &matrices->k_max_FcH, &matrices->l_min_FcH, &matrices->l_max_FcH, min_k_real_fcH, max_k_real_fcH, min_l_real_fcH, max_l_real_fcH ); #ifdef _OPENMP } #pragma omp section { #endif for (d = 
TURN + 2; d <= seq_length; d++) /* i,j in [1..length] */ for (j = d; j <= seq_length; j++) { unsigned int u, ij, p, q, pq; int type, type_2, no_close; i = j - d + 1; ij = my_iindx[i] - j; u = seq_length - j + i - 1; if (u < TURN) continue; type = ptype[jindx[j] + i]; no_close = (((type == 3) || (type == 4)) && no_closingGU); type = rtype[type]; if (!type) continue; if (no_close) continue; if (E_C_rem[ij] != INF) { for (p = j + 1; p < seq_length; p++) { unsigned int u1, qmin, ln_pre; u1 = p - j - 1; if (u1 + i - 1 > MAXLOOP) break; qmin = p + TURN + 1; ln_pre = u1 + i + seq_length; if (ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1; for (q = qmin; q <= seq_length; q++) { unsigned int u2; pq = my_iindx[p] - q; type_2 = rtype[(unsigned int)ptype[jindx[q] + p]]; if (type_2 == 0) continue; u2 = i - 1 + seq_length - q; if (u1 + u2 > MAXLOOP) continue; /* get distance to reference if closing the interior loop * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j}) * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j}) */ d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq]; d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq]; energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P); if (E_C_rem[pq] != INF) matrices->E_FcI_rem = MIN2(matrices->E_FcI_rem, E_C_rem[ij] + E_C_rem[pq] + energy); if (E_C[pq]) { for (cnt1 = k_min_C[pq]; cnt1 <= k_max_C[pq]; cnt1++) for (cnt2 = l_min_C[pq][cnt1]; cnt2 <= l_max_C[pq][cnt1]; cnt2 += 2) matrices->E_FcI_rem = MIN2(matrices->E_FcI_rem, E_C_rem[ij] + E_C[pq][cnt1][cnt2 / 2] + energy); } } } } if (E_C[ij]) { for (p = j + 1; p < seq_length; p++) { unsigned int u1, qmin, ln_pre; u1 = p - j - 1; if (u1 + i - 1 > MAXLOOP) break; qmin = p + TURN + 1; ln_pre = u1 + i + seq_length; if (ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1; for (q = qmin; q <= seq_length; q++) { unsigned int u2; pq = my_iindx[p] - q; type_2 = rtype[(unsigned int)ptype[jindx[q] + p]]; if (type_2 == 0) continue; u2 = i - 1 + seq_length - q; if (u1 + u2 
> MAXLOOP) continue; /* get distance to reference if closing the interior loop * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j}) * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j}) */ d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq]; d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq]; energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P); if (E_C_rem[pq] != INF) { for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++) for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2) matrices->E_FcI_rem = MIN2(matrices->E_FcI_rem, E_C[ij][cnt1][cnt2 / 2] + E_C_rem[pq] + energy); } if (E_C[pq]) { for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++) for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2) for (cnt3 = k_min_C[pq]; cnt3 <= k_max_C[pq]; cnt3++) for (cnt4 = l_min_C[pq][cnt3]; cnt4 <= l_max_C[pq][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) { matrices->E_FcI[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2( matrices->E_FcI[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2], E_C[ij][cnt1][cnt2 / 2] + E_C[pq][cnt3][cnt4 / 2] + energy ); updatePosteriorBoundaries(cnt1 + cnt3 + d1, cnt2 + cnt4 + d2, &min_k_real_fcI, &max_k_real_fcI, &min_l_real_fcI, &max_l_real_fcI ); } else { matrices->E_FcI_rem = MIN2( matrices->E_FcI_rem, E_C[ij][cnt1][cnt2 / 2] + E_C[pq][cnt3][cnt4 / 2] + energy ); } } } } } } } /* end of i-j loop */ /* resize and move memory portions of energy matrix E_FcI */ adjustArrayBoundaries(&matrices->E_FcI, &matrices->k_min_FcI, &matrices->k_max_FcI, &matrices->l_min_FcI, &matrices->l_max_FcI, min_k_real_fcI, max_k_real_fcI, min_l_real_fcI, max_l_real_fcI ); #ifdef _OPENMP } #pragma omp section { #endif if (seq_length > 2 * TURN) { for (i = TURN + 1; i < seq_length - 2 * TURN; i++) { /* get distancies to references * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n}) * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n}) */ d1 = base_d1 - referenceBPs1[my_iindx[1] - i] - 
referenceBPs1[my_iindx[i + 1] - seq_length]; d2 = base_d2 - referenceBPs2[my_iindx[1] - i] - referenceBPs2[my_iindx[i + 1] - seq_length]; if (E_M_rem[my_iindx[1] - i] != INF) { if (matrices->E_M2[i + 1]) { for (cnt1 = matrices->k_min_M2[i + 1]; cnt1 <= matrices->k_max_M2[i + 1]; cnt1++) for (cnt2 = matrices->l_min_M2[i + 1][cnt1]; cnt2 <= matrices->l_max_M2[i + 1][cnt1]; cnt2 += 2) matrices->E_FcM_rem = MIN2(matrices->E_FcM_rem, E_M_rem[my_iindx[1] - i] + matrices->E_M2[i + 1][cnt1][cnt2 / 2] + P->MLclosing); } if (matrices->E_M2_rem[i + 1] != INF) matrices->E_FcM_rem = MIN2(matrices->E_FcM_rem, E_M_rem[my_iindx[1] - i] + matrices->E_M2_rem[i + 1] + P->MLclosing); } if (matrices->E_M2_rem[i + 1] != INF) { if (E_M[my_iindx[1] - i]) { for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= k_max_M[my_iindx[1] - i]; cnt1++) for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= l_max_M[my_iindx[1] - i][cnt1]; cnt2 += 2) matrices->E_FcM_rem = MIN2(matrices->E_FcM_rem, E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + matrices->E_M2_rem[i + 1] + P->MLclosing); } } if (!E_M[my_iindx[1] - i]) continue; if (!matrices->E_M2[i + 1]) continue; for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= k_max_M[my_iindx[1] - i]; cnt1++) for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= l_max_M[my_iindx[1] - i][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_M2[i + 1]; cnt3 <= matrices->k_max_M2[i + 1]; cnt3++) for (cnt4 = matrices->l_min_M2[i + 1][cnt3]; cnt4 <= matrices->l_max_M2[i + 1][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) { matrices->E_FcM[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2( matrices->E_FcM[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2], E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + matrices->E_M2[i + 1][cnt3][cnt4 / 2] + P->MLclosing ); updatePosteriorBoundaries(cnt1 + cnt3 + d1, cnt2 + cnt4 + d2, &min_k_real_fcM, &max_k_real_fcM, &min_l_real_fcM, &max_l_real_fcM ); } else { matrices->E_FcM_rem = MIN2( matrices->E_FcM_rem, E_M[my_iindx[1] - i][cnt1][cnt2 / 2] 
+ matrices->E_M2[i + 1][cnt3][cnt4 / 2] + P->MLclosing ); } } } } /* resize and move memory portions of energy matrix E_FcM */ adjustArrayBoundaries(&matrices->E_FcM, &matrices->k_min_FcM, &matrices->k_max_FcM, &matrices->l_min_FcM, &matrices->l_max_FcM, min_k_real_fcM, max_k_real_fcM, min_l_real_fcM, max_l_real_fcM ); #ifdef _OPENMP } } #endif /* compute E_Fc_rem */ matrices->E_Fc_rem = MIN2(matrices->E_FcH_rem, matrices->E_FcI_rem); matrices->E_Fc_rem = MIN2(matrices->E_Fc_rem, matrices->E_FcM_rem); /* add the case were structure is unfolded chain */ if ((referenceBPs1[my_iindx[1] - seq_length] > maxD1) || (referenceBPs2[my_iindx[1] - seq_length] > maxD2)) matrices->E_Fc_rem = MIN2(matrices->E_Fc_rem, 0); /* compute all E_Fc */ for (cnt1 = matrices->k_min_FcH; cnt1 <= matrices->k_max_FcH; cnt1++) for (cnt2 = matrices->l_min_FcH[cnt1]; cnt2 <= matrices->l_max_FcH[cnt1]; cnt2 += 2) { matrices->E_Fc[cnt1][cnt2 / 2] = MIN2(matrices->E_Fc[cnt1][cnt2 / 2], matrices->E_FcH[cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } for (cnt1 = matrices->k_min_FcI; cnt1 <= matrices->k_max_FcI; cnt1++) for (cnt2 = matrices->l_min_FcI[cnt1]; cnt2 <= matrices->l_max_FcI[cnt1]; cnt2 += 2) { matrices->E_Fc[cnt1][cnt2 / 2] = MIN2(matrices->E_Fc[cnt1][cnt2 / 2], matrices->E_FcI[cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } for (cnt1 = matrices->k_min_FcM; cnt1 <= matrices->k_max_FcM; cnt1++) for (cnt2 = matrices->l_min_FcM[cnt1]; cnt2 <= matrices->l_max_FcM[cnt1]; cnt2 += 2) { matrices->E_Fc[cnt1][cnt2 / 2] = MIN2(matrices->E_Fc[cnt1][cnt2 / 2], matrices->E_FcM[cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } /* add the case were structure is unfolded chain */ matrices->E_Fc[referenceBPs1[my_iindx[1] - seq_length]][referenceBPs2[my_iindx[1] - seq_length] / 2] = 
MIN2(matrices->E_Fc[referenceBPs1[my_iindx[1] - seq_length]][referenceBPs2[my_iindx[1] - seq_length] / 2], 0); updatePosteriorBoundaries(referenceBPs1[my_iindx[1] - seq_length], referenceBPs2[my_iindx[1] - seq_length], &min_k_real, &max_k_real, &min_l_real, &max_l_real ); adjustArrayBoundaries(&matrices->E_Fc, &matrices->k_min_Fc, &matrices->k_max_Fc, &matrices->l_min_Fc, &matrices->l_max_Fc, min_k_real, max_k_real, min_l_real, max_l_real ); } PRIVATE void adjustArrayBoundaries(int ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_post, int k_max_post, int *l_min_post, int *l_max_post) { int cnt1; int k_diff_pre = k_min_post - *k_min; int mem_size = k_max_post - k_min_post + 1; if (k_min_post < INF) { /* free all the unused memory behind actual data */ for (cnt1 = k_max_post + 1; cnt1 <= *k_max; cnt1++) { (*array)[cnt1] += (*l_min)[cnt1] / 2; free((*array)[cnt1]); } /* free unused memory before actual data */ for (cnt1 = *k_min; cnt1 < k_min_post; cnt1++) { (*array)[cnt1] += (*l_min)[cnt1] / 2; free((*array)[cnt1]); } /* move data to front and thereby eliminating unused memory in front of actual data */ if (k_diff_pre > 0) { memmove((int **)(*array), ((int **)(*array)) + k_diff_pre, sizeof(int *) * mem_size); memmove((int *)(*l_min), ((int *)(*l_min)) + k_diff_pre, sizeof(int) * mem_size); memmove((int *)(*l_max), ((int *)(*l_max)) + k_diff_pre, sizeof(int) * mem_size); } /* reallocating memory to actual size used */ *array += *k_min; *array = (int **)realloc(*array, sizeof(int *) * mem_size); *array -= k_min_post; *l_min += *k_min; *l_min = (int *)realloc(*l_min, sizeof(int) * mem_size); *l_min -= k_min_post; *l_max += *k_min; *l_max = (int *)realloc(*l_max, sizeof(int) * mem_size); *l_max -= k_min_post; /* adjust l dimension of array */ for (cnt1 = k_min_post; cnt1 <= k_max_post; cnt1++) { if (l_min_post[cnt1] < INF) { /* new memsize */ mem_size = (l_max_post[cnt1] - l_min_post[cnt1] + 1) / 2 + 1; /* reshift the pointer */ (*array)[cnt1] += 
(*l_min)[cnt1] / 2;
        /* shift by one extra element if old and new lower bound differ in
         * parity (only every second l value is stored, hence the /2 indexing) */
        int shift = (l_min_post[cnt1] % 2 == (*l_min)[cnt1] % 2) ? 0 : 1;
        /* eliminate unused memory in front of actual data */
        unsigned int start = (l_min_post[cnt1] - (*l_min)[cnt1]) / 2 + shift;
        if (start > 0)
          memmove((int *)((*array)[cnt1]),
                  (int *)((*array)[cnt1]) + start,
                  sizeof(int) * mem_size);
        (*array)[cnt1] = (int *)realloc((*array)[cnt1], sizeof(int) * mem_size);
        /* re-offset the row pointer so it can be indexed by l/2 directly */
        (*array)[cnt1] -= l_min_post[cnt1] / 2;
      } else {
        /* this row holds no data at all -> free according memory
         * (undo the l_min/2 offset to recover the allocation base first) */
        (*array)[cnt1] += (*l_min)[cnt1] / 2;
        free((*array)[cnt1]);
      }
      /* commit the new per-k l boundaries */
      (*l_min)[cnt1] = l_min_post[cnt1];
      (*l_max)[cnt1] = l_max_post[cnt1];
    }
  } else {
    /* k_min_post is still INF, i.e. no cell was ever written:
     * we have to free all unused memory */
    for (cnt1 = *k_min; cnt1 <= *k_max; cnt1++) {
      (*array)[cnt1] += (*l_min)[cnt1] / 2;
      free((*array)[cnt1]);
    }
    (*l_min) += *k_min;
    (*l_max) += *k_min;
    free(*l_min);
    free(*l_max);
    (*array) += *k_min;
    free(*array);
    *array = NULL;
  }
  /* the caller-supplied posterior boundary arrays are consumed here:
   * un-shift them back to their allocation base and release them */
  l_min_post += *k_min;
  l_max_post += *k_min;
  free(l_min_post);
  free(l_max_post);
  *k_min = k_min_post;
  *k_max = k_max_post;
}

/*
 * Allocate and initialize "posterior" boundary trackers for a (k,l) energy
 * matrix: *min_k/*max_k and the per-k arrays *min_l/*max_l start at the
 * extreme values (INF, 0) so that any later updatePosteriorBoundaries()
 * call can only tighten them.  The l arrays are offset by `shift` so they
 * can be indexed with absolute k values afterwards.
 */
PRIVATE INLINE void
preparePosteriorBoundaries(int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l)
{
  int i;
  *min_k = INF;
  *max_k = 0;
  *min_l = (int *)vrna_alloc(sizeof(int) * size);
  *max_l = (int *)vrna_alloc(sizeof(int) * size);
  for (i = 0; i < size; i++) {
    (*min_l)[i] = INF;
    (*max_l)[i] = 0;
  }
  /* index by absolute k instead of 0-based offset */
  *min_l -= shift;
  *max_l -= shift;
}

/* Record that cell (d1, d2) was written: widen the tracked k range and the
 * l range of row d1 accordingly. */
PRIVATE INLINE void
updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l)
{
  (*min_l)[d1] = MIN2((*min_l)[d1], d2);
  (*max_l)[d1] = MAX2((*max_l)[d1], d2);
  *min_k = MIN2(*min_k, d1);
  *max_k = MAX2(*max_k, d1);
}

/*
 * Compute a-priori boundaries for a (k,l) matrix: copies the given k range
 * and allocates per-k l-boundary arrays, offset by min_k_pre so they are
 * indexed with absolute k values.  bpdist is presumably the base-pair
 * distance between the two reference structures (TODO confirm); it
 * constrains the minimum l for each k in the loop that follows.
 */
INLINE PRIVATE void
prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l)
{
  int cnt;
  int mem = max_k_pre - min_k_pre + 1;

  *min_k = min_k_pre;
  *max_k = max_k_pre;
  *min_l = (int *)vrna_alloc(sizeof(int) * mem);
  *max_l = (int *)vrna_alloc(sizeof(int) * mem);
  /* index by absolute k */
  *min_l -= min_k_pre;
  *max_l -= min_k_pre;
  /* for each k
guess the according minimum l*/
  for (cnt = min_k_pre; cnt <= max_k_pre; cnt++) {
    (*min_l)[cnt] = min_l_pre;
    (*max_l)[cnt] = max_l_pre;
    /* raise the lower l bound until k + l reaches bpdist ... */
    while ((*min_l)[cnt] + cnt < bpdist)
      (*min_l)[cnt]++;
    /* ... and fix the parity: k + l must have the same parity as bpdist */
    if ((bpdist % 2) != (((*min_l)[cnt] + cnt) % 2))
      (*min_l)[cnt]++;
  }
}

/*
 * Allocate a 2D (k, l/2) int matrix for the k range [min_k, max_k] with the
 * per-k l ranges given in min_l/max_l.  The outer pointer is offset by
 * min_k and each row by min_l[i]/2 so the matrix is indexed with absolute
 * (k, l/2) coordinates; every cell is preset to INF.
 */
INLINE PRIVATE void
prepareArray(int ***array, int min_k, int max_k, int *min_l, int *max_l)
{
  int i, j, mem;

  *array = (int **)vrna_alloc(sizeof(int *) * (max_k - min_k + 1));
  *array -= min_k;
  for (i = min_k; i <= max_k; i++) {
    /* only every second l value is stored, hence the /2 */
    mem = (max_l[i] - min_l[i] + 1) / 2 + 1;
    (*array)[i] = (int *)vrna_alloc(sizeof(int) * mem);
    for (j = 0; j < mem; j++)
      (*array)[i][j] = INF;
    (*array)[i] -= min_l[i] / 2;
  }
}

/*
 * Same layout as prepareArray() but for unsigned long counters; cells are
 * not preset to INF here (presumably vrna_alloc() zero-initializes --
 * confirm against its implementation).
 */
INLINE PRIVATE void
prepareArray2(unsigned long ***array, int min_k, int max_k, int *min_l, int *max_l)
{
  int i, mem;

  *array = (unsigned long **)vrna_alloc(sizeof(unsigned long *) * (max_k - min_k + 1));
  *array -= min_k;
  for (i = min_k; i <= max_k; i++) {
    mem = (max_l[i] - min_l[i] + 1) / 2 + 1;
    (*array)[i] = (unsigned long *)vrna_alloc(sizeof(unsigned long) * mem);
    (*array)[i] -= min_l[i] / 2;
  }
}

/*
#################################
# OLD API support               #
#################################
*/

/* crosslink data from vars->compatibility to TwoDfold_vars structure,
 * i.e. re-publish the fold compound's fields and matrices through the
 * legacy aliases after every operation that may have replaced them */
PRIVATE INLINE void
crosslink(TwoDfold_vars *vars)
{
  vrna_fold_compound_t *c;
  vrna_mx_mfe_t *m;

  c = vars->compatibility;
  m = c->matrices;
  vars->sequence = c->sequence;
  vars->seq_length = c->length;
  vars->reference_pt1 = c->reference_pt1;
  vars->reference_pt2 = c->reference_pt2;
  vars->referenceBPs1 = c->referenceBPs1;
  vars->referenceBPs2 = c->referenceBPs2;
  vars->bpdist = c->bpdist;
  vars->do_backtrack = 1;
  vars->dangles = c->params->model_details.dangles;
  vars->circ = c->params->model_details.circ;
  vars->temperature = c->params->model_details.temperature;
  vars->ptype = c->ptype_pf_compat;
  vars->P = c->params;
  vars->S = c->sequence_encoding2;
  vars->S1 = c->sequence_encoding;
  vars->my_iindx = c->iindx;
  vars->mm1 = c->mm1;
  vars->mm2 = c->mm2;
  vars->maxD1 =
c->maxD1;
  vars->maxD2 = c->maxD2;
  /* alias every DP matrix (and its k/l boundary arrays) of the new API */
  vars->E_C = m->E_C;
  vars->l_min_values = m->l_min_C;
  vars->l_max_values = m->l_max_C;
  vars->k_min_values = m->k_min_C;
  vars->k_max_values = m->k_max_C;
  vars->E_F5 = m->E_F5;
  vars->l_min_values_f = m->l_min_F5;
  vars->l_max_values_f = m->l_max_F5;
  vars->k_min_values_f = m->k_min_F5;
  vars->k_max_values_f = m->k_max_F5;
  vars->E_F3 = m->E_F3;
  vars->l_min_values_f3 = m->l_min_F3;
  vars->l_max_values_f3 = m->l_max_F3;
  vars->k_min_values_f3 = m->k_min_F3;
  vars->k_max_values_f3 = m->k_max_F3;
  vars->E_M = m->E_M;
  vars->l_min_values_m = m->l_min_M;
  vars->l_max_values_m = m->l_max_M;
  vars->k_min_values_m = m->k_min_M;
  vars->k_max_values_m = m->k_max_M;
  vars->E_M1 = m->E_M1;
  vars->l_min_values_m1 = m->l_min_M1;
  vars->l_max_values_m1 = m->l_max_M1;
  vars->k_min_values_m1 = m->k_min_M1;
  vars->k_max_values_m1 = m->k_max_M1;
#ifdef COUNT_STATES
  vars->N_C = m->N_C;
  vars->N_F5 = m->N_F5;
  vars->N_M = m->N_M;
  vars->N_M1 = m->N_M1;
#endif
  vars->E_M2_rem = m->E_M2_rem;
  vars->E_M2 = m->E_M2;
  vars->l_min_values_m2 = m->l_min_M2;
  vars->l_max_values_m2 = m->l_max_M2;
  vars->k_min_values_m2 = m->k_min_M2;
  vars->k_max_values_m2 = m->k_max_M2;
  vars->E_Fc = m->E_Fc;
  vars->E_FcH = m->E_FcH;
  vars->E_FcI = m->E_FcI;
  vars->E_FcM = m->E_FcM;
  /* "rem" entries collect contributions outside the tracked (k,l) ranges */
  vars->E_Fc_rem = m->E_Fc_rem;
  vars->E_FcH_rem = m->E_FcH_rem;
  vars->E_FcI_rem = m->E_FcI_rem;
  vars->E_FcM_rem = m->E_FcM_rem;
  vars->E_C_rem = m->E_C_rem;
  vars->E_M_rem = m->E_M_rem;
  vars->E_M1_rem = m->E_M1_rem;
  vars->E_F5_rem = m->E_F5_rem;
}

/*
 * Old-API constructor: builds a vrna_fold_compound_t for 2D folding from
 * the sequence and the two reference structures, wraps it in a
 * TwoDfold_vars and publishes its fields through the legacy aliases.
 */
PUBLIC TwoDfold_vars *
get_TwoDfold_variables(const char *seq, const char *structure1, const char *structure2, int circ)
{
  vrna_md_t md;
  TwoDfold_vars *vars;

  set_model_details(&md);
  md.circ = circ;
  vars = (TwoDfold_vars *)vrna_alloc(sizeof(TwoDfold_vars));
  vars->compatibility = vrna_fold_compound_TwoD(seq, structure1, structure2, &md, VRNA_OPTION_MFE);
  crosslink(vars);
  return vars;
}

/* Old-API wrapper around vrna_backtrack5_TwoD(); note the argument order
 * change (j moves from first to last position). */
PUBLIC char *
TwoDfold_backtrack_f5(unsigned int j, int k, int l, TwoDfold_vars *vars)
{
  return
vrna_backtrack5_TwoD(vars->compatibility, k, l, j);
}

/* Release a TwoDfold_vars structure including the wrapped fold compound.
 * Safe to call with NULL. */
PUBLIC void
destroy_TwoDfold_variables(TwoDfold_vars *vars)
{
  if (vars == NULL)
    return;

  vrna_fold_compound_free(vars->compatibility);
  free(vars);
}

/* Old-API wrapper: run the 2D MFE computation on the wrapped compound,
 * then refresh the legacy aliases via crosslink() since the computation
 * may have (re)allocated matrices. */
PUBLIC vrna_sol_TwoD_t *
TwoDfoldList(TwoDfold_vars *vars, int distance1, int distance2)
{
  vrna_sol_TwoD_t *sol;

  sol = vrna_mfe_TwoD(vars->compatibility, distance1, distance2);
  crosslink(vars);
  return sol;
}

/* Old-API wrapper: rebuild the energy parameters from the current global
 * model settings (the old parameter set is freed) and refresh the aliases. */
PUBLIC void
update_TwoDfold_params(TwoDfold_vars *vars)
{
  vrna_md_t md;

  set_model_details(&md);
  free(vars->compatibility->params);
  vars->compatibility->params = vrna_params(&md);
  crosslink(vars);
}
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
hmacSHA256_fmt_plug.c
/* * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * Based on hmac-md5 by Bartavelle * * SIMD added Feb, 2015, JimF. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_hmacSHA224; extern struct fmt_main fmt_hmacSHA256; #elif FMT_REGISTERS_H john_register_one(&fmt_hmacSHA224); john_register_one(&fmt_hmacSHA256); #else #include "sha2.h" #include "arch.h" #include "misc.h" #include "common.h" #include "base64_convert.h" #include "formats.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP #include <omp.h> #ifdef SIMD_COEF_32 #ifndef OMP_SCALE #define OMP_SCALE 2048 // scaled on core i7-quad HT #endif #else #ifndef OMP_SCALE #define OMP_SCALE 512 // scaled K8-dual HT #endif #endif #endif #include "memdbg.h" #define FORMAT_LABEL "HMAC-SHA256" #define FORMAT_LABEL_224 "HMAC-SHA224" #define FORMAT_NAME "" #define ALGORITHM_NAME "password is key, SHA256 " SHA256_ALGORITHM_NAME #define ALGORITHM_NAME_224 "password is key, SHA224 " SHA256_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define PAD_SIZE 64 #define PAD_SIZE_W (PAD_SIZE/4) #define BINARY_SIZE (256/8) #define BINARY_SIZE_224 (224/8) #define BINARY_ALIGN 4 #ifndef SIMD_COEF_32 #define SALT_LENGTH 1023 #define SALT_ALIGN 1 #else #define SALT_LIMBS 5 /* 5 limbs, 311 bytes */ #define SALT_LENGTH (SALT_LIMBS * PAD_SIZE - 9) #define SALT_ALIGN MEM_ALIGN_SIMD #endif #define CIPHERTEXT_LENGTH (SALT_LENGTH + 1 + BINARY_SIZE * 2) #define CIPHERTEXT_LENGTH_224 (SALT_LENGTH + 1 + BINARY_SIZE_224 * 2) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #define GETPOS(i, index) ((index & (SIMD_COEF_32 - 1)) * 4 + ((i&63) & (0xffffffff - 3)) * SIMD_COEF_32 + (3 - ((i&63) & 3)) + (unsigned 
int)index/SIMD_COEF_32 * PAD_SIZE * SIMD_COEF_32) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests tests[] = { {"The quick brown fox jumps over the lazy dog#f7bc83f430538424b13298e6aa6fb143ef4d59a14946175997479dbc2d1a3cd8", "key"}, {"#b613679a0814d9ec772f95d778c35fc5ff1697c493715653c6c712144292c5ad", ""}, {"Beppe#Grillo#14651BA87C7F7DA88BCE0DF1F89C223975AC0FDF9C35378CB0857A81DFD5C408", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."}, {"jquYnUyWT5NsbvjQDZXyCxMJB6PryALZdYOZ1bEuagcUmYcbqpx5vOvpxj7VEhqW7OIzHR2O9JLDKrhuDfZxQk9jOENQb4OzEkRZmN8czdGdo7nshdYU1zcdoDGVb3YTCbjeZvazi#c8b4b8a7888787eebca16099fd076092269919bb032bfec48eed7f41d42eba9a", "magnum"}, // JWT hash. {"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjEyMzQ1Njc4OTAsIm5hbWUiOiJKb2huIERvZSIsImFkbWluIjp0cnVlfQ.eoaDVGTClRdfxUZXiPs3f8FmJDkDE_VCQFXqKxpLsts", "secret" }, #ifndef SIMD_COEF_32 {"12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012#ff504b06ee64f
3ba7fe503496b451cf46ee34109a62d55cd4bf4f38077ee8145","1234567890" }, {"012345678901234567890123456789012345678901234567890123456789#6ec69f97e81e58b4a28ee13537c84df316cf8a6250e932de1d375e72843b8f9c", "123456"}, {"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123#389c4d8db62dea4c108cf12662da3c9440149800cd1e74f3738ba804024343b7","1234567890" }, {"0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789#090487f586965594ae55d366cc9bc96d9f0ce44e253e975a1ed004c8a5edcf24", "123456"}, #endif {NULL} }; static struct fmt_tests tests_224[] = { {"what do ya want for nothing?#a30e01098bc6dbbf45690f3a7e9e6d0f8bbea2a39e6148008fd05e44", "Jefe"}, {"Beppe#Grillo#926E4A97B401242EF674CEE4C60D9FC6FF73007F871008D4C11F5B95", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."}, {NULL} }; #ifdef SIMD_COEF_32 static unsigned char *crypt_key; static unsigned char *ipad, *prep_ipad; static unsigned char 
*opad, *prep_opad;

typedef struct cur_salt_t {
	/* salt split into PAD_SIZE-byte limbs; each limb is replicated for
	 * MAX_KEYS_PER_CRYPT SIMD lanes */
	unsigned char salt[SALT_LIMBS][PAD_SIZE * MAX_KEYS_PER_CRYPT];
	int salt_len;
} cur_salt_t;
static cur_salt_t *cur_salt;
static int bufsize;
#define SALT_SIZE sizeof(cur_salt_t)
#else
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];
static unsigned char (*opad)[PAD_SIZE];
static unsigned char (*ipad)[PAD_SIZE];
static unsigned char cur_salt[SALT_LENGTH+1];
static SHA256_CTX *ipad_ctx;
static SHA256_CTX *opad_ctx;
#define SALT_SIZE sizeof(cur_salt)
#endif

static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int new_keys;

#ifdef SIMD_COEF_32
/* Reset every pad buffer to the HMAC inner/outer pad constants
 * (0x36 / 0x5C); keys are later XORed on top of these. */
static void clear_keys(void)
{
	memset(ipad, 0x36, bufsize);
	memset(opad, 0x5C, bufsize);
}
#endif

/* Shared initializer for the SHA-224/SHA-256 variants.
 * B_LEN is the digest size in bytes (BINARY_SIZE or BINARY_SIZE_224). */
static void init(struct fmt_main *self, const int B_LEN)
{
#ifdef SIMD_COEF_32
	int i;
#endif
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * PAD_SIZE;
	crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt,
	                             BINARY_SIZE, MEM_ALIGN_SIMD);
	prep_opad = mem_calloc_align(self->params.max_keys_per_crypt,
	                             BINARY_SIZE, MEM_ALIGN_SIMD);
	/* pre-set the SHA-2 message padding for the outer hash once per lane:
	 * 0x80 terminator right after the digest and the bit length
	 * ((B_LEN + PAD_SIZE) << 3) in the last word of the block */
	for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
		crypt_key[GETPOS(B_LEN, i)] = 0x80;
		((unsigned int*)crypt_key)[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + (i/SIMD_COEF_32) * PAD_SIZE_W * SIMD_COEF_32] = (B_LEN + PAD_SIZE) << 3;
	}
	clear_keys();
#else
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
	ipad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad));
	opad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad));
	ipad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad_ctx));
	opad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad_ctx));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain));
}

static void init_256(struct fmt_main *self)
{
	init(self, BINARY_SIZE);
}

static void init_224(struct fmt_main *self)
{
	init(self, BINARY_SIZE_224);
}

/* Release everything allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_plain);
#ifdef SIMD_COEF_32
	MEM_FREE(prep_opad);
	MEM_FREE(prep_ipad);
#else
	MEM_FREE(opad_ctx);
	MEM_FREE(ipad_ctx);
#endif
	MEM_FREE(opad);
	MEM_FREE(ipad);
	MEM_FREE(crypt_key);
}

/* Canonicalize a ciphertext: JWT-style "a.b.sig" inputs are rewritten to
 * the native "salt#hexdigest" form, and the hex digest is lower-cased. */
static char *split(char *ciphertext, int index, struct fmt_main *self, const int B_LEN, const int CT_LEN)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	if (strstr(ciphertext, "$SOURCE_HASH$"))
		return ciphertext;

	if (!strchr(ciphertext, '#') && strchr(ciphertext, '.') && strchr(ciphertext, '.') != strrchr(ciphertext, '.')) {
		// Treat this like a JWT hash. Convert into 'normal' hmac-sha256 format.
		char buf[BINARY_SIZE * 2 + 1], tmp[CIPHERTEXT_LENGTH + 1], *cpi;
		strnzcpy(tmp, ciphertext, sizeof(tmp));
		/* the salted data is everything before the second '.'; the part
		 * after it is the base64url-encoded MAC */
		cpi = strchr(tmp, '.');
		cpi = strchr(&cpi[1], '.');
		if (cpi - tmp + B_LEN * 2 + 1 > CT_LEN)
			return ciphertext;
		*cpi++ = 0;
		memset(buf, 0, sizeof(buf));
		base64_convert(cpi, e_b64_mime, strlen(cpi), buf, e_b64_hex, sizeof(buf), flg_Base64_NO_FLAGS, 0);
		if (strlen(buf) != B_LEN * 2)
			return ciphertext;
		sprintf(out, "%s#%s", tmp, buf);
	} else
		strnzcpy(out, ciphertext, sizeof(out));

	/* NOTE(review): strrchr() returns NULL when out contains no '#'
	 * (non-JWT input without a separator) -- looks like valid() is
	 * expected to gate such inputs, but split() can be reached first;
	 * confirm callers never pass a '#'-less, non-JWT string here. */
	strlwr(strrchr(out, '#'));
	return out;
}

static char *split_256(char *ciphertext, int index, struct fmt_main *self)
{
	return split(ciphertext, index, self, BINARY_SIZE, CIPHERTEXT_LENGTH);
}

static char *split_224(char *ciphertext, int index, struct fmt_main *self)
{
	return split(ciphertext, index, self, BINARY_SIZE_224, CIPHERTEXT_LENGTH_224);
}

/* Accept "salt#hexdigest" (or a convertible JWT string): checks salt
 * length, digest length and that the digest is pure hex. */
static int valid(char *ciphertext, struct fmt_main *self, const int B_LEN, const int CT_LEN)
{
	int pos, i;
	char *p;

	p = strrchr(ciphertext, '#'); // allow # in salt
	if (!p && strchr(ciphertext, '.') && strchr(ciphertext, '.') != strrchr(ciphertext, '.')) {
		if (strlen(ciphertext) > CT_LEN)
return 0; ciphertext = split(ciphertext, 0, self, B_LEN, CT_LEN); p = strrchr(ciphertext, '#'); } if (!p || p > &ciphertext[strlen(ciphertext)-1]) return 0; i = (int)(p - ciphertext); if (i > SALT_LENGTH) return 0; pos = i + 1; if (strlen(ciphertext + pos) != B_LEN * 2) return 0; for (i = pos; i < B_LEN * 2 + pos; i++) { if (!( (('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) || (('a' <= ciphertext[i])&&(ciphertext[i] <= 'f')) || (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F')))) return 0; } return 1; } static int valid_256(char *ciphertext, struct fmt_main *self) { return valid(ciphertext, self, BINARY_SIZE, CIPHERTEXT_LENGTH); } static int valid_224(char *ciphertext, struct fmt_main *self) { return valid(ciphertext, self, BINARY_SIZE_224, CIPHERTEXT_LENGTH_224); } static void set_salt(void *salt) { #ifdef SIMD_COEF_32 cur_salt = salt; #else strcpy((char*)cur_salt, (char*)salt); #endif } static MAYBE_INLINE void set_key(char *key, int index, const int B_LEN) { int len; #ifdef SIMD_COEF_32 uint32_t *ipadp = (uint32_t*)&ipad[GETPOS(3, index)]; uint32_t *opadp = (uint32_t*)&opad[GETPOS(3, index)]; const uint32_t *keyp = (uint32_t*)key; unsigned int temp; len = strlen(key); memcpy(saved_plain[index], key, len); saved_plain[index][len] = 0; if (len > PAD_SIZE) { unsigned char k0[BINARY_SIZE]; SHA256_CTX ctx; int i; if (B_LEN == BINARY_SIZE) { SHA256_Init(&ctx); SHA256_Update(&ctx, key, len); SHA256_Final(k0, &ctx); } else { SHA224_Init(&ctx); SHA224_Update(&ctx, key, len); SHA224_Final(k0, &ctx); } keyp = (unsigned int*)k0; for (i = 0; i < B_LEN / 4; i++, ipadp += SIMD_COEF_32, opadp += SIMD_COEF_32) { temp = JOHNSWAP(*keyp++); *ipadp ^= temp; *opadp ^= temp; } } else while(((temp = JOHNSWAP(*keyp++)) & 0xff000000)) { if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00)) { ((unsigned short*)ipadp)[1] ^= (unsigned short)(temp >> 16); ((unsigned short*)opadp)[1] ^= (unsigned short)(temp >> 16); break; } *ipadp ^= temp; *opadp ^= temp; if (!(temp & 0x000000ff)) break; ipadp 
+= SIMD_COEF_32; opadp += SIMD_COEF_32; } #else int i; len = strlen(key); memcpy(saved_plain[index], key, len); saved_plain[index][len] = 0; memset(ipad[index], 0x36, PAD_SIZE); memset(opad[index], 0x5C, PAD_SIZE); if (len > PAD_SIZE) { SHA256_CTX ctx; unsigned char k0[BINARY_SIZE]; if (B_LEN == BINARY_SIZE) { SHA256_Init( &ctx ); SHA256_Update( &ctx, key, len); SHA256_Final( k0, &ctx); } else { SHA224_Init( &ctx ); SHA224_Update( &ctx, key, len); SHA224_Final( k0, &ctx); } len = B_LEN; for (i=0;i<len;i++) { ipad[index][i] ^= k0[i]; opad[index][i] ^= k0[i]; } } else for (i=0;i<len;i++) { ipad[index][i] ^= key[i]; opad[index][i] ^= key[i]; } #endif new_keys = 1; } static void set_key_256(char *key, int index) { set_key(key, index, BINARY_SIZE); } static void set_key_224(char *key, int index) { set_key(key, index, BINARY_SIZE_224); } static char *get_key(int index) { return saved_plain[index]; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int index; for (index = 0; index < count; index++) { // NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32) if (((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[(index&(SIMD_COEF_32-1))+index/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32]) return 1; } return 0; #else int index = 0; #if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1) for (; index < count; index++) #endif if (((uint32_t*)binary)[0] == crypt_key[index][0]) return 1; return 0; #endif } static MAYBE_INLINE int cmp_one(void *binary, int index, const int B_LEN) { #ifdef SIMD_COEF_32 int i; for (i = 0; i < (B_LEN/4); i++) // NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32) if (((uint32_t*)binary)[i] != ((uint32_t*)crypt_key)[i * SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32]) return 0; return 1; #else return !memcmp(binary, crypt_key[index], B_LEN); #endif } static int cmp_one_256(void *binary, int index) { return cmp_one(binary, index, BINARY_SIZE); } static int cmp_one_224(void 
*binary, int index) { return cmp_one(binary, index, BINARY_SIZE_224); } static int cmp_exact(char *source, int index) { return (1); } static int crypt_all(int *pcount, struct db_salt *salt, #ifdef SIMD_COEF_32 const unsigned EX_FLAGS #else const int B_LEN #endif ) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { #ifdef SIMD_COEF_32 unsigned int i, *pclear; if (new_keys) { SIMDSHA256body(&ipad[index * PAD_SIZE], (unsigned int*)&prep_ipad[index * BINARY_SIZE], NULL, SSEi_MIXED_IN|EX_FLAGS); SIMDSHA256body(&opad[index * PAD_SIZE], (unsigned int*)&prep_opad[index * BINARY_SIZE], NULL, SSEi_MIXED_IN|EX_FLAGS); } SIMDSHA256body(cur_salt->salt[0], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&prep_ipad[index * BINARY_SIZE], SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS); for (i = 1; i <= (cur_salt->salt_len + 8) / PAD_SIZE; i++) SIMDSHA256body(cur_salt->salt[i], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&crypt_key[index * PAD_SIZE], SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS); if (EX_FLAGS) { // NOTE, SSESHA224 will output 32 bytes. We need the first 28 (plus the 0x80 padding). // so we are forced to 'clean' this crap up, before using the crypt as the input. pclear = (unsigned int*)&crypt_key[(unsigned int)index/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32*4]; for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) pclear[28/4*SIMD_COEF_32+(i&(SIMD_COEF_32-1))+i/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32] = 0x80000000; } SIMDSHA256body(&crypt_key[index * PAD_SIZE], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&prep_opad[index * BINARY_SIZE], SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS); #else SHA256_CTX ctx; // Note, for oSSL, we really only need SHA256_Init and SHA224_Init. From that point // on, SHA256_Update/SHA256_Final can be used. 
Also, jtr internal sha2.c file works // like that. BUT I am not sure every hash engine works that way, so we are keeping // the 'full' block. if (B_LEN == BINARY_SIZE) { if (new_keys) { SHA256_Init(&ipad_ctx[index]); SHA256_Update(&ipad_ctx[index], ipad[index], PAD_SIZE); SHA256_Init(&opad_ctx[index]); SHA256_Update(&opad_ctx[index], opad[index], PAD_SIZE); } memcpy(&ctx, &ipad_ctx[index], sizeof(ctx)); SHA256_Update( &ctx, cur_salt, strlen( (char*) cur_salt) ); SHA256_Final( (unsigned char*) crypt_key[index], &ctx); memcpy(&ctx, &opad_ctx[index], sizeof(ctx)); SHA256_Update( &ctx, crypt_key[index], B_LEN); SHA256_Final( (unsigned char*) crypt_key[index], &ctx); } else { if (new_keys) { SHA224_Init(&ipad_ctx[index]); SHA224_Update(&ipad_ctx[index], ipad[index], PAD_SIZE); SHA224_Init(&opad_ctx[index]); SHA224_Update(&opad_ctx[index], opad[index], PAD_SIZE); } memcpy(&ctx, &ipad_ctx[index], sizeof(ctx)); SHA224_Update( &ctx, cur_salt, strlen( (char*) cur_salt) ); SHA224_Final( (unsigned char*) crypt_key[index], &ctx); memcpy(&ctx, &opad_ctx[index], sizeof(ctx)); SHA224_Update( &ctx, crypt_key[index], B_LEN); SHA224_Final( (unsigned char*) crypt_key[index], &ctx); } #endif } new_keys = 0; return count; } static int crypt_all_256(int *pcount, struct db_salt *salt) { #ifdef SIMD_COEF_32 return crypt_all(pcount, salt, 0); #else return crypt_all(pcount, salt, BINARY_SIZE); #endif } static int crypt_all_224(int *pcount, struct db_salt *salt) { #ifdef SIMD_COEF_32 return crypt_all(pcount, salt, SSEi_CRYPT_SHA224); #else return crypt_all(pcount, salt, BINARY_SIZE_224); #endif } static void *get_binary(char *ciphertext, const int B_LEN) { static union toalign { unsigned char c[BINARY_SIZE]; uint32_t a[1]; } a; unsigned char *realcipher = a.c; int i,pos; for (i=strlen(ciphertext);ciphertext[i]!='#';i--); // allow # in salt pos=i+1; for (i=0;i<B_LEN;i++) realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+pos])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+1+pos])]; #ifdef SIMD_COEF_32 
alter_endianity(realcipher, B_LEN); #endif return (void*)realcipher; } static void *get_binary_256(char *ciphertext) { return get_binary(ciphertext, BINARY_SIZE); } static void *get_binary_224(char *ciphertext) { return get_binary(ciphertext, BINARY_SIZE_224); } static void *get_salt(char *ciphertext) { static unsigned char salt[SALT_LENGTH+1]; int len; #ifdef SIMD_COEF_32 unsigned int i = 0; static JTR_ALIGN(MEM_ALIGN_SIMD) cur_salt_t cur_salt; int salt_len = 0; #endif // allow # in salt len = strrchr(ciphertext, '#') - ciphertext; memset(salt, 0, sizeof(salt)); memcpy(salt, ciphertext, len); #ifdef SIMD_COEF_32 memset(&cur_salt, 0, sizeof(cur_salt)); while(((unsigned char*)salt)[salt_len]) { for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = ((unsigned char*)salt)[salt_len]; ++salt_len; } cur_salt.salt_len = salt_len; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = 0x80; ((unsigned int*)cur_salt.salt[(salt_len + 8) / PAD_SIZE])[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32] = (salt_len + PAD_SIZE) << 3; } return &cur_salt; #else return salt; #endif } struct fmt_main fmt_hmacSHA256 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { NULL }, tests }, { init_256, done, fmt_default_reset, fmt_default_prepare, valid_256, split_256, get_binary_256, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key_256, get_key, #ifdef SIMD_COEF_32 clear_keys, #else fmt_default_clear_keys, #endif crypt_all_256, { fmt_default_get_hash }, cmp_all, cmp_one_256, cmp_exact } }; struct fmt_main fmt_hmacSHA224 = { { FORMAT_LABEL_224, FORMAT_NAME, 
ALGORITHM_NAME_224, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE_224, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { NULL }, tests_224 }, { init_224, done, fmt_default_reset, fmt_default_prepare, valid_224, split_224, get_binary_224, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key_224, get_key, #ifdef SIMD_COEF_32 clear_keys, #else fmt_default_clear_keys, #endif crypt_all_224, { fmt_default_get_hash }, cmp_all, cmp_one_224, cmp_exact } }; #endif /* plugin stanza */
pbkdf2-hmac-md5_fmt_plug.c
/*
 * PBKDF2-HMAC-MD5 format plugin for John the Ripper.
 *
 * This software is Copyright (c) 2015 Dhiru and magnum
 * and it is hereby released to
 * the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_md5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_md5);
#else
#include <ctype.h>
#include <string.h>
#include <assert.h>
#include "arch.h"
//#undef SIMD_COEF_32
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "stdint.h"
#include "pbkdf2_hmac_md5.h"
#include "pbkdf2_hmac_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "PBKDF2-HMAC-MD5"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME          "PBKDF2-MD5 " MD5_ALGORITHM_NAME
#else
#define ALGORITHM_NAME          "PBKDF2-MD5 32/" ARCH_BITS_STR
#endif
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(ARCH_WORD_32)
#if SIMD_COEF_32
/* With SIMD, a whole vector group of candidates is hashed per call. */
#define MIN_KEYS_PER_CRYPT      (SIMD_COEF_32 * SIMD_PARA_MD5)
#define MAX_KEYS_PER_CRYPT      (SIMD_COEF_32 * SIMD_PARA_MD5)
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif
#define PLAINTEXT_LENGTH        125

/* Parsed salt: raw salt bytes plus its length and the iteration count. */
static struct custom_salt {
	unsigned int length;
	unsigned int rounds;
	char salt[PBKDF2_32_MAX_SALT_SIZE];
} *cur_salt;

/* Per-candidate plaintext and computed PBKDF2 output, sized in init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[PBKDF2_MDx_BINARY_SIZE / sizeof(ARCH_WORD_32)];

/*
 * Allocate the key/output buffers. Under OpenMP the key counts are
 * scaled by the thread count (times OMP_SCALE) so every thread has work.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}

/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/*
 * Parse "$pbkdf2-tag$rounds$hexsalt$..." into a custom_salt.
 * Returns a pointer to a static buffer (standard JtR get_salt idiom;
 * the framework copies SALT_SIZE bytes out of it).
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p;
	int saltlen;

	memset(&cs, 0, sizeof(cs));
	if (!strncmp(ciphertext, PBKDF2_MD5_FORMAT_TAG, PBKDF2_MD5_TAG_LEN))
		ciphertext += PBKDF2_MD5_TAG_LEN;
	cs.rounds = atoi(ciphertext);
	ciphertext = strchr(ciphertext, '$') + 1;
	p = strchr(ciphertext, '$');
	saltlen = 0;
	memset(cs.salt, 0, sizeof(cs.salt));
	while (ciphertext < p) {        /* decode hex salt, two digits per byte */
		cs.salt[saltlen++] =
			atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
			atoi16[ARCH_INDEX(ciphertext[1])];
		ciphertext += 2;
	}
	cs.length = saltlen;
	return (void*)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Hash-table bucket extractors over the first output word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/*
 * Compute PBKDF2-HMAC-MD5 for all queued candidates against cur_salt.
 * With SIMD, candidates are processed SSE_GROUP_SZ_MD5 at a time;
 * otherwise one at a time. Parallelized across groups with OpenMP.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#if SIMD_COEF_32
		int lens[SSE_GROUP_SZ_MD5], i;
		unsigned char *pin[SSE_GROUP_SZ_MD5];
		union {
			ARCH_WORD_32 *pout[SSE_GROUP_SZ_MD5];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_MD5; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = crypt_out[index+i];
		}
		pbkdf2_md5_sse((const unsigned char **)pin, lens,
		               (unsigned char*)cur_salt->salt, cur_salt->length,
		               cur_salt->rounds, &(x.poutc),
		               PBKDF2_MDx_BINARY_SIZE, 0);
#else
		pbkdf2_md5((unsigned char*)(saved_key[index]),
		           strlen(saved_key[index]),
		           (unsigned char*)cur_salt->salt, cur_salt->length,
		           cur_salt->rounds, (unsigned char*)crypt_out[index],
		           PBKDF2_MDx_BINARY_SIZE, 0);
#endif
	}
	return count;
}

/* Quick scan: does any computed hash match the binary's first word(s)? */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	//dump_stuff_msg("\nbinary", crypt_out[count - 1], 16);
	return 0;
}

/* Full-width comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], PBKDF2_MDx_BINARY_SIZE);
}

/* Store a candidate plaintext, truncated to PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Recompute from scratch via the common helper to rule out collisions. */
static int cmp_exact(char *source, int index)
{
	return pbkdf2_hmac_md5_cmp_exact(get_key(index), source,
	                                 (unsigned char*)cur_salt->salt,
	                                 cur_salt->length, cur_salt->rounds);
}

/* Tunable-cost report for the "iteration count" field below. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->rounds;
}

struct fmt_main fmt_pbkdf2_hmac_md5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		PBKDF2_MDx_BINARY_SIZE,
		PBKDF2_32_BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		pbkdf2_hmac_md5_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		pbkdf2_hmac_md5_valid,
		pbkdf2_hmac_md5_split,
		pbkdf2_hmac_md5_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
GB_unop__one_int16_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__one_int16_int16
// op(A') function:  GB_unop_tran__one_int16_int16

// C type:   int16_t
// A type:   int16_t
// cast:     ;
// unaryop:  cij = 1

// Since the ONE operator ignores its input, most of the macros below
// expand to empty statements; only the constant store remains.

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]   (a no-op here: the input value is never read)
#define GB_GETA(aij,Ax,pA) \
    ;

#define GB_CX(p) Cx [p]

// unary operator: the result is always the constant 1
#define GB_OP(z, x) \
    z = 1 ;

// casting (no-op: no typecast is needed for int16_t -> int16_t)
#define GB_CAST(z, aij) \
    ; ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    ; ; \
    /* Cx [pC] = op (cast (aij)) */ \
    ; ; \
    Cx [pC] = 1 ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ONE || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__one_int16_int16
(
    int16_t *Cx,                    // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Cx is written
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // the expanded macros are no-ops; only the store remains
            ; ; ; ;
            Cx [p] = 1 ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // only positions present in the bitmap are written
            if (!Ab [p]) continue ;
            ; ; ; ;
            Cx [p] = 1 ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__one_int16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
comm.h
/**
 * Copyright (c) 2015 by Contributors
 */
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include "mxnet/ndarray.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication
 */
class Comm {
 public:
  Comm() {
#if MXNET_USE_CUDA
    // Use pinned host memory only when at least one GPU is actually present.
    int gpu_num;
    int ret = cudaGetDeviceCount(&gpu_num);
    pinned_ctx_ = (ret == 0 && gpu_num > 0) ? Context::CPUPinned(0) : Context::CPU();
#else
    pinned_ctx_ = Context::CPU();
#endif
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape
   */
  virtual void Init(int key, const TShape &shape) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief return a pinned context
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }

 protected:
  Context pinned_ctx_;
};

/**
 * \brief an implementation of Comm that first copies data to CPU memory, and
 * then reduces there
 */
class CommCPU : public Comm {
 public:
  CommCPU() {
    // Both knobs are environment-tunable; defaults: 4 reduction threads,
    // arrays of >= 1M elements count as "big" and get chunked reduction.
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
  }
  virtual ~CommCPU() { }

  void Init(int key, const TShape &shape) override {
    merge_buf_[key].merged = NDArray(shape, pinned_ctx_);
  }

  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }
    std::vector<Engine::VarHandle> const_vars(src.size() - 1);
    std::vector<NDArray> reduce(src.size());
    auto& buf = merge_buf_[key];
    // src[0] goes directly into the merge buffer; the sum accumulates there.
    CopyFromTo(src[0], &buf.merged, priority);
    reduce[0] = buf.merged;
    if (buf.copy_buf.empty()) {
      // Lazily allocate one pinned staging buffer per additional source;
      // reused across calls for this key.
      buf.copy_buf.resize(src.size()-1);
      for (size_t j = 0; j < src.size() - 1; ++j) {
        buf.copy_buf[j] = NDArray(src[0].shape(), pinned_ctx_);
      }
    }
    for (size_t i = 1; i < src.size(); ++i) {
      CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
      reduce[i] = buf.copy_buf[i-1];
      const_vars[i-1] = reduce[i].var();
    }
    // The actual summation is scheduled on the engine: reads copy_buf vars,
    // mutates the merge buffer (reduce[0]).
    Engine::Get()->PushSync([reduce, this](RunContext rctx) {
        ReduceSumCPU(reduce);
      }, Context::CPU(), const_vars, {reduce[0].var()},
      FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce"));
    return buf.merged;
  }

  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    int mask = src.ctx().dev_mask();
    if (mask == Context::kCPU) {
      for (auto d : dst) CopyFromTo(src, d, priority);
    } else {
      // first copy data to cpu, then broadcast
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) CopyFromTo(buf.merged, d, priority);
    }
  }

 private:
  // Sum dptr[1..n-1][offset..offset+size) into dptr[0], unrolling up to
  // 4 inputs per pass to reduce the number of passes over the output.
  inline static void ReduceSumCPU(
      const std::vector<real_t*> &dptr, size_t offset, index_t size) {
    using namespace mshadow;  // NOLINT(*)
    Tensor<cpu, 1> in_0(dptr[0] + offset, Shape1(size));
    for (size_t i = 1; i < dptr.size(); i+=4) {
      switch (dptr.size() - i) {
        case 1: {
          Tensor<cpu, 1> in_1(dptr[i] + offset, Shape1(size));
          in_0 += in_1;
          break;
        }
        case 2: {
          Tensor<cpu, 1> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1> in_2(dptr[i+1] + offset, Shape1(size));
          in_0 += in_1 + in_2;
          break;
        }
        case 3: {
          Tensor<cpu, 1> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1> in_3(dptr[i+2] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3;
          break;
        }
        default: {
          Tensor<cpu, 1> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1> in_3(dptr[i+2] + offset, Shape1(size));
          Tensor<cpu, 1> in_4(dptr[i+3] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3 + in_4;
          break;
        }
      }
    }
  }

  // reduce sum into val[0]; for big arrays the range is split into
  // fixed-size chunks processed in parallel with OpenMP
  inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
    const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
    // ge ptr out
    std::vector<real_t*> dptr(in_data.size());
    for (size_t i = 0; i < in_data.size(); ++i) {
      TBlob data = in_data[i].data();
      CHECK(data.CheckContiguous());
      dptr[i] = data.FlatTo2D<cpu, real_t>().dptr_;
    }
    size_t total = in_data[0].shape().Size();
    long ntask = (total + step - 1) / step;  // NOLINT(*)
    if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
      ReduceSumCPU(dptr, 0, total);
    } else {
      #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
      for (long j = 0; j < ntask; ++j) {  // NOLINT(*)
        size_t k = static_cast<size_t>(j);
        size_t begin = std::min(k * step, total);
        size_t end = std::min((k + 1) * step, total);
        if (j == ntask - 1) CHECK_EQ(end, total);
        ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
      }
    }
  }

  /// \brief temporary space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the cpu buffer for gpu data
    std::vector<NDArray> copy_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  size_t bigarray_bound_;
  int nthread_reduction_;
};

/**
 * \brief an implementation of Comm that performs reduction on device
 * directly.
 *
 * It is faster if the total device-to-device bandwidths is larger than
 * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device
 * memory.
 */
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;
  }

  virtual ~CommDevice() { }

  void Init(int key, const TShape &shape) override {
    // Allocation is deferred: shapes are collected here and buffers are
    // placed across devices on the first Reduce (see InitMergeBuffer).
    sorted_key_shape_.push_back(std::make_pair(key, shape));
  }

  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }
    if (!inited_) {
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    CopyFromTo(src[0], &(buf.merged), priority);
    reduce[0] = buf.merged;
    if (buf.copy_buf.empty()) {
      // TODO(mli) this results in large device memory usage for huge ndarray,
      // such as the largest fullc in VGG. consider to do segment reduce with
      // NDArray.Slice or gpu direct memory access. for the latter, we need to
      // remove some ctx check, and also it reduces 20% perf
      buf.copy_buf.resize(src.size()-1);
      for (size_t i = 0; i < src.size()-1; ++i) {
        buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx());
      }
    }
    for (size_t i = 0; i < src.size()-1; ++i) {
      CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
      reduce[i+1] = buf.copy_buf[i];
    }
    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // Merge buffers not yet placed: copy to a (key-dependent) device
      // first, then fan out from there.
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf.merged, d, priority);
      }
    }
  }

 private:
  // Best-effort enabling of CUDA peer-to-peer access between every GPU pair;
  // only logs a warning (never fails) when some pairs cannot be enabled.
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          // NOTE(review): a failed cudaDeviceEnablePeerAccess leaves a sticky
          // CUDA error that the next cudaGetLastError() would report --
          // presumably harmless for current callers, but worth confirming.
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled <<  " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  using KeyShape = std::pair<int, TShape>;
  // try to allocate buff on device evenly
  void InitMergeBuffer(const std::vector<Context>& devs) {
    // Greedy balancing: assign keys largest-first, each to the device with
    // the smallest total size assigned so far.
    std::sort(sorted_key_shape_.begin(), sorted_key_shape_.end(), [](
              const KeyShape& a, const KeyShape& b) {
      return a.second.Size() > b.second.Size();
    });
    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }
    for (size_t i = 0; i < sorted_key_shape_.size(); ++i) {
      int k = sorted_key_shape_[i].first;
      TShape s = sorted_key_shape_[i].second;
      auto& buf = merge_buf_[k];
      Context ctx;
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      buf.merged = NDArray(s, ctx);
      ctx_info[ctx.dev_id].second += s.Size();
    }
    inited_ = true;
  }

  std::vector<KeyShape> sorted_key_shape_;
  /// \brief temporary space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the gpu buffer
    std::vector<NDArray> copy_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  bool inited_;
};

}  // namespace kvstore
}  // namespace mxnet
#endif  // MXNET_KVSTORE_COMM_H_
simd_metadata.c
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp -triple i386-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp -triple i386-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC
// RUN: %clang_cc1 -fopenmp -triple powerpc64-unknown-unknown -target-abi elfv1-qpx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC-QPX

// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp-simd -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp-simd -triple i386-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp-simd -triple i386-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp-simd -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC
// RUN: %clang_cc1 -fopenmp-simd -triple powerpc64-unknown-unknown -target-abi elfv1-qpx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC-QPX

// NOTE(review): FileCheck regression test. The RUN/CHECK comment text below is
// load-bearing test input and must not be edited; it pins the alignment
// assumptions (aligned clause -> llvm.assume) and the loop metadata
// (vectorize.width / vectorize.enable / parallel_accesses) that the
// '#pragma omp simd' directives emit, per target and feature level.

void h1(float *c, float *a, double b[], int size)
{
// CHECK-LABEL: define void @h1
  int t = 0;
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b)
// CHECK: [[C_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
// CHECK: [[A_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
// CHECK: [[B_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
  for (int i = 0; i < size; ++i) {
    c[i] = a[i] * a[i] + b[i] * b[t];
    ++t;
  }
// do not emit llvm.access.group metadata due to usage of safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b) simdlen(8)
// CHECK: [[C_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
// CHECK: [[A_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
// CHECK: [[B_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
  for (int i = 0; i < size; ++i) {
    c[i] = a[i] * a[i] + b[i] * b[t];
    ++t;
  }
// do not emit llvm.access.group metadata due to usage of safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
#pragma omp simd linear(t) aligned(c:32) aligned(a,b) simdlen(8)
// CHECK: [[C_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
// CHECK: [[A_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
// CHECK: [[B_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
  for (int i = 0; i < size; ++i) {
    c[i] = a[i] * a[i] + b[i] * b[t];
    ++t;
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_7:[0-9]+]]
  }
}

void h2(float *c, float *a, float *b, int size)
{
// CHECK-LABEL: define void @h2
  int t = 0;
#pragma omp simd linear(t)
  for (int i = 0; i < size; ++i) {
    c[i] = a[i] * a[i] + b[i] * b[t];
    ++t;
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_10:[0-9]+]]
  }
// CHECK: br label %{{.+}}, !llvm.loop [[LOOP_H2_HEADER:![0-9]+]]
}

void h3(float *c, float *a, float *b, int size)
{
// CHECK-LABEL: define void @h3
#pragma omp simd
  for (int i = 0; i < size; ++i) {
    for (int j = 0; j < size; ++j) {
      c[j*i] = a[i] * b[j];
    }
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_13:[0-9]+]]
  }
// CHECK: br label %{{.+}}, !llvm.loop [[LOOP_H3_HEADER:![0-9]+]]
}

// Metadata for h1:
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_16:![0-9]+]], [[LOOP_VEC_ENABLE:![0-9]+]]}
// CHECK: [[LOOP_WIDTH_16]] = !{!"llvm.loop.vectorize.width", i32 16}
// CHECK: [[LOOP_VEC_ENABLE]] = !{!"llvm.loop.vectorize.enable", i1 true}
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_8:![0-9]+]], [[LOOP_VEC_ENABLE]]}
// CHECK: [[LOOP_WIDTH_8]] = !{!"llvm.loop.vectorize.width", i32 8}
// CHECK: ![[ACCESS_GROUP_7]] = distinct !{}
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], ![[PARALLEL_ACCESSES_9:[0-9]+]], [[LOOP_WIDTH_8]], [[LOOP_VEC_ENABLE]]}
// CHECK: ![[PARALLEL_ACCESSES_9]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_7]]}
//
// Metadata for h2:
// CHECK: ![[ACCESS_GROUP_10]] = distinct !{}
// CHECK: [[LOOP_H2_HEADER]] = distinct !{[[LOOP_H2_HEADER]], ![[PARALLEL_ACCESSES_12:[0-9]+]], [[LOOP_VEC_ENABLE]]}
// CHECK: ![[PARALLEL_ACCESSES_12]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_10]]}
//
// Metadata for h3:
// CHECK: ![[ACCESS_GROUP_13]] = distinct !{}
// CHECK: [[LOOP_H3_HEADER]] = distinct !{[[LOOP_H3_HEADER]], ![[PARALLEL_ACCESSES_15:[0-9]+]], [[LOOP_VEC_ENABLE]]}
// CHECK: ![[PARALLEL_ACCESSES_15]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_13]]}
//
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. 
/// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. /// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXBaseSpecifierMatcher = internal::Matcher<CXXBaseSpecifier>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>; using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>; using AttrMatcher = internal::Matcher<Attr>; /// @} /// Matches any node. 
/// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); return RegExp->match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the statement are expanded from different /// appearances of the macro. AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, MacroName) { // Verifies that the statement' beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches decomposition-declarations. /// /// Examples matches the declaration node with \c foo and \c bar, but not /// \c number. 
/// (matcher = declStmt(has(decompositionDecl())))
///
/// \code
///   int number = 42;
///   auto [foo, bar] = std::make_pair{42, 42};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, DecompositionDecl>
    decompositionDecl;

/// Matches binding declarations
/// Example matches \c foo and \c bar
/// (matcher = bindingDecl()
///
/// \code
///   auto [foo, bar] = std::make_pair{42, 42};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BindingDecl>
    bindingDecl;

/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
///   extern "C" {}
/// \endcode
/// linkageSpecDecl()
///   matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
    linkageSpecDecl;

/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
///   typedef int X;
///   struct S {
///     union {
///       int i;
///     } U;
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;

/// Matches a declaration of label.
///
/// Given
/// \code
///   goto FOO;
///   FOO: bar();
/// \endcode
/// labelDecl()
///   matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;

/// Matches a declaration of a namespace.
///
/// Given
/// \code
///   namespace {}
///   namespace test {}
/// \endcode
/// namespaceDecl()
///   matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
    namespaceDecl;

/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
///   namespace test {}
///   namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
///   matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
    namespaceAliasDecl;

/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
///   class X;
///   template<class T> class Z {};
///   struct S {};
///   union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl>
    recordDecl;

/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
///   class X;
///   template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
    cxxRecordDecl;

/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
///   template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
    classTemplateDecl;

/// Matches C++ class template specializations.
///
/// Given
/// \code
///   template<typename T> class A {};
///   template<> class A<double> {};
///   A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
///   matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
    Decl, ClassTemplateSpecializationDecl>
    classTemplateSpecializationDecl;

/// Matches C++ class template partial specializations.
///
/// Given
/// \code
///   template<class T1, class T2, int I>
///   class A {};
///
///   template<class T, int I>
///   class A<T, T*, I> {};
///
///   template<>
///   class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
///   matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
    Decl, ClassTemplatePartialSpecializationDecl>
    classTemplatePartialSpecializationDecl;

/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
///   class X { int y; };
/// \endcode
/// declaratorDecl()
///   matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
    declaratorDecl;

/// Matches parameter variable declarations.
///
/// Given
/// \code
///   void f(int x);
/// \endcode
/// parmVarDecl()
///   matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
    parmVarDecl;

/// Matches C++ access specifier declarations.
///
/// Given
/// \code
///   class C {
///   public:
///     int a;
///   };
/// \endcode
/// accessSpecDecl()
///   matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
    accessSpecDecl;

/// Matches class bases.
///
/// Examples matches \c public virtual B.
/// \code
///   class B {};
///   class C : public virtual B {};
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXBaseSpecifier> cxxBaseSpecifier;

/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
///   class C {
///     C() : i(42) {}
///     int i;
///   };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
    cxxCtorInitializer;

/// Matches template arguments.
///
/// Given
/// \code
///   template <typename T> struct C {};
///   C<int> c;
/// \endcode
/// templateArgument()
///   matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;

/// Matches template arguments (with location info).
///
/// Given
/// \code
///   template <typename T> struct C {};
///   C<int> c;
/// \endcode
/// templateArgumentLoc()
///   matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc>
    templateArgumentLoc;

/// Matches template name.
///
/// Given
/// \code
///   template <typename T> class X { };
///   X<int> xi;
/// \endcode
/// templateName()
///   matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;

/// Matches non-type template parameter declarations.
///
/// Given
/// \code
///   template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
///   matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   NonTypeTemplateParmDecl>
    nonTypeTemplateParmDecl;

/// Matches template type parameter declarations.
///
/// Given
/// \code
///   template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
///   matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
    templateTypeParmDecl;

/// Matches template template parameter declarations.
///
/// Given
/// \code
///   template <template <typename> class Z, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
///   matches 'Z', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   TemplateTemplateParmDecl>
    templateTemplateParmDecl;

/// Matches public C++ declarations and C++ base specifiers that specify public
/// inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a; // fieldDecl(isPublic()) matches 'a'
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
///
/// \code
///   class Base {};
///   class Derived1 : public Base {}; // matches 'Base'
///   struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_public;
}

/// Matches protected C++ declarations and C++ base specifiers that specify
/// protected inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a;
///   protected: int b; // fieldDecl(isProtected()) matches 'b'
///   private:   int c;
///   };
/// \endcode
///
/// \code
///   class Base {};
///   class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_protected;
}

/// Matches private C++ declarations and C++ base specifiers that specify
/// private inheritance.
/// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; // fieldDecl(isPrivate()) matches 'c' /// }; /// \endcode /// /// \code /// struct Base {}; /// struct Derived1 : private Base {}; // matches 'Base' /// class Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPrivate, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. /// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. 
/// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches an entity that has been implicitly added by the compiler (e.g. /// implicit default/copy constructors). AST_POLYMORPHIC_MATCHER(isImplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Attr)) { return Node.isImplicit(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl that have at least one TemplateArgument matching the given /// InnerMatcher. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// /// template<typename T> f() {}; /// void func() { f<int>(); }; /// \endcode /// /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(asString("int")))) /// matches the specialization \c A<int> /// /// functionDecl(hasAnyTemplateArgument(refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P( hasAnyTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder, Builder) != List.end(); } /// Causes all nested matchers to be matched with the specified traversal kind. 
///
/// Given
/// \code
///   void foo()
///   {
///       int i = 3.0;
///   }
/// \endcode
/// The matcher
/// \code
///   traverse(TK_IgnoreUnlessSpelledInSource,
///     varDecl(hasInitializer(floatLiteral().bind("init")))
///   )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             new internal::TraversalMatcher<T>(TK, InnerMatcher),
             InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}

/// Overload of \c traverse for a bindable matcher; the result remains
/// bindable.
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
  return internal::BindableMatcher<T>(
      internal::DynTypedMatcher::constructRestrictedWrapper(
          new internal::TraversalMatcher<T>(TK, InnerMatcher),
          InnerMatcher.getID().first)
          .template unconditionalConvertTo<T>());
}

/// Overload of \c traverse for variadic-operator matchers (e.g. allOf,
/// anyOf); wraps them so the traversal kind is applied when the node type
/// becomes known.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
         const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
  return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
      TK, InnerMatcher);
}

/// Overload of \c traverse for argument-adapting matchers (e.g. has,
/// hasDescendant).
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
          typename T, typename ToTypes>
internal::TraversalWrapper<
    internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
                               ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
                                                   ToTypes>>(TK, InnerMatcher);
}

/// Overload of \c traverse for polymorphic matchers.
template <template <typename T, typename... P> class MatcherT, typename... P,
          typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>
traverse(TraversalKind TK,
         const internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>
             &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>(TK,
                                                                  InnerMatcher);
}

/// Overload of \c traverse for mapAnyOf helpers; forwards to the
/// Matcher-taking overload via \c with().
template <typename... T>
internal::Matcher<typename internal::GetClade<T...>::Type>
traverse(TraversalKind TK, const internal::MapAnyOfHelper<T...> &InnerMatcher) {
  return traverse(TK, InnerMatcher.with());
}

/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
///   class C {};
///   C a = C();
///   C b;
///   C c = b;
/// \endcode
/// The matchers
/// \code
///    varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
///    varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder);
}

/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
///   int arr[5];
///   int a = 0;
///   char b = 0;
///   const int c = a;
///   int *d = arr;
///   long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
///    varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
///    varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
///    varDecl(hasInitializer(integerLiteral()))
///    varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for a.
AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. 
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder);
}

/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
///   const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
///   implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  const Expr *E = Node.IgnoreParens();
  return InnerMatcher.matches(*E, Finder, Builder);
}

/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
///   template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}

/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
///   template<typename T>
///   void add(T x, int y) {
///     x + y;
///   }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }

/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the return value of f() in the following example is
/// value-dependent.
/// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. /// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. 
/// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. /// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. 
/// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that refers to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return toString(Node.getAsIntegral(), 10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches tag declarations. 
/// /// Example matches X, Z, U, S, E /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// enum E { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. /// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. 
/// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. 
/// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. 
/// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. 
/// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. 
/// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. /// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using-enum declarations. 
/// /// Given /// \code /// namespace X { enum x {...}; } /// using enum X::x; /// \endcode /// usingEnumDecl() /// matches \code using enum X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingEnumDecl> usingEnumDecl; /// Matches using namespace declarations. /// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. /// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. 
/// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. 
/// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. 
/// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode /// See also the binaryOperation() matcher for more-general matching of binary /// uses of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches rewritten binary operators /// /// Example matches use of "<": /// \code /// #include <compare> /// struct HasSpaceshipMem { /// int a; /// constexpr auto operator<=>(const HasSpaceshipMem&) const = default; /// }; /// void compare() { /// HasSpaceshipMem hs1, hs2; /// if (hs1 < hs2) /// return; /// } /// \endcode /// See also the binaryOperation() matcher for more-general matching /// of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXRewrittenBinaryOperator> cxxRewrittenBinaryOperator; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. 
/// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. /// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. 
/// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches co_return statements. /// /// Given /// \code /// while (true) { co_return; } /// \endcode /// coreturnStmt() /// matches 'co_return' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoreturnStmt> coreturnStmt; /// Matches return statements. 
/// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. 
/// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. /// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. 
/// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches fixed point literals extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral> fixedPointLiteral; /// Matches user defined literal operator call. /// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches co_await expressions. /// /// Given /// \code /// co_await 1; /// \endcode /// coawaitExpr() /// matches 'co_await 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoawaitExpr> coawaitExpr; /// Matches co_await expressions where the type of the promise is dependent extern const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr> dependentCoawaitExpr; /// Matches co_yield expressions. 
/// /// Given /// \code /// co_yield 1; /// \endcode /// coyieldExpr() /// matches 'co_yield 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoyieldExpr> coyieldExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches C11 _Generic expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr> genericSelectionExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. /// /// Example matches a || b /// \code /// !(a || b) /// \endcode /// See also the binaryOperation() matcher for more-general matching. extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). 
/// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. /// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. 
/// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. /// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. 
/// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. /// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. 
extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches any node regardless of the submatcher. /// /// However, \c optionally will retain any bindings generated by the submatcher. /// Useful when additional information which may or may not present about a main /// matching node is desired. /// /// For example, in: /// \code /// class Foo { /// int bar; /// } /// \endcode /// The matcher: /// \code /// cxxRecordDecl( /// optionally(has( /// fieldDecl(hasName("bar")).bind("var") /// ))).bind("record") /// \endcode /// will produce a result binding for both "record" and "var". /// The matcher will produce a "record" binding for even if there is no data /// member named "bar" in that class. 
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches any of the \p NodeMatchers with InnerMatchers nested within /// /// Given /// \code /// if (true); /// for (; true; ); /// \endcode /// with the matcher /// \code /// mapAnyOf(ifStmt, forStmt).with( /// hasCondition(cxxBoolLiteralExpr(equals(true))) /// ).bind("trueCond") /// \endcode /// matches the \c if and the \c for. It is equivalent to: /// \code /// auto trueCond = hasCondition(cxxBoolLiteralExpr(equals(true))); /// anyOf( /// ifStmt(trueCond).bind("trueCond"), /// forStmt(trueCond).bind("trueCond") /// ); /// \endcode /// /// The with() chain-call accepts zero or more matchers which are combined /// as-if with allOf() in each of the node matchers. /// Usable as: Any Matcher template <typename T, typename... U> auto mapAnyOf(internal::VariadicDynCastAllOfMatcher<T, U> const &...) { return internal::MapAnyOfHelper<U...>(); } /// Matches nodes which can be used with binary operators. 
/// /// The code /// \code /// var1 != var2; /// \endcode /// might be represented in the clang AST as a binaryOperator, a /// cxxOperatorCallExpr or a cxxRewrittenBinaryOperator, depending on /// /// * whether the types of var1 and var2 are fundamental (binaryOperator) or at /// least one is a class type (cxxOperatorCallExpr) /// * whether the code appears in a template declaration, if at least one of the /// vars is a dependent-type (binaryOperator) /// * whether the code relies on a rewritten binary operator, such as a /// spaceship operator or an inverted equality operator /// (cxxRewrittenBinaryOperator) /// /// This matcher elides details in places where the matchers for the nodes are /// compatible. /// /// Given /// \code /// binaryOperation( /// hasOperatorName("!="), /// hasLHS(expr().bind("lhs")), /// hasRHS(expr().bind("rhs")) /// ) /// \endcode /// matches each use of "!=" in: /// \code /// struct S{ /// bool operator!=(const S&) const; /// }; /// /// void foo() /// { /// 1 != 2; /// S() != S(); /// } /// /// template<typename T> /// void templ() /// { /// 1 != 2; /// T() != S(); /// } /// struct HasOpEq /// { /// bool operator==(const HasOpEq &) const; /// }; /// /// void inverse() /// { /// HasOpEq s1; /// HasOpEq s2; /// if (s1 != s2) /// return; /// } /// /// struct HasSpaceship /// { /// bool operator<=>(const HasOpEq &) const; /// }; /// /// void use_spaceship() /// { /// HasSpaceship s1; /// HasSpaceship s2; /// if (s1 != s2) /// return; /// } /// \endcode extern const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator> binaryOperation; /// Matches function calls and constructor calls /// /// Because CallExpr and CXXConstructExpr do not share a common /// base class with API accessing arguments etc, AST Matchers for code /// which should match both are typically duplicated. This matcher /// removes the need for duplication. 
/// /// Given code /// \code /// struct ConstructorTakesInt /// { /// ConstructorTakesInt(int i) {} /// }; /// /// void callTakesInt(int i) /// { /// } /// /// void doCall() /// { /// callTakesInt(42); /// } /// /// void doConstruct() /// { /// ConstructorTakesInt cti(42); /// } /// \endcode /// /// The matcher /// \code /// invocation(hasArgument(0, integerLiteral(equals(42)))) /// \endcode /// matches the expression in both doCall and doConstruct extern const internal::MapAnyOfMatcher<CallExpr, CXXConstructExpr> invocation; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::BindableMatcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(
      unaryExprOrTypeTraitExpr(allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // HasNameMatcher takes a vector of names; the single-name case is the
  // common one, hence this convenience wrapper.
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({std::string(Name)}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///     hasAnyName(a, b, c)
/// \endcode
///  is equivalent to, but faster than
/// \code
///     anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
                                        internal::hasAnyNameFunc>
    hasAnyName;

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  // Prepend "::" so anchored patterns like "::X" can match the fully
  // qualified name from the global namespace.
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(FullNameString);
}

/// Matches overloaded operator names.
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcher< internal::HasOverloadedOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl), std::vector<std::string>> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcher< internal::HasOverloadedOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl), std::vector<std::string>>({std::string(Name)}); } /// Matches overloaded operator names. /// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// hasAnyOverloadedOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcher<internal::HasOverloadedOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXOperatorCallExpr, FunctionDecl), std::vector<std::string>>, StringRef, internal::hasAnyOverloadedOperatorNameFunc> hasAnyOverloadedOperatorName; /// Matches template-dependent, but known, member names. /// /// In template declarations, dependent members are not resolved and so can /// not be matched to particular named declarations. /// /// This matcher allows to match on the known name of members. 
/// /// Given /// \code /// template <typename T> /// struct S { /// void mem(); /// }; /// template <typename T> /// void x() { /// S<T> s; /// s.mem(); /// } /// \endcode /// \c cxxDependentScopeMemberExpr(hasMemberName("mem")) matches `s.mem()` AST_MATCHER_P(CXXDependentScopeMemberExpr, hasMemberName, std::string, N) { return Node.getMember().getAsString() == N; } /// Matches template-dependent, but known, member names against an already-bound /// node /// /// In template declarations, dependent members are not resolved and so can /// not be matched to particular named declarations. /// /// This matcher allows to match on the name of already-bound VarDecl, FieldDecl /// and CXXMethodDecl nodes. /// /// Given /// \code /// template <typename T> /// struct S { /// void mem(); /// }; /// template <typename T> /// void x() { /// S<T> s; /// s.mem(); /// } /// \endcode /// The matcher /// @code /// \c cxxDependentScopeMemberExpr( /// hasObjectExpression(declRefExpr(hasType(templateSpecializationType( /// hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has( /// cxxMethodDecl(hasName("mem")).bind("templMem") /// ))))) /// )))), /// memberHasSameNameAsBoundNode("templMem") /// ) /// @endcode /// first matches and binds the @c mem member of the @c S template, then /// compares its name to the usage in @c s.mem() in the @c x function template AST_MATCHER_P(CXXDependentScopeMemberExpr, memberHasSameNameAsBoundNode, std::string, BindingID) { auto MemberName = Node.getMember().getAsString(); return Builder->removeBindings( [this, MemberName](const BoundNodesMap &Nodes) { const auto &BN = Nodes.getNode(this->BindingID); if (const auto *ND = BN.get<NamedDecl>()) { if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND)) return true; return ND->getName() != MemberName; } return true; }); } /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. 
/// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ classes that have a direct or indirect base matching \p /// BaseSpecMatcher. 
/// /// Example: /// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived /// \endcode /// // FIXME: Refactor this and isDerivedFrom to reuse implementation. AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder); } /// Matches C++ classes that have a direct base matching \p BaseSpecMatcher. /// /// Example: /// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; // doesn't match /// \endcode AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return Node.hasDefinition() && llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) { return BaseSpecMatcher.matches(Base, Finder, Builder); }); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { const auto M = anyOf(Base, isDerivedFrom(Base)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match a named class.
  if (BaseName.empty())
    return false;

  const auto M = isSameOrDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);

  // The node must be an Objective-C class.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}

/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match a named class.
  if (BaseName.empty())
    return false;
  const auto M = isDirectlyDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Match into a copy of the builder so bindings are only committed when the
  // found method is actually accepted below.
  BoundNodesTreeBuilder Result(*Builder);
  auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                            Node.method_end(), Finder, &Result);
  if (MatchIt == Node.method_end())
    return false;

  // In traversal modes that ignore implicit nodes, implicit methods
  // (e.g. compiler-generated special members) must not produce a match.
  if (Finder->isTraversalIgnoringImplicitNodes() && (*MatchIt)->isImplicit())
    return false;

  *Builder = std::move(Result);
  return true;
}

/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) { return Node.isLambda(); }

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
/// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. 
/// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. 
/// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. 
/// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcher< internal::HasDeclarationMatcher, void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcher< internal::HasDeclarationMatcher, void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>( InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. 
/// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. /// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. 
/// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) { std::string SelectorString = Node.getSelector().getAsString(); return RegExp->match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// and public virtual X (matcher = cxxBaseSpecifier(hasType( /// asString("class X"))) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// class Z : public virtual X {}; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// and public virtual X (matcher = cxxBaseSpecifier(hasType( /// cxxRecordDecl(hasName("X")))) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// class Z : public virtual X {}; /// \endcode /// /// Example matches class Derived /// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base")))))) /// \code /// class Base {}; /// class Derived : Base {}; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>, /// Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of a node matches the inner matcher. 
/// /// Examples: /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x /// /// \code /// auto x = int(3); /// \code /// cxxTemporaryObjectExpr(hasTypeLoc(loc(asString("int")))) /// matches int(3) /// /// \code /// struct Foo { Foo(int, int); }; /// auto x = Foo(1, 2); /// \code /// cxxFunctionalCastExpr(hasTypeLoc(loc(asString("struct Foo")))) /// matches Foo(1, 2) /// /// Usable as: Matcher<BlockDecl>, Matcher<CXXBaseSpecifier>, /// Matcher<CXXCtorInitializer>, Matcher<CXXFunctionalCastExpr>, /// Matcher<CXXNewExpr>, Matcher<CXXTemporaryObjectExpr>, /// Matcher<CXXUnresolvedConstructExpr>, /// Matcher<ClassTemplateSpecializationDecl>, Matcher<CompoundLiteralExpr>, /// Matcher<DeclaratorDecl>, Matcher<ExplicitCastExpr>, /// Matcher<ObjCPropertyDecl>, Matcher<TemplateArgumentLoc>, /// Matcher<TypedefNameDecl> AST_POLYMORPHIC_MATCHER_P( hasTypeLoc, AST_POLYMORPHIC_SUPPORTED_TYPES( BlockDecl, CXXBaseSpecifier, CXXCtorInitializer, CXXFunctionalCastExpr, CXXNewExpr, CXXTemporaryObjectExpr, CXXUnresolvedConstructExpr, ClassTemplateSpecializationDecl, CompoundLiteralExpr, DeclaratorDecl, ExplicitCastExpr, ObjCPropertyDecl, TemplateArgumentLoc, TypedefNameDecl), internal::Matcher<TypeLoc>, Inner) { TypeSourceInfo *source = internal::GetTypeSourceInfo(Node); if (source == nullptr) { // This happens for example for implicit destructors. return false; } return Inner.matches(source->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // A null QualType has no canonical type to inspect.
  if (Node.isNull())
    return false;
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept both object calls (y.m()) and pointer calls (p->m()).
  return onImplicitObjectArgument(
             anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
             anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
///     (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) {
  const Decl *DeclNode = Node.getDecl();
  return (DeclNode != nullptr &&
          InnerMatcher.matches(*DeclNode, Finder, Builder));
}

/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl, internal::Matcher<UsingShadowDecl>,
              InnerMatcher) {
  // getFoundDecl() is what name lookup found; it is a UsingShadowDecl only
  // when the reference goes through a using-declaration.
  const NamedDecl *FoundDecl = Node.getFoundDecl();
  if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
    return InnerMatcher.matches(*UsingDecl, Finder, Builder);
  return false;
}

/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder) != Node.decls_end(); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), unsigned, N) { unsigned NumArgs = Node.getNumArgs(); if (!Finder->isTraversalIgnoringImplicitNodes()) return NumArgs == N; while (NumArgs) { if (!isa<CXXDefaultArgExpr>(Node.getArg(NumArgs - 1))) break; --NumArgs; } return NumArgs == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { if (N >= Node.getNumArgs()) return false; const Expr *Arg = Node.getArg(N); if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) return false; return InnerMatcher.matches(*Arg->IgnoreParenImpCasts(), Finder, Builder); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. 
/// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) { return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N; } /// Matches the n'th declaration of a declaration statement. /// /// Note that this does not work for global declarations because the AST /// breaks up multiple-declaration DeclStmt's into multiple single-declaration /// DeclStmt's. /// Example: Given non-global declarations /// \code /// int a, b = 0; /// int c; /// int d = 2, e; /// \endcode /// declStmt(containsDeclaration( /// 0, varDecl(hasInitializer(anything())))) /// matches only 'int d = 2, e;', and /// declStmt(containsDeclaration(1, varDecl())) /// \code /// matches 'int a, b = 0' as well as 'int d = 2, e;' /// but 'int c;' is not matched. /// \endcode AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N, internal::Matcher<Decl>, InnerMatcher) { const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end()); if (N >= NumDecls) return false; DeclStmt::const_decl_iterator Iterator = Node.decl_begin(); std::advance(Iterator, N); return InnerMatcher.matches(**Iterator, Finder, Builder); } /// Matches a C++ catch statement that has a catch-all handler. /// /// Given /// \code /// try { /// // ... /// } catch (int) { /// // ... /// } catch (...) { /// // ... /// } /// \endcode /// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int). AST_MATCHER(CXXCatchStmt, isCatchAll) { return Node.getExceptionDecl() == nullptr; } /// Matches a constructor initializer. 
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); if (MatchIt == Node.init_end()) return false; return (*MatchIt)->isWritten() || !Finder->isTraversalIgnoringImplicitNodes(); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) break; BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches any capture of a lambda expression. /// /// Given /// \code /// void foo() { /// int x; /// auto f = [x](){}; /// } /// \endcode /// lambdaExpr(hasAnyCapture(anything())) /// matches [x](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>, InnerMatcher, 0) { for (const LambdaCapture &Capture : Node.captures()) { if (Capture.capturesVariable()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) { *Builder = std::move(Result); return true; } } } return false; } /// Matches any capture of 'this' in a lambda expression. /// /// Given /// \code /// struct foo { /// void bar() { /// auto f = [this](){}; /// } /// } /// \endcode /// lambdaExpr(hasAnyCapture(cxxThisExpr())) /// matches [this](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<CXXThisExpr>, InnerMatcher, 1) { return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) { return LC.capturesThis(); }); } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. 
///
/// Given
/// \code
/// void foo() {
///   struct point { double x; double y; };
///   point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}

/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
///   class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
///   matches f(int x) {}
/// with hasParameter(...)
///   matching int x
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
//
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  return (N < Node.parameters().size() &&
          InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}

/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Re-match against the call/construct expression itself to pair the
      // argument with the callee's ParamIndex'th parameter declaration.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
///   void (*f_ptr)(int) = f;
///   f_ptr(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParamType(
///     declRefExpr(to(varDecl(hasName("y")))),
///     qualType(isInteger()).bind("type")
/// ))
///   matches f(y) and f_ptr(y)
/// with declRefExpr(...)
///   matching int y
/// and qualType(...)
///   matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;

  // Try to recover a FunctionProtoType from the callee so parameter types
  // are available even for calls through (member) function pointers.
  const FunctionProtoType *FProto = nullptr;

  if (const auto *Call = dyn_cast<CallExpr>(&Node)) {
    if (const auto *Value =
            dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) {
      QualType QT = Value->getType().getCanonicalType();

      // This does not necessarily lead to a `FunctionProtoType`,
      // e.g. K&R functions do not have a function prototype.
      if (QT->isFunctionPointerType())
        FProto = QT->getPointeeType()->getAs<FunctionProtoType>();

      if (QT->isMemberFunctionPointerType()) {
        const auto *MP = QT->getAs<MemberPointerType>();
        assert(MP && "Must be member-pointer if its a memberfunctionpointer");
        FProto = MP->getPointeeType()->getAs<FunctionProtoType>();
        assert(FProto &&
               "The call must have happened through a member function "
               "pointer");
      }
    }
  }

  int ParamIndex = 0;
  bool Matched = false;
  unsigned NumArgs = Node.getNumArgs();
  // Variadic arguments have no declared parameter type; clamp so only the
  // declared parameters are paired with arguments.
  if (FProto && FProto->isVariadic())
    NumArgs = std::min(NumArgs, FProto->getNumParams());
  for (; ArgIndex < NumArgs; ++ArgIndex, ++ParamIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder,
                           &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);

      // This test is cheaper compared to the big matcher in the next if.
      // Therefore, please keep this order.
      if (FProto) {
        QualType ParamType = FProto->getParamType(ParamIndex);
        if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
          Result.addMatch(ParamMatches);
          Matched = true;
          continue;
        }
      }
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher))))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher)))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
        continue;
      }
    }
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  const clang::DeclContext *Context = Node.getParentFunctionOrMethod();

  // Check identity (not just index validity) so a parameter of one
  // declaration does not match the same position of another.
  if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;

  return false;
}

/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
///   class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
///   matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder) != Node.param_end(); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. /// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. /// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. 
/// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. /// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. 
AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); }

/// Matches weak function declarations.
///
/// Given:
/// \code
///   void foo() __attribute__((__weakref__("__foo")));
///   void bar();
/// \endcode
/// functionDecl(isWeak())
///   matches the weak declaration "foo", but not "bar".
AST_MATCHER(FunctionDecl, isWeak) { return Node.isWeak(); }

/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() noexcept(true);
///   void i() noexcept(false);
///   void j() throw();
///   void k() throw(int);
///   void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
///   functionProtoType(hasDynamicExceptionSpec())
///   match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Only functions with a prototype can carry an exception specification.
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
    return FnTy->hasDynamicExceptionSpec();
  return false;
}

/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);

  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;

  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;

  return FnTy->isNothrow();
}

/// Matches constexpr variable and function declarations,
///        and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  return Node.isConstexpr();
}

/// Matches selection statements with initializer.
///
/// Given:
/// \code
///  void foo() {
///    if (int i = foobar(); i > 0) {}
///    switch (int i = foobar(); i) {}
///    for (auto& a = get_range(); auto& x : a) {}
///  }
///  void bar() {
///    if (foobar() > 0) {}
///    switch (foobar()) {}
///    for (auto& x : get_range()) {}
///  }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
///   matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
///   matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
///   matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *Init = Node.getInit();
  return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder);
}

/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
/// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. 
/// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. AST_MATCHER_P(IfStmt, hasConditionVariableStatement, internal::Matcher<DeclStmt>, InnerMatcher) { const DeclStmt* const DeclarationStatement = Node.getConditionVariableDeclStmt(); return DeclarationStatement != nullptr && InnerMatcher.matches(*DeclarationStatement, Finder, Builder); } /// Matches the index expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasIndex(integerLiteral())) /// matches \c i[1] with the \c integerLiteral() matching \c 1 AST_MATCHER_P(ArraySubscriptExpr, hasIndex, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getIdx()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches the base expression of an array subscript expression. 
/// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasBase(implicitCastExpr( /// hasSourceExpression(declRefExpr())))) /// matches \c i[1] with the \c declRefExpr() matching \c i AST_MATCHER_P(ArraySubscriptExpr, hasBase, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getBase()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches a 'for', 'while', 'do while' statement or a function /// definition that has a given body. Note that in case of functions /// this matcher only matches the definition itself and not the other /// declarations of the same function. /// /// Given /// \code /// for (;;) {} /// \endcode /// hasBody(compoundStmt()) /// matches 'for (;;) {}' /// with compoundStmt() /// matching '{}' /// /// Given /// \code /// void f(); /// void f() {} /// \endcode /// hasBody(functionDecl()) /// matches 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void f();' AST_POLYMORPHIC_MATCHER_P(hasBody, AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt, FunctionDecl), internal::Matcher<Stmt>, InnerMatcher) { if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node)) return false; const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches a function declaration that has a given body present in the AST. /// Note that this matcher matches all the declarations of a function whose /// body is present in the AST. 
/// /// Given /// \code /// void f(); /// void f() {} /// void g(); /// \endcode /// functionDecl(hasAnyBody(compoundStmt())) /// matches both 'void f();' /// and 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void g();' AST_MATCHER_P(FunctionDecl, hasAnyBody, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = Node.getBody(); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches compound statements where at least one substatement matches /// a given matcher. Also matches StmtExprs that have CompoundStmt as children. /// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder) != CS->body_end(); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. 
/// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcher<internal::ValueEqualsMatcher, void(internal::AllNodeBaseTypes), ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcher<internal::ValueEqualsMatcher, void(internal::AllNodeBaseTypes), ValueT>( Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). 
/// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P( hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, UnaryOperator), std::string, Name) { if (Optional<StringRef> OpName = internal::getOpName(Node)) return *OpName == Name; return false; } /// Matches operator expressions (binary or unary) that have any of the /// specified names. /// /// hasAnyOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOperatorName("+"), hasOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcher<internal::HasAnyOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, UnaryOperator), std::vector<std::string>>, StringRef, internal::hasAnyOperatorNameFunc> hasAnyOperatorName; /// Matches all kinds of assignment operators. /// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; } /// \endcode AST_POLYMORPHIC_MATCHER( isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator)) { return Node.isAssignmentOp(); } /// Matches comparison operators. 
/// /// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 < s2 /// (matcher = cxxOperatorCallExpr(isComparisonOperator())) /// \code /// struct S { bool operator<(const S& other); }; /// void x(S s1, S s2) { bool b1 = s1 < s2; } /// \endcode AST_POLYMORPHIC_MATCHER( isComparisonOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator)) { return Node.isComparisonOp(); } /// Matches the left hand side of binary operator expressions. /// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = internal::getLHS(Node); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = internal::getRHS(Node); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. 
AST_POLYMORPHIC_MATCHER_P(
    hasEitherOperand,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // Implemented by delegating to hasLHS/hasRHS through a freshly built
  // matcher so both operand positions are tried with full binding support.
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1)),
///                                              integerLiteral(equals(2))))
/// \code
///   1 + 2 // Match
///   2 + 1 // Match
///   1 + 1 // No match
///   2 + 2 // No match
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(
    hasOperands,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, Matcher1, internal::Matcher<Expr>, Matcher2) {
  // Try both pairings so the argument order of hasOperands does not matter.
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
                   allOf(hasLHS(Matcher2), hasRHS(Matcher1))))
      .matches(Node, Finder, Builder);
}

/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasUnaryOperand,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(UnaryOperator,
                                                          CXXOperatorCallExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const Operand = internal::getSubExpr(Node);
  return (Operand != nullptr &&
          InnerMatcher.matches(*Operand, Finder, Builder));
}

/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
/// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches TagDecl object that are spelled with "struct." /// /// Example matches S, but not C, U or E. 
/// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isStruct) { return Node.isStruct(); } /// Matches TagDecl object that are spelled with "union." /// /// Example matches U, but not C, S or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isUnion) { return Node.isUnion(); } /// Matches TagDecl object that are spelled with "class." /// /// Example matches C, but not S, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isClass) { return Node.isClass(); } /// Matches TagDecl object that are spelled with "enum." /// /// Example matches E, but not C, S or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isEnum) { return Node.isEnum(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. 
/// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? /// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { ASTChildrenNotSpelledInSourceScope RAII(Finder, false); const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. 
/// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches declarations of virtual methods and C++ base specifers that specify /// virtual inheritance. /// /// Example: /// \code /// class A { /// public: /// virtual void x(); // matches x /// }; /// \endcode /// /// Example: /// \code /// class Base {}; /// class DirectlyDerived : virtual Base {}; // matches Base /// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base /// \endcode /// /// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER(isVirtual, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl, CXXBaseSpecifier)) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". 
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } AST_MATCHER(CXXConstructorDecl, isInheritingConstructor) { return Node.isInheritingConstructor(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. 
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  return Node.isMoveAssignmentOperator();
}

/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // A method counts as an override either when Sema recorded overridden
  // methods for it or when it carries an explicit "override" attribute.
  return Node.size_overridden_methods() > 0 ||
         Node.hasAttr<OverrideAttr>();
}

/// Matches method declarations that are user-provided.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &) = default; // #2
///     S(S &&) = delete; // #3
///   };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  return Node.isUserProvided();
}

/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     template <class T> void f() { this->f<T>(); f<T>(); }
///     int a;
///     static int b;
///   };
///   template <class T>
///   class Z {
///     void x() { this->m; }
///   };
/// \endcode
/// memberExpr(isArrow())
///   matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
///   matches this->m
/// unresolvedMemberExpr(isArrow())
///   matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  return Node.isArrow();
}

/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
    return Node->isIntegerType();
}

/// Matches QualType nodes that are of unsigned integer type.
/// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. /// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". 
AST_MATCHER(QualType, isConstQualified) {
  return Node.isConstQualified();
}

/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}

/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  return Node.hasLocalQualifiers();
}

/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
/// /// Given /// \code /// struct X { /// int m; /// int f(X x) { x.m; return m; } /// }; /// \endcode /// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m`, but not `m`; however, /// memberExpr(hasObjectExpression(hasType(pointsTo( // cxxRecordDecl(hasName("X")))))) /// matches `m` (aka. `this->m`), but not `x.m`. AST_POLYMORPHIC_MATCHER_P( hasObjectExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr), internal::Matcher<Expr>, InnerMatcher) { if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; return InnerMatcher.matches(*Node.getBase(), Finder, Builder); } /// Matches any using shadow declaration. /// /// Given /// \code /// namespace X { void b(); } /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasName("b")))) /// matches \code using X::b \endcode AST_MATCHER_P(BaseUsingDecl, hasAnyUsingShadowDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(), Node.shadow_end(), Finder, Builder) != Node.shadow_end(); } /// Matches a using shadow declaration where the target declaration is /// matched by the given matcher. /// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. 
/// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. 
/// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches `QualifiedTypeLoc`s in the clang AST. /// /// Given /// \code /// const int x = 0; /// \endcode /// qualifiedTypeLoc() /// matches `const int`. extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, QualifiedTypeLoc> qualifiedTypeLoc; /// Matches `QualifiedTypeLoc`s that have an unqualified `TypeLoc` matching /// `InnerMatcher`. /// /// Given /// \code /// int* const x; /// const int y; /// \endcode /// qualifiedTypeLoc(hasUnqualifiedLoc(pointerTypeLoc())) /// matches the `TypeLoc` of the variable declaration of `x`, but not `y`. 
AST_MATCHER_P(QualifiedTypeLoc, hasUnqualifiedLoc, internal::Matcher<TypeLoc>,
              InnerMatcher) {
  return InnerMatcher.matches(Node.getUnqualifiedLoc(), Finder, Builder);
}

/// Matches a function declared with the specified return `TypeLoc`.
///
/// Given
/// \code
///   int f() { return 5; }
///   void g() {}
/// \endcode
/// functionDecl(hasReturnTypeLoc(loc(asString("int"))))
///   matches the declaration of `f`, but not `g`.
AST_MATCHER_P(FunctionDecl, hasReturnTypeLoc, internal::Matcher<TypeLoc>,
              ReturnMatcher) {
  // getFunctionTypeLoc() may yield an invalid (falsy) TypeLoc; guard on it.
  auto Loc = Node.getFunctionTypeLoc();
  return Loc && ReturnMatcher.matches(Loc.getReturnLoc(), Finder, Builder);
}

/// Matches pointer `TypeLoc`s.
///
/// Given
/// \code
///   int* x;
/// \endcode
/// pointerTypeLoc()
///   matches `int*`.
extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, PointerTypeLoc>
    pointerTypeLoc;

/// Matches pointer `TypeLoc`s that have a pointee `TypeLoc` matching
/// `PointeeMatcher`.
///
/// Given
/// \code
///   int* x;
/// \endcode
/// pointerTypeLoc(hasPointeeLoc(loc(asString("int"))))
///   matches `int*`.
AST_MATCHER_P(PointerTypeLoc, hasPointeeLoc, internal::Matcher<TypeLoc>,
              PointeeMatcher) {
  return PointeeMatcher.matches(Node.getPointeeLoc(), Finder, Builder);
}

/// Matches reference `TypeLoc`s.
///
/// Given
/// \code
///   int x = 3;
///   int& l = x;
///   int&& r = 3;
/// \endcode
/// referenceTypeLoc()
///   matches `int&` and `int&&`.
extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, ReferenceTypeLoc>
    referenceTypeLoc;

/// Matches reference `TypeLoc`s that have a referent `TypeLoc` matching
/// `ReferentMatcher`.
///
/// Given
/// \code
///   int x = 3;
///   int& xx = x;
/// \endcode
/// referenceTypeLoc(hasReferentLoc(loc(asString("int"))))
///   matches `int&`.
AST_MATCHER_P(ReferenceTypeLoc, hasReferentLoc, internal::Matcher<TypeLoc>,
              ReferentMatcher) {
  return ReferentMatcher.matches(Node.getPointeeLoc(), Finder, Builder);
}

/// Matches template specialization `TypeLoc`s.
/// /// Given /// \code /// template <typename T> class C {}; /// C<char> var; /// \endcode /// varDecl(hasTypeLoc(templateSpecializationTypeLoc(typeLoc()))) /// matches `C<char> var`. extern const internal::VariadicDynCastAllOfMatcher< TypeLoc, TemplateSpecializationTypeLoc> templateSpecializationTypeLoc; /// Matches template specialization `TypeLoc`s that have at least one /// `TemplateArgumentLoc` matching the given `InnerMatcher`. /// /// Given /// \code /// template<typename T> class A {}; /// A<int> a; /// \endcode /// varDecl(hasTypeLoc(templateSpecializationTypeLoc(hasAnyTemplateArgumentLoc( /// hasTypeLoc(loc(asString("int"))))))) /// matches `A<int> a`. AST_MATCHER_P(TemplateSpecializationTypeLoc, hasAnyTemplateArgumentLoc, internal::Matcher<TemplateArgumentLoc>, InnerMatcher) { for (unsigned Index = 0, N = Node.getNumArgs(); Index < N; ++Index) { clang::ast_matchers::internal::BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(Node.getArgLoc(Index), Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches template specialization `TypeLoc`s where the n'th /// `TemplateArgumentLoc` matches the given `InnerMatcher`. /// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<double, int> b; /// A<int, double> c; /// \endcode /// varDecl(hasTypeLoc(templateSpecializationTypeLoc(hasTemplateArgumentLoc(0, /// hasTypeLoc(loc(asString("double"))))))) /// matches `A<double, int> b`, but not `A<int, double> c`. AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgumentLoc, AST_POLYMORPHIC_SUPPORTED_TYPES(DeclRefExpr, TemplateSpecializationTypeLoc), unsigned, Index, internal::Matcher<TemplateArgumentLoc>, InnerMatcher) { return internal::MatchTemplateArgLocAt(Node, Index, InnerMatcher, Finder, Builder); } /// Matches C or C++ elaborated `TypeLoc`s. /// /// Given /// \code /// struct s {}; /// struct s ss; /// \endcode /// elaboratedTypeLoc() /// matches the `TypeLoc` of the variable declaration of `ss`. 
extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, ElaboratedTypeLoc>
    elaboratedTypeLoc;

/// Matches elaborated `TypeLoc`s that have a named `TypeLoc` matching
/// `InnerMatcher`.
///
/// Given
/// \code
///   template <typename T>
///   class C {};
///   class C<int> c;
///
///   class D {};
///   class D d;
/// \endcode
/// elaboratedTypeLoc(hasNamedTypeLoc(templateSpecializationTypeLoc()));
///   matches the `TypeLoc` of the variable declaration of `c`, but not `d`.
AST_MATCHER_P(ElaboratedTypeLoc, hasNamedTypeLoc, internal::Matcher<TypeLoc>,
              InnerMatcher) {
  return InnerMatcher.matches(Node.getNamedTypeLoc(), Finder, Builder);
}

/// Matches type \c bool.
///
/// Given
/// \code
///  struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
///   matches "bool func();"
AST_MATCHER(Type, booleanType) {
  return Node.isBooleanType();
}

/// Matches type \c void.
///
/// Given
/// \code
///  struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
///   matches "void func();"
AST_MATCHER(Type, voidType) {
  return Node.isVoidType();
}

// Convenience alias for declaring the node matchers for Type subclasses below.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;

/// Matches builtin Types.
///
/// Given
/// \code
///   struct A {};
///   A a;
///   int b;
///   float c;
///   bool d;
/// \endcode
/// builtinType()
///   matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;

/// Matches all kinds of arrays.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[4];
///   void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
///   matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;

/// Matches C99 complex types.
///
/// Given
/// \code
///   _Complex float f;
/// \endcode
/// complexType()
///   matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;

/// Matches any real floating-point type (float, double, long double).
/// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. 
/// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. 
/// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. /// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. 
/// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. /// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. 
/// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. /// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. /// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. 
/// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches C++17 deduced template specialization types, e.g. deduced class /// template types. /// /// Given /// \code /// template <typename T> /// class C { public: C(T); }; /// /// C c(123); /// \endcode /// \c deducedTemplateSpecializationType() matches the type in the declaration /// of the variable \c c. extern const AstTypeMatcher<DeducedTemplateSpecializationType> deducedTemplateSpecializationType; /// Matches types nodes representing unary type transformations. /// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). /// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. 
extern const AstTypeMatcher<ElaboratedType> elaboratedType;

/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
    return InnerMatcher.matches(*Qualifier, Finder, Builder);

  return false;
}

/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}

/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
///   template <typename T>
///   void F(T t) {
///     int i = 1 + t;
///   }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
    substTemplateTypeParmType;

/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
/// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. /// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whoes decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. 
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Matches attributes. 
/// Attributes may be attached with a variety of different syntaxes (including /// keywords, C++11 attributes, GNU ``__attribute``` and MSVC `__declspec``, /// and ``#pragma``s). They may also be implicit. /// /// Given /// \code /// struct [[nodiscard]] Foo{}; /// void bar(int * __attribute__((nonnull)) ); /// __declspec(noinline) void baz(); /// /// #pragma omp declare simd /// int min(); /// \endcode /// attr() /// matches "nodiscard", "nonnull", "noinline", and the whole "#pragma" line. extern const internal::VariadicAllOfMatcher<Attr> attr; /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. /// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. 
We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { if (Finder->isTraversalIgnoringImplicitNodes() && !I->isWritten()) continue; BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. 
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. /// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. 
/// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; ASTChildrenNotSpelledInSourceScope RAII(Finder, false); return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. 
AST_MATCHER(NamespaceDecl, isAnonymous) {
  return Node.isAnonymousNamespace();
}

/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
///   class vector {};
///   namespace foo {
///     class vector {};
///     namespace std {
///       class vector {};
///     }
///   }
///   namespace std {
///     inline namespace __1 {
///       class vector {}; // #1
///       namespace experimental {
///         class vector {};
///       }
///     }
///   }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }

/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
///   switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
///   matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A GNU case range ("case 3 ... 4:") carries an RHS expression; such a
  // statement has no single case constant, so it never matches.
  return !Node.getRHS() &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}

/// Matches declaration that has a given attribute.
///
/// Given
/// \code
///   __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) { return anyOf( gnuNullExpr(), cxxNullPtrLiteralExpr(), integerLiteral(equals(0), hasParent(expr(hasType(pointerType()))))); } /// Matches the DecompositionDecl the binding belongs to. /// /// For example, in: /// \code /// void foo() /// { /// int arr[3]; /// auto &[f, s, t] = arr; /// /// f = 42; /// } /// \endcode /// The matcher: /// \code /// bindingDecl(hasName("f"), /// forDecomposition(decompositionDecl()) /// \endcode /// matches 'f' in 'auto &[f, s, t]'. 
AST_MATCHER_P(BindingDecl, forDecomposition, internal::Matcher<ValueDecl>, InnerMatcher) { if (const ValueDecl *VD = Node.getDecomposedDecl()) return InnerMatcher.matches(*VD, Finder, Builder); return false; } /// Matches the Nth binding of a DecompositionDecl. /// /// For example, in: /// \code /// void foo() /// { /// int arr[3]; /// auto &[f, s, t] = arr; /// /// f = 42; /// } /// \endcode /// The matcher: /// \code /// decompositionDecl(hasBinding(0, /// bindingDecl(hasName("f").bind("fBinding")))) /// \endcode /// matches the decomposition decl with 'f' bound to "fBinding". AST_MATCHER_P2(DecompositionDecl, hasBinding, unsigned, N, internal::Matcher<BindingDecl>, InnerMatcher) { if (Node.bindings().size() <= N) return false; return InnerMatcher.matches(*Node.bindings()[N], Finder, Builder); } /// Matches any binding of a DecompositionDecl. /// /// For example, in: /// \code /// void foo() /// { /// int arr[3]; /// auto &[f, s, t] = arr; /// /// f = 42; /// } /// \endcode /// The matcher: /// \code /// decompositionDecl(hasAnyBinding(bindingDecl(hasName("f").bind("fBinding")))) /// \endcode /// matches the decomposition decl with 'f' bound to "fBinding". AST_MATCHER_P(DecompositionDecl, hasAnyBinding, internal::Matcher<BindingDecl>, InnerMatcher) { return llvm::any_of(Node.bindings(), [&](const auto *Binding) { return InnerMatcher.matches(*Binding, Finder, Builder); }); } /// Matches declaration of the function the statement belongs to. /// /// Deprecated. Use forCallable() to correctly handle the situation when /// the declaration is not a function (but a block or an Objective-C method). /// forFunction() not only fails to take non-functions into account but also /// may match the wrong declaration in their presence. 
/// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forFunction(hasName("operator="))) /// matches 'return *this' /// but does not match 'return v > 0' AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>, InnerMatcher) { const auto &Parents = Finder->getASTContext().getParents(Node); llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end()); while (!Stack.empty()) { const auto &CurNode = Stack.back(); Stack.pop_back(); if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) { if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) { return true; } } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) { if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder, Builder)) { return true; } } else { for (const auto &Parent : Finder->getASTContext().getParents(CurNode)) Stack.push_back(Parent); } } return false; } /// Matches declaration of the function, method, or block the statement /// belongs to. /// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forCallable(functionDecl(hasName("operator=")))) /// matches 'return *this' /// but does not match 'return v > 0' /// /// Given: /// \code /// -(void) foo { /// int x = 1; /// dispatch_sync(queue, ^{ int y = 2; }); /// } /// \endcode /// declStmt(forCallable(objcMethodDecl())) /// matches 'int x = 1' /// but does not match 'int y = 2'. /// whereas declStmt(forCallable(blockDecl())) /// matches 'int y = 2' /// but does not match 'int x = 1'. 
AST_MATCHER_P(Stmt, forCallable, internal::Matcher<Decl>, InnerMatcher) {
  // Like forFunction(), but also stops at Objective-C methods and blocks,
  // so statements inside those callables match their true owner.
  const auto &Parents = Finder->getASTContext().getParents(Node);

  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    // Copy the node out of the worklist before popping it: holding a
    // reference to Stack.back() across pop_back() — and across the
    // push_backs below, which may reallocate the vector — would leave it
    // dangling. DynTypedNode is a small value type, so the copy is cheap.
    const DynTypedNode CurNode = Stack.back();
    Stack.pop_back();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A lambda body belongs to the lambda's call operator.
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder)) {
        return true;
      }
    } else if (const auto *ObjCMethodDeclNode = CurNode.get<ObjCMethodDecl>()) {
      if (InnerMatcher.matches(*ObjCMethodDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *BlockDeclNode = CurNode.get<BlockDecl>()) {
      if (InnerMatcher.matches(*BlockDeclNode, Finder, Builder)) {
        return true;
      }
    } else {
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}

/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal likage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  return Node.hasExternalFormalLinkage();
}

/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument.
For example: /// /// \code /// void x(int val = 7) {} /// void y(int val = 42) {} /// \endcode /// parmVarDecl(hasInitializer(integerLiteral(equals(42)))) /// matches the parameter of y /// /// A matcher such as /// parmVarDecl(hasInitializer(anything())) /// is equivalent to parmVarDecl(hasDefaultArgument()). AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage, 16) MyClass(); /// \endcode /// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16)))) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index, internal::Matcher<Expr>, InnerMatcher) { return Node.getNumPlacementArgs() > Index && InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder); } /// Matches any placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage) MyClass(); /// \endcode /// cxxNewExpr(hasAnyPlacementArg(anything())) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>, InnerMatcher) { return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) { return InnerMatcher.matches(*Arg, Finder, Builder); }); } /// Matches array new expressions with a given array size. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(integerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. 
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  if (!Node.isArray())
    return false;
  // getArraySize() returns an optional that is disengaged when the bound is
  // not written explicitly (e.g. `new int[]{1, 2, 3}`, where the size is
  // deduced). Check the optional before dereferencing it — the previous
  // `*Node.getArraySize()` without the guard was undefined behavior on such
  // nodes. The inner pointer can additionally be null, so check both layers.
  const auto ArraySize = Node.getArraySize();
  return ArraySize && *ArraySize &&
         InnerMatcher.matches(**ArraySize, Finder, Builder);
}

/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  return Node.hasDefinition();
}

/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  return Node.isScoped();
}

/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
    return F->hasTrailingReturn();
  return false;
}

/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
///   H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but, it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. 
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder) != Clauses.end(); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and /// ``default(firstprivate)`` extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. 
/// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared; } /// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind /// specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isFirstPrivateKind())`` matches only /// ``default(firstprivate)``. AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. 
e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return llvm::omp::isAllowedClauseForDirective( Node.getDirectiveKind(), CKind, Finder->getASTContext().getLangOpts().OpenMP); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H