source
stringlengths
3
92
c
stringlengths
26
2.25M
openmp-ex02.c
/*
 * Demonstrates precedence between omp_set_num_threads() and the
 * num_threads() clause on a parallel construct: the clause wins,
 * so the region below runs with 7 threads, not 5.
 */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    const int requested_threads = 5;

    printf ("You're all individuals!\n");

    /* Ask the runtime for 5 threads globally... */
    omp_set_num_threads(requested_threads);

    /* ...but the num_threads clause overrides the global setting
     * for this one region, so 7 threads each print a line. */
    #pragma omp parallel num_threads(7)
    {
        printf("Yes, we're all individuals!\n");
    }

    return 0;
}
GB_unaryop__lnot_int32_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_int32_fp32
// op(A') function: GB_tran__lnot_int32_fp32

// C type: int32_t
// A type: float
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = !(aij != 0)

// NOTE(review): the macros below form the contract consumed by the shared
// template file GB_unaryop_transpose.c, included near the end of this file.

// type of the A matrix entries
#define GB_ATYPE \
    float

// type of the C matrix entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// access the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    int32_t z ; GB_CAST_SIGNED(z,x,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) to each of the anz entries of Ax (float),
// writing the int32_t results into Cx, using up to nthreads OpenMP threads.
// Returns GrB_NO_VALUE when this specialized kernel is compiled out.
GrB_Info GB_unop__lnot_int32_fp32
(
    int32_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The kernel body lives in the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above.
GrB_Info GB_tran__lnot_int32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
update_ops_named_state.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef _USE_SIMD #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #endif void normalize(double squared_norm, CTYPE* state, ITYPE dim){ const ITYPE loop_dim = dim; const double normalize_factor = sqrt(1./squared_norm); ITYPE state_index; #ifdef _OPENMP #pragma omp parallel for #endif for(state_index=0 ; state_index<loop_dim ; ++state_index){ state[state_index] *= normalize_factor; } } void state_add(const CTYPE *state_added, CTYPE *state, ITYPE dim) { ITYPE index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < dim; ++index) { state[index] += state_added[index]; } } void state_multiply(CTYPE coef, CTYPE *state, ITYPE dim) { ITYPE index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < dim; ++index) { state[index] *= coef; } }
10.norace5.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> int main() { int x = 0; #pragma omp parallel num_threads(8) { #pragma omp sections lastprivate(x) { { x = 1; } #pragma omp section { x = 2; } } } return x; } // CHECK: Region is Data Race Free. // END
sieveOfErastotenes.c
/*
 * Adapted from: https://ideone.com/JU5CfV and
 * https://github.com/stbrumme/eratosthenes
 *
 * --- Timing via clock():
 * Eratosthenes sequential: 5761455 / time: 2.04s
 * Eratosthenes parallel:   5761455 / time: 1.07s
 * Speed up: 1.9
 * --- Timing via the `time` command:
 * sequential: 1.95s user 0.03s system  99% cpu 1.977 total
 * parallel:   1.53s user 0.00s system 514% cpu 0.299 total
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>

const int blockSize = 128 * 1024;
const int n = 100000000;
const bool useOpenMP = true;

/* Sieve the odd numbers of one block [from, to] and return how many primes
 * it contains (2 is credited to the block that starts at or below 2). */
int eratosthenesOddSingleBlock(const int from, const int to) {
  const int memorySize = (to - from + 1) / 2;

  /* one flag per odd number of the block, initially marked "prime" */
  char *isPrime = malloc(memorySize * sizeof *isPrime);
  if (isPrime == NULL)   /* fix: original dereferenced an unchecked malloc */
    return 0;
  for (int i = 0; i < memorySize; i++)
    isPrime[i] = 1;

  for (int i = 3; i * i <= to; i += 2) {
    /* cheap wheel: skip multiples of the first few small primes */
    if (i >= 3 * 3 && i % 3 == 0) continue;
    if (i >= 5 * 5 && i % 5 == 0) continue;
    if (i >= 7 * 7 && i % 7 == 0) continue;
    if (i >= 11 * 11 && i % 11 == 0) continue;
    if (i >= 13 * 13 && i % 13 == 0) continue;

    /* first multiple of i inside the block, never below i*i */
    int minJ = ((from + i - 1) / i) * i;
    if (minJ < i * i)
      minJ = i * i;

    /* starting value must be odd (even multiples are not stored) */
    if ((minJ & 1) == 0)
      minJ += i;

    /* cross off all odd multiples of i inside the block */
    for (int j = minJ; j <= to; j += 2 * i) {
      int index = j - from;
      isPrime[index / 2] = 0;
    }
  }

  /* count the primes of this block */
  int found = 0;
  for (int i = 0; i < memorySize; i++)
    found += isPrime[i];

  /* 2 is prime but not odd, so it is not represented in the flags */
  if (from <= 2)
    found++;

  free(isPrime);
  return found;
}

/* Split the sieve into cache-sized blocks and process them in parallel.
 * Returns the number of primes <= fim. */
int eratosthenesBlockwise(int fim) {
  /* enable OpenMP (or force a single thread) */
  omp_set_num_threads(useOpenMP ? omp_get_num_procs() : 1);

  /* fix: the count was accumulated in a `double` and implicitly narrowed
   * on return; a prime count is integral, and int reductions are valid */
  int found = 0;
#pragma omp parallel for reduction(+ : found)
  for (int from = 2; from <= fim; from += blockSize) {
    int to = from + blockSize;
    if (to > fim)
      to = fim;
    found += eratosthenesOddSingleBlock(from, to);
  }
  return found;
}

/* Classic sequential sieve: returns the number of primes <= n.
 * prime[i] ends up false iff i is composite. */
int sieveOfEratosthenes(int n) {
  int primes = 0;
  /* fix: removed the needless cast of malloc; check the result */
  bool *prime = malloc((n + 1) * sizeof *prime);
  if (prime == NULL)
    return 0;
  int sqrt_n = sqrt(n);
  memset(prime, true, (n + 1) * sizeof *prime);

  for (int p = 2; p <= sqrt_n; p++) {
    /* if prime[p] survived, p is prime: cross off its multiples */
    if (prime[p] == true) {
      for (int i = p * 2; i <= n; i += p)
        prime[i] = false;
    }
  }

  /* count surviving entries */
  for (int p = 2; p <= n; p++)
    if (prime[p])
      primes++;

  free(prime);   /* fix: the original leaked this (n+1)-byte buffer */
  return (primes);
}

int main() {
  clock_t tStart = clock();
  int eratosthenes = sieveOfEratosthenes(n);
  printf("Erastothenes Sequencial: %d / Tempo: %.2fs\n", eratosthenes,
         (double)(clock() - tStart) / CLOCKS_PER_SEC);

  tStart = clock();
  int parallelratosthenes = eratosthenesBlockwise(n);
  printf("Erastothenes Paralelo: %d / Tempo: %.2fs\n", parallelratosthenes,
         (double)(clock() - tStart) / CLOCKS_PER_SEC);
  return 0;
}
fourier.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF OOO U U RRRR IIIII EEEEE RRRR % % F O O U U R R I E R R % % FFF O O U U RRRR I EEE RRRR % % F O O U U R R I E R R % % F OOO UUU R R IIIII EEEEE R R % % % % % % MagickCore Discrete Fourier Transform Methods % % % % Software Design % % Sean Burke % % Fred Weinhaus % % John Cristy % % July 2009 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/fourier.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* Typedef declarations. 
*/ typedef struct _FourierInfo { ChannelType channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F o r w a r d F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ForwardFourierTransformImage() implements the discrete Fourier transform % (DFT) of the image either as a magnitude / phase or real / imaginary image % pair. % % The format of the ForwadFourierTransformImage method is: % % Image *ForwardFourierTransformImage(const Image *image, % const MagickBooleanType modulus,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulus: if true, return as transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType RollFourier(const size_t width,const size_t height, const ssize_t x_offset,const ssize_t y_offset,double *fourier) { double *roll; register ssize_t i, x; ssize_t u, v, y; /* Move zero frequency (DC, average color) from (0,0) to (width/2,height/2). */ roll=(double *) AcquireQuantumMemory((size_t) height,width*sizeof(*roll)); if (roll == (double *) NULL) return(MagickFalse); i=0L; for (y=0L; y < (ssize_t) height; y++) { if (y_offset < 0L) v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset; else v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height : y+y_offset; for (x=0L; x < (ssize_t) width; x++) { if (x_offset < 0L) u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset; else u=((x+x_offset) > ((ssize_t) width-1L)) ? 
x+x_offset-(ssize_t) width : x+x_offset; roll[v*width+u]=fourier[i++]; } } (void) CopyMagickMemory(fourier,roll,height*width*sizeof(*roll)); roll=(double *) RelinquishMagickMemory(roll); return(MagickTrue); } static MagickBooleanType ForwardQuadrantSwap(const size_t width, const size_t height,double *source,double *destination) { MagickBooleanType status; register ssize_t x; ssize_t center, y; /* Swap quadrants. */ center=(ssize_t) floor((double) width/2L)+1L; status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,source); if (status == MagickFalse) return(MagickFalse); for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L-1L); x++) destination[width*y+x+width/2L]=source[center*y+x]; for (y=1; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L-1L); x++) destination[width*(height-y)+width/2L-x-1L]=source[center*y+x+1L]; for (x=0L; x < (ssize_t) (width/2L); x++) destination[-x+width/2L-1L]=destination[x+width/2L+1L]; return(MagickTrue); } static void CorrectPhaseLHS(const size_t width,const size_t height, double *fourier) { register ssize_t x; ssize_t y; for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) fourier[y*width+x]*=(-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info, Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude_source, *phase_source; Image *magnitude_image, *phase_image; MagickBooleanType status; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; ssize_t i, y; magnitude_image=GetFirstImageInList(image); phase_image=GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",image->filename); return(MagickFalse); } /* Create "Fourier Transform" image from constituent arrays. 
*/ magnitude_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,fourier_info->width*sizeof(*magnitude_source)); if (magnitude_source == (double *) NULL) return(MagickFalse); (void) ResetMagickMemory(magnitude_source,0,fourier_info->height* fourier_info->width*sizeof(*magnitude_source)); phase_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase_source)); if (phase_source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); return(MagickFalse); } status=ForwardQuadrantSwap(fourier_info->height,fourier_info->height, magnitude,magnitude_source); if (status != MagickFalse) status=ForwardQuadrantSwap(fourier_info->height,fourier_info->height,phase, phase_source); CorrectPhaseLHS(fourier_info->height,fourier_info->height,phase_source); if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_source[i]/=(2.0*MagickPI); phase_source[i]+=0.5; i++; } } magnitude_view=AcquireCacheView(magnitude_image); phase_view=AcquireCacheView(phase_image); i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->height,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(magnitude_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { q->red=ClampToQuantum(QuantumRange*magnitude_source[i]); break; } case GreenChannel: { q->green=ClampToQuantum(QuantumRange*magnitude_source[i]); break; } case BlueChannel: { q->blue=ClampToQuantum(QuantumRange*magnitude_source[i]); break; } case OpacityChannel: { q->opacity=ClampToQuantum(QuantumRange*magnitude_source[i]); break; } case IndexChannel: { 
indexes[x]=ClampToQuantum(QuantumRange*magnitude_source[i]); break; } case GrayChannels: { SetGrayPixelComponent(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(magnitude_view,exception); if (status == MagickFalse) break; } i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->height,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(phase_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { q->red=ClampToQuantum(QuantumRange*phase_source[i]); break; } case GreenChannel: { q->green=ClampToQuantum(QuantumRange*phase_source[i]); break; } case BlueChannel: { q->blue=ClampToQuantum(QuantumRange*phase_source[i]); break; } case OpacityChannel: { q->opacity=ClampToQuantum(QuantumRange*phase_source[i]); break; } case IndexChannel: { indexes[x]=ClampToQuantum(QuantumRange*phase_source[i]); break; } case GrayChannels: { SetGrayPixelComponent(q,ClampToQuantum(QuantumRange*phase_source[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(phase_view,exception); if (status == MagickFalse) break; } phase_view=DestroyCacheView(phase_view); magnitude_view=DestroyCacheView(magnitude_view); phase_source=(double *) RelinquishMagickMemory(phase_source); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); return(status); } static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info, const Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *image_view; double n, *source; fftw_complex *fourier; fftw_plan fftw_r2c_plan; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Generate the forward Fourier transform. 
*/ source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*source)); if (source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } ResetMagickMemory(source,0,fourier_info->height*fourier_info->width* sizeof(*source)); i=0L; image_view=AcquireCacheView(image); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { source[i]=QuantumScale*GetRedPixelComponent(p); break; } case GreenChannel: { source[i]=QuantumScale*GetGreenPixelComponent(p); break; } case BlueChannel: { source[i]=QuantumScale*GetBluePixelComponent(p); break; } case OpacityChannel: { source[i]=QuantumScale*GetOpacityPixelComponent(p); break; } case IndexChannel: { source[i]=QuantumScale*indexes[x]; break; } case GrayChannels: { source[i]=QuantumScale*GetGrayPixelComponent(p); break; } } i++; p++; } } image_view=DestroyCacheView(image_view); fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->center*sizeof(*fourier)); if (fourier == (fftw_complex *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); source=(double *) RelinquishMagickMemory(source); return(MagickFalse); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->width, source,fourier,FFTW_ESTIMATE); fftw_execute(fftw_r2c_plan); fftw_destroy_plan(fftw_r2c_plan); source=(double *) RelinquishMagickMemory(source); /* Normalize Fourier transform. 
*/ n=(double) fourier_info->width*(double) fourier_info->width; i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]/=n; #else fourier[i][0]/=n; fourier[i][1]/=n; #endif i++; } /* Generate magnitude and phase (or real and imaginary). */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude[i]=cabs(fourier[i]); phase[i]=carg(fourier[i]); i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude[i]=creal(fourier[i]); phase[i]=cimag(fourier[i]); i++; } fourier=(fftw_complex *) RelinquishMagickMemory(fourier); return(MagickTrue); } static MagickBooleanType ForwardFourierTransformChannel(const Image *image, const ChannelType channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { double *magnitude, *phase; fftw_complex *fourier; FourierInfo fourier_info; MagickBooleanType status; size_t extent; fourier_info.width=image->columns; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { extent=image->columns < image->rows ? image->rows : image->columns; fourier_info.width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) floor((double) fourier_info.width/2.0)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*magnitude)); if (magnitude == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } phase=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*phase)); if (phase == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*fourier)); if (fourier == (fftw_complex *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } status=ForwardFourierTransform(&fourier_info,image,magnitude,phase,exception); if (status != MagickFalse) status=ForwardFourier(&fourier_info,fourier_image,magnitude,phase, exception); fourier=(fftw_complex *) RelinquishMagickMemory(fourier); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(status); } #endif MagickExport Image *ForwardFourierTransformImage(const Image *image, const MagickBooleanType modulus,ExceptionInfo *exception) { Image *fourier_image; fourier_image=NewImageList(); #if !defined(MAGICKCORE_FFTW_DELEGATE) (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", image->filename); 
#else { Image *magnitude_image; size_t extent, width; width=image->columns; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { extent=image->columns < image->rows ? image->rows : image->columns; width=(extent & 0x01) == 1 ? extent+1UL : extent; } magnitude_image=CloneImage(image,width,width,MagickFalse,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,width,MagickFalse,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsGrayImage(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayChannels,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image, RedChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->matte != MagickFalse) thread_status=ForwardFourierTransformChannel(image, OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->colorspace == CMYKColorspace) thread_status=ForwardFourierTransformChannel(image, IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return(fourier_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InverseFourierTransformImage() implements the inverse discrete Fourier % transform (DFT) of the image either as a magnitude / phase or real / % imaginary image pair. % % The format of the InverseFourierTransformImage method is: % % Image *InverseFourierTransformImage(const Image *magnitude_image, % const Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo *exception) % % A description of each parameter follows: % % o magnitude_image: the magnitude or real image. % % o phase_image: the phase or imaginary image. % % o modulus: if true, return transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height,const double *source,double *destination) { register ssize_t x; ssize_t center, y; /* Swap quadrants. 
*/ center=(ssize_t) floor((double) width/2.0)+1L; for (y=1L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L+1L); x++) destination[center*(height-y)-x+width/2L]=source[y*width+x]; for (y=0L; y < (ssize_t) height; y++) destination[center*y]=source[y*width+width/2L]; for (x=0L; x < center; x++) destination[x]=source[center-x-1L]; return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination)); } static MagickBooleanType InverseFourier(FourierInfo *fourier_info, const Image *magnitude_image,const Image *phase_image,fftw_complex *fourier, ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude, *phase, *magnitude_source, *phase_source; MagickBooleanType status; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Inverse fourier - read image and break down into a double array. */ magnitude_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,fourier_info->width*sizeof(*magnitude_source)); if (magnitude_source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } phase_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase_source)); if (phase_source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); return(MagickFalse); } i=0L; magnitude_view=AcquireCacheView(magnitude_image); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(magnitude_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case 
RedChannel: default: { magnitude_source[i]=QuantumScale*GetRedPixelComponent(p); break; } case GreenChannel: { magnitude_source[i]=QuantumScale*GetGreenPixelComponent(p); break; } case BlueChannel: { magnitude_source[i]=QuantumScale*GetBluePixelComponent(p); break; } case OpacityChannel: { magnitude_source[i]=QuantumScale*GetOpacityPixelComponent(p); break; } case IndexChannel: { magnitude_source[i]=QuantumScale*indexes[x]; break; } case GrayChannels: { magnitude_source[i]=QuantumScale*GetGrayPixelComponent(p); break; } } i++; p++; } } i=0L; phase_view=AcquireCacheView(phase_image); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(phase_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { phase_source[i]=QuantumScale*GetRedPixelComponent(p); break; } case GreenChannel: { phase_source[i]=QuantumScale*GetGreenPixelComponent(p); break; } case BlueChannel: { phase_source[i]=QuantumScale*GetBluePixelComponent(p); break; } case OpacityChannel: { phase_source[i]=QuantumScale*GetOpacityPixelComponent(p); break; } case IndexChannel: { phase_source[i]=QuantumScale*indexes[x]; break; } case GrayChannels: { phase_source[i]=QuantumScale*GetGrayPixelComponent(p); break; } } i++; p++; } } if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_source[i]-=0.5; phase_source[i]*=(2.0*MagickPI); i++; } } magnitude_view=DestroyCacheView(magnitude_view); phase_view=DestroyCacheView(phase_view); magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->center*sizeof(*magnitude)); if (magnitude == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", 
magnitude_image->filename); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); phase_source=(double *) RelinquishMagickMemory(phase_source); return(MagickFalse); } status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, magnitude_source,magnitude); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); phase=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase)); if (phase == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); phase_source=(double *) RelinquishMagickMemory(phase_source); return(MagickFalse); } CorrectPhaseLHS(fourier_info->width,fourier_info->width,phase_source); if (status != MagickFalse) status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, phase_source,phase); phase_source=(double *) RelinquishMagickMemory(phase_source); /* Merge two sets. */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]=magnitude[i]*cos(phase[i])+I*magnitude[i]*sin(phase[i]); #else fourier[i][0]=magnitude[i]*cos(phase[i]); fourier[i][1]=magnitude[i]*sin(phase[i]); #endif i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]=magnitude[i]+I*phase[i]; #else fourier[i][0]=magnitude[i]; fourier[i][1]=phase[i]; #endif i++; } phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(status); } static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info, fftw_complex *fourier,Image *image,ExceptionInfo *exception) { CacheView *image_view; double *source; fftw_plan fftw_c2r_plan; register IndexPacket *indexes; register PixelPacket *q; register 
ssize_t i, x; ssize_t y; source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*source)); if (source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif { fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height, fourier,source,FFTW_ESTIMATE); fftw_execute(fftw_c2r_plan); fftw_destroy_plan(fftw_c2r_plan); } i=0L; image_view=AcquireCacheView(image); for (y=0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width > image->columns ? image->columns : fourier_info->width,1UL,exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { q->red=ClampToQuantum(QuantumRange*source[i]); break; } case GreenChannel: { q->green=ClampToQuantum(QuantumRange*source[i]); break; } case BlueChannel: { q->blue=ClampToQuantum(QuantumRange*source[i]); break; } case OpacityChannel: { q->opacity=ClampToQuantum(QuantumRange*source[i]); break; } case IndexChannel: { indexes[x]=ClampToQuantum(QuantumRange*source[i]); break; } case GrayChannels: { SetGrayPixelComponent(q,ClampToQuantum(QuantumRange*source[i])); break; } } i++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } image_view=DestroyCacheView(image_view); source=(double *) RelinquishMagickMemory(source); return(MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image *magnitude_image,const Image *phase_image, const ChannelType channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { double *magnitude, *phase; 
fftw_complex *fourier; FourierInfo fourier_info; MagickBooleanType status; size_t extent; fourier_info.width=magnitude_image->columns; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { extent=magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) floor((double) fourier_info.width/2.0)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*magnitude)); if (magnitude == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } phase=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*phase)); if (phase == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*fourier)); if (fourier == (fftw_complex *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } status=InverseFourier(&fourier_info,magnitude_image,phase_image,fourier, exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,fourier,fourier_image, exception); fourier=(fftw_complex *) RelinquishMagickMemory(fourier); phase=(double *) RelinquishMagickMemory(phase); 
magnitude=(double *) RelinquishMagickMemory(magnitude); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickFalse,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsGrayImage(magnitude_image,exception); if (is_gray != MagickFalse) is_gray=IsGrayImage(phase_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GrayChannels,modulus,fourier_image,exception); else thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,RedChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, 
phase_image,GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->matte != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImage(fourier_image); } fftw_cleanup(); } #endif return(fourier_image); }
flush_exampleA_21_1c.c
/* OpenMP spec30.pdf Example A.21.1c */ #include <omp.h> #define NUMBER_OF_THREADS 256 int synch[NUMBER_OF_THREADS]; float work[NUMBER_OF_THREADS]; float result[NUMBER_OF_THREADS]; float fn1(int i) { return i*2.0; } float fn2(float a, float b) { return a + b; } int main() { int iam, neighbor; #pragma omp parallel private(iam,neighbor) shared(work,synch) { iam = omp_get_thread_num(); synch[iam] = 0; #pragma omp barrier /*Do computation into my portion of work array */ work[iam] = fn1(iam); /* Announce that I am done with my work. The first flush * ensures that my work is made visible before synch. * The second flush ensures that synch is made visible. */ #pragma omp flush(work,synch) synch[iam] = 1; #pragma omp flush(synch) /* Wait for neighbor. The first flush ensures that synch is read * from memory, rather than from the temporary view of memory. * The second flush ensures that work is read from memory, and * is done so after the while loop exits. */ neighbor = (iam>0 ? iam : omp_get_num_threads()) - 1; while (synch[neighbor] == 0) { #pragma omp flush(synch) } #pragma omp flush(work,synch) /* Read neighbor’s values of work array */ result[iam] = fn2(work[neighbor], work[iam]); } /* output result here */ return 0; }
DRB003-antidep2-orig-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* A two-level loop nest with loop carried anti-dependence on the outer level. 
   Data race pair: a[i][j]@67:7 vs. a[i+1][j]@67:18 */
#include <stdio.h>
#include <stdlib.h>
/*
 * NOTE(review): this is a DataRaceBench test case (the "-yes" suffix means a
 * data race is INTENTIONALLY present -- race detectors are graded on finding
 * it). Do not "fix" the race. The `#pragma cetus` lines record how the Cetus
 * auto-parallelizer annotated the loops; only the `#pragma omp` lines affect
 * compilation.
 *
 * NOTE(review): as transcribed here, the `omp parallel for` in the second
 * nest immediately precedes the inner j loop and therefore binds to it,
 * which keeps the i iterations sequential and would mask the documented
 * anti-dependence race (a[i][j] write vs. a[i+1][j] read across i).
 * In the upstream DRB003 source the directive sits on the OUTER i loop --
 * confirm against the original before relying on this file's behavior.
 */
int main(int argc, char * argv[])
{
	int i, j;
	int len = 20;       /* matrix extent: fixed 20x20 */
	double a[20][20];
	int _ret_val_0;     /* Cetus-generated return temporary */
	/* Initialization sweep: a[i][j] = i*len + j + 0.5 (fully parallel). */
	#pragma cetus private(i, j)
	#pragma loop name main#0
	#pragma cetus parallel
	#pragma omp parallel for private(i, j)
	for (i=0; i<len; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name main#0#0
		#pragma cetus parallel
		#pragma omp parallel for private(j)
		for (j=0; j<len; j ++ )
		{
			a[i][j]=(((i*len)+j)+0.5);
		}
	}
	/* Update sweep with the loop-carried anti-dependence on the outer i
	 * level: iteration i reads a[i+1][j], which iteration i+1 overwrites. */
	#pragma cetus private(i, j)
	#pragma loop name main#1
	for (i=0; i<(len-1); i+=1)
	{
		#pragma cetus private(j)
		#pragma loop name main#1#0
		#pragma cetus parallel
		#pragma omp parallel for private(j)
		for (j=0; j<len; j+=1)
		{
			a[i][j]+=a[i+1][j];
		}
	}
	/* Serial dump of the whole matrix. */
	#pragma cetus private(i, j)
	#pragma loop name main#2
	for (i=0; i<len; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name main#2#0
		for (j=0; j<len; j ++ )
		{
			printf("%lf", a[i][j]);
		}
	}
	printf("a[10][10]=%f\n", a[10][10]);
	_ret_val_0=0;
	return _ret_val_0;
}
stribog_fmt_plug.c
/* * GOST R 34.11-2012 cracker patch for JtR. Hacked together during * the Hash Runner 2015 contest by Dhiru Kholia and Aleksey Cherepanov. * * Based on https://www.streebog.net/ and https://github.com/sjinks/php-stribog * code. See "LICENSE.gost" for licensing details of the original code. */ #include "arch.h" #if __SSE4_1__ #if FMT_EXTERNS_H extern struct fmt_main fmt_stribog_256; extern struct fmt_main fmt_stribog_512; #elif FMT_REGISTERS_H john_register_one(&fmt_stribog_256); john_register_one(&fmt_stribog_512); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "gost3411-2012-sse41.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 512 // XXX #endif #endif #include "memdbg.h" #define FORMAT_LABEL "stribog" #define FORMAT_NAME "" #define TAG256 "$stribog256$" #define TAG256_LENGTH (sizeof(TAG256)-1) #define TAG512 "$stribog512$" #define TAG512_LENGTH (sizeof(TAG512)-1) #define TAG_LENGTH TAG256_LENGTH #define FORMAT_TAG TAG256 #define ALGORITHM_NAME "GOST R 34.11-2012 128/128 SSE4.1 1x" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 64 - 1 #define CIPHERTEXT256_LENGTH 64 #define CIPHERTEXT512_LENGTH 128 #define CIPHERTEXT_LENGTH CIPHERTEXT256_LENGTH #define BINARY_SIZE_256 32 #define BINARY_SIZE_512 64 #define SALT_SIZE 0 #define SALT_ALIGN 1 #define BINARY_ALIGN sizeof(uint32_t) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests stribog_256_tests[] = { {"$stribog256$bbe19c8d2025d99f943a932a0b365a822aa36a4c479d22cc02c8973e219a533f", ""}, /* {"3f539a213e97c802cc229d474c6aa32a825a360b2a933a949fd925208d9ce1bb", ""}, */ /* 9d151eefd8590b89daa6ba6cb74af9275dd051026bb149a452fd84e5e57b5500 */ {"$stribog256$00557be5e584fd52a449b16b0251d05d27f94ab76cbaa6da890b59d8ef1e159d", "012345678901234567890123456789012345678901234567890123456789012"}, {NULL} 
}; static struct fmt_tests stribog_512_tests[] = { /* 8e945da209aa869f0455928529bcae4679e9873ab707b55315f56ceb98bef0a7362f715528356ee83cda5f2aac4c6ad2ba3a715c1bcd81cb8e9f90bf4c1c1a8a */ {"$stribog512$8a1a1c4cbf909f8ecb81cd1b5c713abad26a4cac2a5fda3ce86e352855712f36a7f0be98eb6cf51553b507b73a87e97946aebc29859255049f86aa09a25d948e", ""}, /* 1b54d01a4af5b9d5cc3d86d68d285462b19abc2475222f35c085122be4ba1ffa00ad30f8767b3a82384c6574f024c311e2a481332b08ef7f41797891c1646f48 */ {"$stribog512$486f64c1917879417fef082b3381a4e211c324f074654c38823a7b76f830ad00fa1fbae42b1285c0352f227524bc9ab16254288dd6863dccd5b9f54a1ad0541b", "012345678901234567890123456789012345678901234567890123456789012"}, {NULL} }; #define make_full_static_buf(type, var, len) static type (var)[(len)] #define make_dynamic_static_buf(type, var, len) \ static type *var; \ if (!var) \ var = mem_alloc_tiny((len), MEM_ALIGN_WORD) #if 1 #define make_static_buf make_dynamic_static_buf #else #define make_static_buf make_full_static_buf #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE_512 / sizeof(uint32_t)]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif if (!saved_key) { saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_SIMD); } if (!crypt_out) crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static char *split_256(char *ciphertext, int index, struct fmt_main *self) { make_static_buf(char, out, TAG_LENGTH + CIPHERTEXT_LENGTH + 1); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1); strlwr(out + TAG_LENGTH); return out; } static int valid_256(char *ciphertext, 
struct fmt_main *self) { char *p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; /* else */ /* return 0; */ if (strlen(p) != CIPHERTEXT_LENGTH) return 0; while(*p) if (atoi16[ARCH_INDEX(*p++)]==0x7f) return 0; return 1; } static void *get_binary_256(char *ciphertext) { static unsigned char *out; char *p = ciphertext; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE_256, MEM_ALIGN_WORD); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) p = ciphertext + TAG_LENGTH; for (i = 0; i < BINARY_SIZE_256; i++) { out[BINARY_SIZE_256 - i - 1] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #undef TAG_LENGTH #undef FORMAT_TAG #undef CIPHERTEXT_LENGTH #define TAG_LENGTH TAG512_LENGTH #define FORMAT_TAG TAG512 #define CIPHERTEXT_LENGTH CIPHERTEXT512_LENGTH static char *split_512(char *ciphertext, int index, struct fmt_main *self) { make_static_buf(char, out, TAG_LENGTH + CIPHERTEXT_LENGTH + 1); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1); strlwr(out + TAG_LENGTH); return out; } static int valid_512(char *ciphertext, struct fmt_main *self) { char *p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; /* else */ /* return 0; */ if (strlen(p) != CIPHERTEXT_LENGTH) return 0; while(*p) if (atoi16[ARCH_INDEX(*p++)]==0x7f) return 0; return 1; } static void *get_binary_512(char *ciphertext) { static unsigned char *out; char *p = ciphertext; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE_512, MEM_ALIGN_WORD); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) p = ciphertext + TAG_LENGTH; for (i = 0; i < BINARY_SIZE_512; i++) { out[BINARY_SIZE_512 - i - 1] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #undef TAG_LENGTH #undef FORMAT_TAG #undef CIPHERTEXT_LENGTH /* static int valid_256(char *ciphertext, struct fmt_main *self) */ /* { */ /* 
return valid(ciphertext, self, 64); */ /* } */ /* static int valid_512(char *ciphertext, struct fmt_main *self) */ /* { */ /* return valid(ciphertext, self, 128); */ /* } */ static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void stribog256_init(void* context) { size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context; void *ctx = (char*)context + offset; GOST34112012Init(ctx, 256); } static void stribog512_init(void* context) { size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context; void *ctx = (char*)context + offset; GOST34112012Init(ctx, 512); } static void stribog_update(void* context, const unsigned char* buf, unsigned int count) { size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context; void *ctx = (char*)context + offset; offset = (((size_t)buf + 15) & ~0x0F) - (size_t)buf; if (!offset) { GOST34112012Update(ctx, buf, count); } else { ALIGN(16) unsigned char tmp[15]; assert(offset < 16); memcpy(tmp, buf, offset); GOST34112012Update(ctx, tmp, offset); GOST34112012Update(ctx, buf + offset, count - offset); } } static void stribog_final(unsigned char* digest, void* context) { size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context; void *ctx = (char*)context + offset; GOST34112012Final(ctx, digest); } static int crypt_256(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { /* GOST34112012Context ctx; 
GOST34112012Init(&ctx, 256); GOST34112012Update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index])); GOST34112012Final(&ctx, (unsigned char*)crypt_out[index]); */ GOST34112012Context ctx[2]; // alignment stuff stribog256_init((void *)ctx); stribog_update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index])); stribog_final((unsigned char*)crypt_out[index], &ctx); } return count; } static int crypt_512(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { /* GOST34112012Context ctx; GOST34112012Init(&ctx, 512); GOST34112012Update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index])); GOST34112012Final(&ctx, (unsigned char*)crypt_out[index]); */ GOST34112012Context ctx[2]; // alignment stuff stribog512_init((void *)ctx); stribog_update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index])); stribog_final((unsigned char*)crypt_out[index], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one_256(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE_256); } static int cmp_one_512(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE_512); } static int cmp_exact(char *source, int index) { return 1; } static void stribog_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_stribog_256 = { { "Stribog-256", FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE_256, BINARY_ALIGN, SALT_SIZE, 
SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP, { NULL }, { TAG256 }, stribog_256_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid_256, split_256, get_binary_256, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, stribog_set_key, get_key, fmt_default_clear_keys, crypt_256, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one_256, cmp_exact } }; struct fmt_main fmt_stribog_512 = { { "Stribog-512", FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE_512, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP, { NULL }, { TAG512 }, stribog_512_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid_512, split_512, get_binary_512, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, stribog_set_key, get_key, fmt_default_clear_keys, crypt_512, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one_512, cmp_exact } }; #endif /* plugin stanza */ #else #if !defined(FMT_EXTERNS_H) && !defined(FMT_REGISTERS_H) #ifdef __GNUC__ #warning Stribog-256 and Stribog-512 formats require SSE 4.1, formats disabled #elif _MSC_VER #pragma message(": warning Stribog-256 and Stribog-512 formats require SSE 4.1, formats disabled:") #endif #endif #endif /* __SSE4_1__ */
is_initial_device.c
// RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu -DUNUSED -Wall -Werror #include <omp.h> #include <stdio.h> int main() { int errors = 0; #ifdef UNUSED // Test if it is OK to leave the variants unused in the header #else // UNUSED int host = omp_is_initial_device(); int device = 1; #pragma omp target map(tofrom : device) { device = omp_is_initial_device(); } if (!host) { printf("omp_is_initial_device() returned false on host\n"); errors++; } if (device) { printf("omp_is_initial_device() returned true on device\n"); errors++; } #endif // UNUSED // CHECK: PASS printf("%s\n", errors ? "FAIL" : "PASS"); return errors; }
166. QR Eigen Values.c
/**
 * @file
 * \brief Compute real eigen values and eigen vectors of a symmetric matrix
 * method.
 *
 */
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "qr_decompose.h"
#ifdef _OPENMP
#include <omp.h>
#endif

#define LIMS 9        /**< limit of range of matrix values */
#define EPSILON 1e-10 /**< accuracy tolerance limit */

/**
 * create a square matrix of given size with random elements
 * (symmetric: A[i][j] == A[j][i], entries in [-LIMS/2, LIMS/2])
 * \param[out] A matrix to create (must be pre-allocated in memory)
 * \param[in] N matrix size
 */
void create_matrix(double **A, int N)
{
    int i, j, tmp, lim2 = LIMS >> 1;

    /* NOTE(review): this `omp for` is orphaned -- there is no enclosing
     * `parallel` region in this translation unit, so as written it executes
     * sequentially. Do not wrap it in `parallel` without replacing rand(),
     * which is not guaranteed thread-safe -- confirm before parallelizing. */
#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0; i < N; i++)
    {
        A[i][i] = (rand() % LIMS) - lim2; /* diagonal element */
        for (j = i + 1; j < N; j++)
        {
            tmp = (rand() % LIMS) - lim2; /* mirror across the diagonal */
            A[i][j] = tmp;
            A[j][i] = tmp;
        }
    }
}

/**
 * Perform multiplication of two matrices.
 * * R2 must be equal to C1
 * * Resultant matrix size should be R1xC2
 * \param[in] A first matrix to multiply
 * \param[in] B second matrix to multiply
 * \param[out] OUT output matrix (must be pre-allocated)
 * \param[in] R1 number of rows of first matrix
 * \param[in] C1 number of columns of first matrix
 * \param[in] R2 number of rows of second matrix
 * \param[in] C2 number of columns of second matrix
 * \returns pointer to resultant matrix (OUT unchanged on dimension mismatch)
 */
double **mat_mul(double **A, double **B, double **OUT, int R1, int C1, int R2,
                 int C2)
{
    if (C1 != R2) /* inner dimensions must agree */
    {
        perror("Matrix dimensions mismatch!");
        return OUT;
    }

    int i;
    /* NOTE(review): orphaned `omp for` -- sequential unless called from an
     * enclosing parallel region (see create_matrix above). */
#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0; i < R1; i++)
    {
        for (int j = 0; j < C2; j++)
        {
            OUT[i][j] = 0.f; /* classic triple-loop product */
            for (int k = 0; k < C1; k++) OUT[i][j] += A[i][k] * B[k][j];
        }
    }
    return OUT;
}

/** Compute eigen values using iterative shifted QR decomposition algorithm as
 * follows:
 * 1. Use last diagonal element of A as eigen value approximation \f$c\f$
 * 2. Shift diagonals of matrix \f$A' = A - cI\f$
 * 3. Decompose matrix \f$A'=QR\f$
 * 4. Compute next approximation \f$A'_1 = RQ \f$
 * 5. Shift diagonals back \f$A_1 = A'_1 + cI\f$
 * 6.
Termination condition check: last element below diagonal is almost 0 * 1. If not 0, go back to step 1 with the new approximation \f$A_1\f$ * 2. If 0, continue to step 7 * 7. Save last known \f$c\f$ as the eigen value. * 8. Are all eigen values found? * 1. If not, remove last row and column of \f$A_1\f$ and go back to step 1. * 2. If yes, stop. * * \note The matrix \f$A\f$ gets modified * * \param[in,out] A matrix to compute eigen values for * \param[out] eigen_vals resultant vector containing computed eigen values * \param[in] mat_size matrix size * \param[in] debug_print 1 to print intermediate Q & R matrices, 0 for not to * \returns time for computation in seconds */ double eigen_values(double **A, double *eigen_vals, int mat_size, char debug_print) { if (!eigen_vals) { perror("Output eigen value vector cannot be NULL!"); return -1; } double **R = (double **)malloc(sizeof(double *) * mat_size); double **Q = (double **)malloc(sizeof(double *) * mat_size); if (!Q || !R) { perror("Unable to allocate memory for Q & R!"); if (Q) { free(Q); } if (R) { free(R); } return -1; } /* allocate dynamic memory for matrices */ for (int i = 0; i < mat_size; i++) { R[i] = (double *)malloc(sizeof(double) * mat_size); Q[i] = (double *)malloc(sizeof(double) * mat_size); if (!Q[i] || !R[i]) { perror("Unable to allocate memory for Q & R."); for (; i >= 0; i--) { free(R[i]); free(Q[i]); } free(Q); free(R); return -1; } } if (debug_print) { print_matrix(A, mat_size, mat_size); } int rows = mat_size, columns = mat_size; int counter = 0, num_eigs = rows - 1; double last_eig = 0; clock_t t1 = clock(); while (num_eigs > 0) /* continue till all eigen values are found */ { /* iterate with QR decomposition */ while (fabs(A[num_eigs][num_eigs - 1]) > EPSILON) { last_eig = A[num_eigs][num_eigs]; for (int i = 0; i < rows; i++) A[i][i] -= last_eig; /* A - cI */ qr_decompose(A, Q, R, rows, columns); if (debug_print) { print_matrix(A, rows, columns); print_matrix(Q, rows, columns); print_matrix(R, 
columns, columns); printf("-------------------- %d ---------------------\n", ++counter); } mat_mul(R, Q, A, columns, columns, rows, columns); for (int i = 0; i < rows; i++) A[i][i] += last_eig; /* A + cI */ } /* store the converged eigen value */ eigen_vals[num_eigs] = last_eig; if (debug_print) { printf("========================\n"); printf("Eigen value: % g,\n", last_eig); printf("========================\n"); } num_eigs--; rows--; columns--; } eigen_vals[0] = A[0][0]; double dtime = (double)(clock() - t1) / CLOCKS_PER_SEC; if (debug_print) { print_matrix(R, mat_size, mat_size); print_matrix(Q, mat_size, mat_size); } /* cleanup dynamic memory */ for (int i = 0; i < mat_size; i++) { free(R[i]); free(Q[i]); } free(R); free(Q); return dtime; } /** * test function to compute eigen values of a 2x2 matrix * \f[\begin{bmatrix} * 5 & 7\\ * 7 & 11 * \end{bmatrix}\f] * which are approximately, {15.56158, 0.384227} */ void test1() { int mat_size = 2; double X[][2] = {{5, 7}, {7, 11}}; double y[] = {15.56158, 0.384227}; // corresponding y-values double eig_vals[2] = {0, 0}; // The following steps are to convert a "double[][]" to "double **" double **A = (double **)malloc(mat_size * sizeof(double *)); for (int i = 0; i < mat_size; i++) A[i] = X[i]; printf("------- Test 1 -------\n"); double dtime = eigen_values(A, eig_vals, mat_size, 0); for (int i = 0; i < mat_size; i++) { printf("%d/5 Checking for %.3g --> ", i + 1, y[i]); char result = 0; for (int j = 0; j < mat_size && !result; j++) { if (fabs(y[i] - eig_vals[j]) < 0.1) { result = 1; printf("(%.3g) ", eig_vals[j]); } } // ensure that i^th expected eigen value was computed assert(result != 0); printf("found\n"); } printf("Test 1 Passed in %.3g sec\n\n", dtime); free(A); } /** * test function to compute eigen values of a 2x2 matrix * \f[\begin{bmatrix} * -4& 4& 2& 0& -3\\ * 4& -4& 4& -3& -1\\ * 2& 4& 4& 3& -3\\ * 0& -3& 3& -1&-1\\ * -3& -1& -3& -3& 0 * \end{bmatrix}\f] * which are approximately, {9.27648, -9.26948, 2.0181, 
-1.03516, -5.98994} */ void test2() { int mat_size = 5; double X[][5] = {{-4, 4, 2, 0, -3}, {4, -4, 4, -3, -1}, {2, 4, 4, 3, -3}, {0, -3, 3, -1, -3}, {-3, -1, -3, -3, 0}}; double y[] = {9.27648, -9.26948, 2.0181, -1.03516, -5.98994}; // corresponding y-values double eig_vals[5]; // The following steps are to convert a "double[][]" to "double **" double **A = (double **)malloc(mat_size * sizeof(double *)); for (int i = 0; i < mat_size; i++) A[i] = X[i]; printf("------- Test 2 -------\n"); double dtime = eigen_values(A, eig_vals, mat_size, 0); for (int i = 0; i < mat_size; i++) { printf("%d/5 Checking for %.3g --> ", i + 1, y[i]); char result = 0; for (int j = 0; j < mat_size && !result; j++) { if (fabs(y[i] - eig_vals[j]) < 0.1) { result = 1; printf("(%.3g) ", eig_vals[j]); } } // ensure that i^th expected eigen value was computed assert(result != 0); printf("found\n"); } printf("Test 2 Passed in %.3g sec\n\n", dtime); free(A); } /** * main function */ int main(int argc, char **argv) { srand(time(NULL)); int mat_size = 5; if (argc == 2) { mat_size = atoi(argv[1]); } else { // if invalid input argument is given run tests test1(); test2(); printf("Usage: ./qr_eigen_values [mat_size]\n"); return 0; } if (mat_size < 2) { fprintf(stderr, "Matrix size should be > 2\n"); return -1; } int i; double **A = (double **)malloc(sizeof(double *) * mat_size); /* number of eigen values = matrix size */ double *eigen_vals = (double *)malloc(sizeof(double) * mat_size); if (!eigen_vals) { perror("Unable to allocate memory for eigen values!"); free(A); return -1; } for (i = 0; i < mat_size; i++) { A[i] = (double *)malloc(sizeof(double) * mat_size); eigen_vals[i] = 0.f; } /* create a random matrix */ create_matrix(A, mat_size); print_matrix(A, mat_size, mat_size); double dtime = eigen_values(A, eigen_vals, mat_size, 0); printf("Eigen vals: "); for (i = 0; i < mat_size; i++) printf("% 9.4g\t", eigen_vals[i]); printf("\nTime taken to compute: % .4g sec\n", dtime); for (int i = 0; i < 
mat_size; i++) free(A[i]); free(A); free(eigen_vals); return 0; }
restriction.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// Piecewise-constant restriction of one block: averages 2x2x2 fine cells (cell-centered
// data) or 2x2 fine faces (face-centered data) into each coarse destination element.
// NOTE(review): the fine read region is indexed at twice the coarse loop index (i<<1 etc.),
// i.e. it is assumed to span exactly 2*dim in each restricted direction -- confirm with
// the block-list construction code.
static inline void restriction_pc_block(level_type *level_c, int id_c, level_type *level_f, int id_f, blockCopy_type *block, int restrictionType){
  // restrict 3D array from read_i,j,k of read[] to write_i,j,k in write[]
  int   dim_i       = block->dim.i; // calculate the dimensions of the resultant coarse block
  int   dim_j       = block->dim.j;
  int   dim_k       = block->dim.k;

  int  read_i       = block->read.i;
  int  read_j       = block->read.j;
  int  read_k       = block->read.k;
  int  read_jStride = block->read.jStride;
  int  read_kStride = block->read.kStride;

  int write_i       = block->write.i;
  int write_j       = block->write.j;
  int write_k       = block->write.k;
  int write_jStride = block->write.jStride;
  int write_kStride = block->write.kStride;

  // a block either reads/writes a raw buffer (box < 0, e.g. an MPI pack buffer)
  // or a box owned by this rank; in the latter case repoint to the box interior
  // (offset past the ghost zone) and pick up the box's own strides.
  double * __restrict__  read = block->read.ptr;
  double * __restrict__ write = block->write.ptr;
  if(block->read.box >=0){
     read = level_f->my_boxes[ block->read.box].vectors[id_f] + level_f->my_boxes[ block->read.box].ghosts*(1+level_f->my_boxes[ block->read.box].jStride+level_f->my_boxes[ block->read.box].kStride);
     read_jStride = level_f->my_boxes[block->read.box ].jStride;
     read_kStride = level_f->my_boxes[block->read.box ].kStride;
  }
  if(block->write.box>=0){
    write = level_c->my_boxes[block->write.box].vectors[id_c] + level_c->my_boxes[block->write.box].ghosts*(1+level_c->my_boxes[block->write.box].jStride+level_c->my_boxes[block->write.box].kStride);
    write_jStride = level_c->my_boxes[block->write.box].jStride;
    write_kStride = level_c->my_boxes[block->write.box].kStride;
  }

  int i,j,k;
  switch(restrictionType){
    case RESTRICT_CELL: // cell-centered: average the 8 fine cells under each coarse cell
         for(k=0;k<dim_k;k++){
         for(j=0;j<dim_j;j++){
         for(i=0;i<dim_i;i++){
           int write_ijk = ((i   )+write_i) + ((j   )+write_j)*write_jStride + ((k   )+write_k)*write_kStride;
           int  read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride;
           write[write_ijk] = ( read[read_ijk                            ]+read[read_ijk+1                          ] +
                                read[read_ijk  +read_jStride             ]+read[read_ijk+1+read_jStride             ] +
                                read[read_ijk               +read_kStride]+read[read_ijk+1             +read_kStride] +
                                read[read_ijk  +read_jStride+read_kStride]+read[read_ijk+1+read_jStride+read_kStride] ) * 0.125;
         }}}break;
    case RESTRICT_FACE_I: // i-faces: average the 4 coplanar fine faces (vary j,k)
         for(k=0;k<dim_k;k++){
         for(j=0;j<dim_j;j++){
         for(i=0;i<dim_i;i++){
           int write_ijk = ((i   )+write_i) + ((j   )+write_j)*write_jStride + ((k   )+write_k)*write_kStride;
           int  read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride;
           write[write_ijk] = ( read[read_ijk             ] +
                                read[read_ijk+read_jStride] +
                                read[read_ijk             +read_kStride] +
                                read[read_ijk+read_jStride+read_kStride] ) * 0.25;
         }}}break;
    case RESTRICT_FACE_J: // j-faces: average the 4 coplanar fine faces (vary i,k)
         for(k=0;k<dim_k;k++){
         for(j=0;j<dim_j;j++){
         for(i=0;i<dim_i;i++){
           int write_ijk = ((i   )+write_i) + ((j   )+write_j)*write_jStride + ((k   )+write_k)*write_kStride;
           int  read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride;
           write[write_ijk] = ( read[read_ijk  ] +
                                read[read_ijk+1] +
                                read[read_ijk  +read_kStride] +
                                read[read_ijk+1+read_kStride] ) * 0.25;
         }}}break;
    case RESTRICT_FACE_K: // k-faces: average the 4 coplanar fine faces (vary i,j)
         for(k=0;k<dim_k;k++){
         for(j=0;j<dim_j;j++){
         for(i=0;i<dim_i;i++){
           int write_ijk = ((i   )+write_i) + ((j   )+write_j)*write_jStride + ((k   )+write_k)*write_kStride;
           int  read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride;
           write[write_ijk] = ( read[read_ijk  ] +
                                read[read_ijk+1] +
                                read[read_ijk  +read_jStride] +
                                read[read_ijk+1+read_jStride] ) * 0.25;
         }}}break;
  }
}

//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) restriction
// Orchestration: prepost receives, pack+send fine-side surface blocks, restrict the
// purely local blocks while MPI traffic is in flight, wait, then unpack into coarse
// boxes.  All phases update the fine level's cycle counters.
void restriction(level_type * level_c, int id_c, level_type *level_f, int id_f, int restrictionType){
  uint64_t _timeCommunicationStart = CycleTime();
  uint64_t _timeStart,_timeEnd;
  int buffer=0;
  int n;
  // tag distinguishes restriction traffic (0x5) for this fine level from other exchanges
  int my_tag = (level_f->tag<<4) | 0x5;

  #ifdef USE_MPI
  // by convention, level_f allocates a combined array of requests for both level_f sends and level_c recvs...
  int nMessages = level_c->restriction[restrictionType].num_recvs + level_f->restriction[restrictionType].num_sends;
  MPI_Request *recv_requests = level_f->restriction[restrictionType].requests;
  MPI_Request *send_requests = level_f->restriction[restrictionType].requests + level_c->restriction[restrictionType].num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level_c->restriction[restrictionType].num_recvs;n++){
    MPI_Irecv(level_c->restriction[restrictionType].recv_buffers[n],
              level_c->restriction[restrictionType].recv_sizes[n],
              MPI_DOUBLE,
              level_c->restriction[restrictionType].recv_ranks[n],
              my_tag,
              MPI_COMM_WORLD,
              &recv_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level_f->cycles.restriction_recv += (_timeEnd-_timeStart);

  // pack MPI send buffers...
  // block list [0] = fine-side blocks whose coarse destination is off-rank (restrict directly into send buffers)
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->restriction[restrictionType].num_blocks[0])
  for(buffer=0;buffer<level_f->restriction[restrictionType].num_blocks[0];buffer++){
    restriction_pc_block(level_c,id_c,level_f,id_f,&level_f->restriction[restrictionType].blocks[0][buffer],restrictionType);
  }
  _timeEnd = CycleTime();
  level_f->cycles.restriction_pack += (_timeEnd-_timeStart);

  // loop through MPI send buffers and post Isend's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level_f->restriction[restrictionType].num_sends;n++){
    MPI_Isend(level_f->restriction[restrictionType].send_buffers[n],
              level_f->restriction[restrictionType].send_sizes[n],
              MPI_DOUBLE,
              level_f->restriction[restrictionType].send_ranks[n],
              my_tag,
              MPI_COMM_WORLD,
              &send_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level_f->cycles.restriction_send += (_timeEnd-_timeStart);
  #endif

  // perform local restriction[restrictionType]... try and hide within Isend latency...
  // block list [1] = blocks whose source and destination are both on this rank
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->restriction[restrictionType].num_blocks[1])
  for(buffer=0;buffer<level_f->restriction[restrictionType].num_blocks[1];buffer++){
    restriction_pc_block(level_c,id_c,level_f,id_f,&level_f->restriction[restrictionType].blocks[1][buffer],restrictionType);
  }
  _timeEnd = CycleTime();
  level_f->cycles.restriction_local += (_timeEnd-_timeStart);

  // wait for MPI to finish...
  #ifdef USE_MPI
  _timeStart = CycleTime();
  if(nMessages)MPI_Waitall(nMessages,level_f->restriction[restrictionType].requests,level_f->restriction[restrictionType].status);
  _timeEnd = CycleTime();
  level_f->cycles.restriction_wait += (_timeEnd-_timeStart);

  // unpack MPI receive buffers
  // block list [2] (owned by the coarse level) = straight copies from recv buffers into coarse boxes
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->restriction[restrictionType].num_blocks[2])
  for(buffer=0;buffer<level_c->restriction[restrictionType].num_blocks[2];buffer++){
    CopyBlock(level_c,id_c,&level_c->restriction[restrictionType].blocks[2][buffer]);
  }
  _timeEnd = CycleTime();
  level_f->cycles.restriction_unpack += (_timeEnd-_timeStart);
  #endif

  level_f->cycles.restriction_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
snefru_fmt_plug.c
/* Snefru cracker patch for JtR. Hacked together during May of 2013 by Dhiru
 * Kholia <dhiru at openwall.com>.
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_snefru_256;
extern struct fmt_main fmt_snefru_128;
#elif FMT_REGISTERS_H
john_register_one(&fmt_snefru_256);
john_register_one(&fmt_snefru_128);
#else

#include <string.h>
#include "arch.h"
#include "snefru.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
//        128kb  256kb
//   1  - 214k   215k
//  64  - 1435k  1411k
// 128  - 1474k  1902k *** this was chosen
// 256  - 1508k  1511k
// 512  - 1649k  1564k
#define OMP_SCALE  128
#endif
#include "memdbg.h"

// Snefru-128 and Snefru-256 are the real format labels
#define FORMAT_LABEL "Snefru"
#define FORMAT_TAG "$snefru$"
#define TAG_LENGTH 8
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE128 16
#define BINARY_SIZE256 32
// only the first 16 bytes of the stored digest are compared (enough to
// discriminate; cmp_exact accepts the rest)
#define CMP_SIZE 16
#define SALT_SIZE 0
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_ALIGN 4
#define SALT_ALIGN 1

static struct fmt_tests snefru_128_tests[] = {
	{"53b8a9b1c9ed00174d88d705fb7bae30", "mystrongpassword"},
	{"$snefru$53b8a9b1c9ed00174d88d705fb7bae30", "mystrongpassword"},
	{NULL}
};

static struct fmt_tests snefru_256_tests[] = {
	{"$snefru$4170e04e900e6221562ceb5ff6ea27fa9b9b0d9587add44a4379a02619c5a106", "mystrongpassword"},
	{"4170e04e900e6221562ceb5ff6ea27fa9b9b0d9587add44a4379a02619c5a106", "mystrongpassword"},
	{NULL}
};

// per-candidate plaintexts and their computed digests (crypt_out is sized for
// the larger 256-bit digest; Snefru-128 simply uses the first 16 bytes)
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE256 / sizeof(ARCH_WORD_32)];

// allocate key/digest arrays; with OpenMP, scale keys-per-crypt by
// thread count (and OMP_SCALE) so each thread gets a batch of candidates
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

// shared validator: optional "$snefru$" tag, then exactly `len` hex digits
static int valid(char *ciphertext, struct fmt_main *self, int len)
{
	char *p;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	if (strlen(p) != len)
		return 0;
	while(*p)
		if(atoi16[ARCH_INDEX(*p++)]==0x7f)  // 0x7f marks a non-hex character
			return 0;
	return 1;
}

static int valid256(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, 64);
}

static int valid128(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, 32);
}

// decode the 64 hex digits (after any tag) into a static 32-byte buffer
static void *get_binary_256(char *ciphertext)
{
	static union {
		unsigned char c[32];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;
	for (i = 0; i < 32; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

// decode the 32 hex digits (after any tag) into a static 16-byte buffer
static void *get_binary_128(char *ciphertext)
{
	static union {
		unsigned char c[16];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;
	for (i = 0; i < 16; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

// hash-table bucket selectors over the first digest word, at the standard
// JtR bit widths
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

// hash all pending candidates with Snefru-256; without OpenMP the braced
// body runs once (count is 1 since MAX_KEYS_PER_CRYPT is not scaled)
static int crypt_256(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		snefru_ctx ctx;;  // NOTE(review): stray second ';' -- harmless

		rhash_snefru256_init(&ctx);
		rhash_snefru_update(&ctx, (unsigned char*)saved_key[index],
				strlen(saved_key[index]));
		rhash_snefru_final(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

// hash all pending candidates with Snefru-128 (same structure as crypt_256)
static int crypt_128(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		snefru_ctx ctx;;  // NOTE(review): stray second ';' -- harmless

		rhash_snefru128_init(&ctx);
		rhash_snefru_update(&ctx, (unsigned char*)saved_key[index],
				strlen(saved_key[index]));
		rhash_snefru_final(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

// any candidate matching the target? (without OpenMP only index 0 exists,
// so the unlooped body is sufficient)
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], CMP_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], CMP_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

// store a candidate plaintext, truncated to PLAINTEXT_LENGTH
static void snefru_set_key(char *key, int index)
{
	int saved_key_length = strlen(key);
	if (saved_key_length > PLAINTEXT_LENGTH)
		saved_key_length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

// canonicalize a bare hex hash by prefixing the "$snefru$" tag
static char *prepare(char *fields[10], struct fmt_main *self)
{
	static char buf[64+TAG_LENGTH+1];
	char *hash = fields[1];
	int len = strlen(hash);
	if ( (len == 64 || len == 32) && valid(hash, self, len) ) {
		sprintf(buf, "%s%s", FORMAT_TAG, hash);
		return buf;
	}
	return hash;
}

struct fmt_main fmt_snefru_256 = {
	{
		"Snefru-256",
		"",
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE256,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		snefru_256_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		prepare,
		valid256,
		fmt_default_split,
		get_binary_256,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		snefru_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_256,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

struct fmt_main fmt_snefru_128 = {
	{
		"Snefru-128",
		"",
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE128,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		snefru_128_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		prepare,
		valid128,
		fmt_default_split,
		get_binary_128,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		snefru_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_128,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_unaryop__identity_int16_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int16_uint32
// op(A') function:  GB_tran__identity_int16_uint32

// C type:   int16_t
// A type:   uint32_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = x ;

// casting
#define GB_CASTING(z, x)   \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int16_uint32
(
    int16_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (int16_t) Ax [p], one entry per iteration (embarrassingly
        // parallel: no cross-iteration dependencies)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the GB_unaryop_transpose.c template, which expands
// the GB_* macros defined above.
GrB_Info GB_tran__identity_int16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__isfinite_bool_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__isfinite_bool_fp64
// op(A') function:  GB_unop_tran__isfinite_bool_fp64

// C type:   bool
// A type:   double
// cast:     double cij = (aij)
// unaryop:  cij = isfinite (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = isfinite (x) ;

// casting
#define GB_CAST(z, aij)   \
    double z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (aij) ;              \
    Cx [pC] = isfinite (z) ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__isfinite_bool_fp64
(
    bool *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = isfinite (Ax [p]); bool output never overlaps the read of
        // the same index, so in-place (aliased) operation is safe
        double aij = Ax [p] ;
        double z = (aij) ;
        Cx [p] = isfinite (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the GB_unop_transpose.c template, which expands the
// GB_* macros defined above.
GrB_Info GB_unop_tran__isfinite_bool_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define DrawEpsilon (1.0e-10) /* Typedef declarations. */ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations. 
*/ static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *, ExceptionInfo *); static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *); static size_t TracePath(PrimitiveInfo *,const char *); static void TraceArc(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(PrimitiveInfo *,const size_t), TraceCircle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceEllipse(PrimitiveInfo *,const PointInfo,const PointInfo, const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(PrimitiveInfo *,const PointInfo,const PointInfo, PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireDrawInfo() returns a DrawInfo structure properly initialized. % % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireMagickMemory(sizeof(*draw_info)); if (draw_info == (DrawInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. 
% % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. % */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; ExceptionInfo *exception; clone_info=(DrawInfo *) AcquireMagickMemory(sizeof(*clone_info)); if (clone_info == (DrawInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); exception=AcquireExceptionInfo(); if (clone_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) 
CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { register ssize_t x; for (x=0; fabs(draw_info->dash_pattern[x]) >= DrawEpsilon; x++) ; clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL, sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) CopyMagickMemory(clone_info->dash_pattern,draw_info->dash_pattern, (size_t) (x+1)*sizeof(*clone_info->dash_pattern)); } clone_info->gradient=draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops=clone_info->gradient.number_stops; clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t) number_stops,sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) 
CopyMagickMemory(clone_info->gradient.stops,draw_info->gradient.stops,
      (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  /*
    Deep-copy the remaining pointer fields and copy scalar state verbatim.
  */
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  clone_info->bounds=draw_info->bounds;
  clone_info->clip_units=draw_info->clip_units;
  clone_info->render=draw_info->render;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t   P a t h   T o   P o l y g o n                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPathToPolygon() converts a path to the more efficient sorted
%  rendering form.
%
%  The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const DrawInfo *draw_info,
%        const PathInfo *path_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPathToPolygon returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o draw_info: Specifies a pointer to an DrawInfo structure.
%
%    o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator for EdgeInfo: orders by starting y, then starting x
  (each with a DrawEpsilon tolerance band), then by the sign of the cross
  product of the two leading edge vectors.

  NOTE(review): this comparator never returns 0 and the epsilon bands make
  it potentially non-transitive for near-equal keys; qsort() requires a
  consistent total order — confirm ties cannot reach the final compare with
  identical points.
*/
static int CompareEdges(const void *x,const void *y)
{
  register const EdgeInfo
    *p,
    *q;

  /*
    Compare two edges.
  */
  p=(const EdgeInfo *) x;
  q=(const EdgeInfo *) y;
  if ((p->points[0].y-DrawEpsilon) > q->points[0].y)
    return(1);
  if ((p->points[0].y+DrawEpsilon) < q->points[0].y)
    return(-1);
  if ((p->points[0].x-DrawEpsilon) > q->points[0].x)
    return(1);
  if ((p->points[0].x+DrawEpsilon) < q->points[0].x)
    return(-1);
  if (((p->points[1].x-p->points[0].x)*(q->points[1].y-q->points[0].y)-
    (p->points[1].y-p->points[0].y)*(q->points[1].x-q->points[0].x)) > 0.0)
    return(1);
  return(-1);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/*
  Emit the polygon's edge table to the draw-event debug log: one entry per
  edge with direction, ghostline flag, bounds, and every vertex.
*/
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"        %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end active-edge");
}

/*
  Reverse a point array in place (used to normalize edge direction so every
  stored edge runs top-to-bottom).
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    point;

  register ssize_t
    i;

  for (i=0; i < (ssize_t) (number_points >> 1); i++)
  {
    point=points[i];
    points[i]=points[number_points-(i+1)];
    points[number_points-(i+1)]=point;
  }
}

/*
  Convert a PathInfo vector path into a PolygonInfo: monotonic edges sorted
  for scanline rendering.  Each maximal monotonic run of points becomes one
  EdgeInfo; edges are later sorted with CompareEdges().

  NOTE(review): on any ResizeQuantumMemory()/AcquireQuantumMemory() failure
  below, the function returns NULL without releasing polygon_info or the
  point buffers already attached to it — memory leak on the error path.
*/
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) ResetMagickMemory(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) ResetMagickMemory(&point,0,sizeof(point));
  (void) ResetMagickMemory(&bounds,0,sizeof(bounds));
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to.
*/
        /* flush the subpath accumulated so far as a finished edge */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                /* grow the edge table geometrically */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            /* store edges top-to-bottom; reverse upward runs */
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.
    */
    /* +1 = downward (or rightward on a horizontal tie), -1 = upward */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < DrawEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge.
        */
        /* direction flipped: close the current monotonic edge and start a
           new one that shares the last point */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* grow the per-edge point buffer */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          /* flush the final pending edge */
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /* sort edges into scanline order for the rasterizer */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),CompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t   P r i m i t i v e   T o   P a t h                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPrimitiveToPath returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/

/*
  Emit a vector path to the draw-event debug log, one line per path element.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ?
"lineto" : "?"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path"); } static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info) { PathInfo *path_info; PathInfoCode code; PointInfo p, q; register ssize_t i, n; ssize_t coordinates, start; /* Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case AlphaPrimitive: case ColorPrimitive: case ImagePrimitive: case PointPrimitive: case TextPrimitive: return((PathInfo *) NULL); default: break; } for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; path_info=(PathInfo *) AcquireQuantumMemory((size_t) (2UL*i+3UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) return((PathInfo *) NULL); coordinates=0; n=0; p.x=(-1.0); p.y=(-1.0); q.x=(-1.0); q.y=(-1.0); start=0; for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code=LineToCode; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; p=primitive_info[i].point; start=n; code=MoveToCode; } coordinates--; /* Eliminate duplicate points. */ if ((i == 0) || (fabs(q.x-primitive_info[i].point.x) >= DrawEpsilon) || (fabs(q.y-primitive_info[i].point.y) >= DrawEpsilon)) { path_info[n].code=code; path_info[n].point=primitive_info[i].point; q=primitive_info[i].point; n++; } if (coordinates > 0) continue; if ((fabs(p.x-primitive_info[i].point.x) < DrawEpsilon) && (fabs(p.y-primitive_info[i].point.y) < DrawEpsilon)) continue; /* Mark the p point as open if it does not match the q. 
*/ path_info[start].code=OpenCode; path_info[n].code=GhostlineCode; path_info[n].point=primitive_info[i].point; n++; path_info[n].code=LineToCode; path_info[n].point=p; n++; } path_info[n].code=EndCode; path_info[n].point.x=0.0; path_info[n].point.y=0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); return(path_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo % structure. % % The format of the DestroyDrawInfo method is: % % DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) % % A description of each parameter follows: % % o draw_info: the draw info. % */ MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) { if (draw_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (draw_info->primitive != (char *) NULL) draw_info->primitive=DestroyString(draw_info->primitive); if (draw_info->text != (char *) NULL) draw_info->text=DestroyString(draw_info->text); if (draw_info->geometry != (char *) NULL) draw_info->geometry=DestroyString(draw_info->geometry); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern); if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); if (draw_info->metrics != (char *) NULL) draw_info->metrics=DestroyString(draw_info->metrics); if (draw_info->family != (char *) NULL) draw_info->family=DestroyString(draw_info->family); if (draw_info->encoding != (char *) NULL) draw_info->encoding=DestroyString(draw_info->encoding); if (draw_info->density != (char *) NULL) 
draw_info->density=DestroyString(draw_info->density); if (draw_info->server_name != (char *) NULL) draw_info->server_name=(char *) RelinquishMagickMemory(draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) draw_info->dash_pattern=(double *) RelinquishMagickMemory( draw_info->dash_pattern); if (draw_info->gradient.stops != (StopInfo *) NULL) draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory( draw_info->gradient.stops); if (draw_info->clip_mask != (char *) NULL) draw_info->clip_mask=DestroyString(draw_info->clip_mask); draw_info->signature=(~MagickCoreSignature); draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y E d g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyEdge() destroys the specified polygon edge. % % The format of the DestroyEdge method is: % % ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % % o edge: the polygon edge number to destroy. % */ static size_t DestroyEdge(PolygonInfo *polygon_info, const size_t edge) { assert(edge < polygon_info->number_edges); polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < polygon_info->number_edges) (void) CopyMagickMemory(polygon_info->edges+edge,polygon_info->edges+edge+1, (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges)); return(polygon_info->number_edges); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P o l y g o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPolygonInfo() destroys the PolygonInfo data structure. 
%
%  The format of the DestroyPolygonInfo method is:
%
%      PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    i;

  /* release every per-edge point buffer, then the edge table itself */
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
    polygon_info->edges[i].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[i].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  For scanline y of the destination, clip the candidate x-span `edge` against
  the inverse-mapped source rectangle and return the clipped span.  An empty
  result is signalled by x2 < x1.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= DrawEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -DrawEpsilon)
      {
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: the whole scanline maps to one source column; reject if it
         falls outside the source */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= DrawEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -DrawEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/*
  Invert a 2x3 affine matrix.  PerceptibleReciprocal() guards against a
  (near-)zero determinant.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  /* forward-map the four source corners to find the destination extent */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(source,image,1,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;
    /* inverse-map each destination pixel, interpolate the source there, and
       composite "over" in place.  NOTE(review): x_offset is incremented but
       never read — dead state. */
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t)
         floor(inverse_edge.x2+0.5); x++)
    {
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      (void) InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   B o u n d i n g   R e c t a n g l e s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
%        PolygonInfo *polygon_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
  const PolygonInfo *polygon_info,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  double
    mid;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  /* default 96 DPI unless a density string overrides it */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* half the stroke width in device units: pad rectangles by this margin */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* union of all edge bounds, padded and clamped to the image */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /* one rectangle per edge: red for down edges, green for up edges */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          (void) QueryColorCompliance("red",AllCompliance,&clone_info->stroke,
            exception);
        else
          (void) QueryColorCompliance("green",AllCompliance,&clone_info->stroke,
            exception);
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        (void) DrawPrimitive(image,clone_info,primitive_info,exception);
      }
    }
  /* blue rectangle for the overall bounds */
  (void) QueryColorCompliance("blue",AllCompliance,&clone_info->stroke,
    exception);
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  (void) DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p P a t h                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *name,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the name of the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *name,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  Image
    *clip_mask;

  const char
    *value;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* the clip path MVG was stashed as an image artifact under `name' */
  (void) FormatLocaleString(filename,MagickPathExtent,"%s",name);
  value=GetImageArtifact(image,filename);
  if (value == (const char *) NULL)
    return(MagickFalse);
  clip_mask=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  /* start fully transparent, render the path in white, negate, install */
  (void) QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  (void) SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      draw_info->clip_mask);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void)
%
*/
/*
  Render a dashed stroke: walk the polyline, alternating pen-down/pen-up
  spans per draw_info->dash_pattern (honoring dash_offset), and stroke each
  pen-down span via DrawStrokePolygon().
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register ssize_t
    i;

  register double
    dx,
    dy;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-dash");
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+1UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*(draw_info->dash_pattern[0]-0.5);
  offset=fabs(draw_info->dash_offset) >= DrawEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance n/length through the pattern until the
    offset is exhausted.  A zero-terminated pattern ends the loop early.
    NOTE(review): n is advanced without an explicit bound against the
    pattern length — relies on the zero terminator; confirm the pattern is
    always zero-terminated by the caller.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*(draw_info->dash_pattern[n]+0.5);
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /* walk each segment of the polyline, emitting dashes along it */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot((double) dx,dy);
    if (fabs(length) < DrawEpsilon)
      {
        n++;
        if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon)
          n=0;
        length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
         (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* odd index = gap: restart the dash polygon at the gap's end */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length/maximum_length);
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length/maximum_length);
          j=1;
        }
      else
        {
          /* even index = dash: close this span and stroke it */
          if ((j+1) > (ssize_t) (2*number_vertices))
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length/maximum_length);
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length/maximum_length);
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
        }
      n++;
      if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon)
        n=0;
      length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    }
    /* carry the unconsumed remainder of this segment into the next */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /* flush a trailing partial dash, nudged by epsilon so it rasterizes */
  if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=DrawEpsilon;
      dash_polygon[j].point.y+=DrawEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-dash");
  return(status != 0 ?
MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Return MagickTrue if the token parses as a (non-zero-at-start) numeric
  point coordinate.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=StringToDouble(point,&p);
  return((fabs(value) < DrawEpsilon) && (p == point) ?
MagickFalse : MagickTrue); } static inline void TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->point=point; } MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, factor, points_extent, primitive_extent; DrawInfo **graphic_context; MagickBooleanType proceed; MagickSizeType number_points; MagickStatusType status; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_stops; ssize_t j, k, n; StopInfo *stops; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (*draw_info->primitive != '@') primitive=AcquireString(draw_info->primitive); else primitive=FileToString(draw_info->primitive+1,~0UL,exception); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"MVG",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. 
*/ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=6553; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ GetNextToken(q,&q,MagickPathExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. 
*/ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("clip-path",keyword) == 0) { /* Create clip mask. 
*/ GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->clip_mask,token); (void) DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) status=MagickFalse; else graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) status=MagickFalse; else graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { 
primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; if (status == MagickFalse) { ImageInfo *pattern_info; pattern_info=AcquireImageInfo(); (void) CopyMagickString(pattern_info->filename,token, MagickPathExtent); graphic_context[n]->fill_pattern=ReadImage(pattern_info, exception); CatchException(exception); pattern_info=DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; graphic_context[n]->fill.alpha=(MagickRealType) (QuantumRange- ClampToQuantum((MagickRealType) QuantumRange*(1.0-factor* StringToDouble(token,&next_token)))); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) status=MagickFalse; else graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) status=MagickFalse; else graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) status=MagickFalse; else graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if 
(LocaleCompare("gradient-units",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) status=MagickFalse; else graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) status=MagickFalse; else graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("interword-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("line",keyword) == 0) primitive_type=LinePrimitive; else status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; graphic_context[n]->alpha=(Quantum) (QuantumRange*(1.0- (QuantumScale*graphic_context[n]->alpha*(1.0-factor* StringToDouble(token,&next_token))))); graphic_context[n]->fill_alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->fill_alpha*(1.0-factor*StringToDouble(token, &next_token)))); graphic_context[n]->stroke_alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->stroke_alpha*(1.0-factor*StringToDouble(token, &next_token)))); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) break; if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if (graphic_context[n]->clip_mask != (char *) NULL) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) (void) SetImageMask(image,ReadPixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("pattern",token) == 0) break; status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) { char name[MagickPathExtent]; GetNextToken(q,&q,extent,token); (void) FormatLocaleString(name,MagickPathExtent,"%s",token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if 
(LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) SetImageArtifact(image,name,token); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; if (LocaleCompare(type,"radial") == 0) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ 
graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo pattern_bounds; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); pattern_bounds.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.width=(size_t) floor(StringToDouble(token, &next_token)+0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.height=(size_t) floor(StringToDouble(token, &next_token)+0.5); if (token == next_token) status=MagickFalse; for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) 
(q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double)pattern_bounds.width, (double)pattern_bounds.height,(double)pattern_bounds.x, (double)pattern_bounds.y); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); break; } if (LocaleCompare("defs",token) == 0) break; status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) 
status=MagickFalse; break; } if (LocaleCompare("skewX",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; GetNextToken(q,&q,extent,token); stops[number_stops-1].offset=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; if (status == MagickFalse) { ImageInfo *pattern_info; pattern_info=AcquireImageInfo(); (void) CopyMagickString(pattern_info->filename,token, MagickPathExtent); graphic_context[n]->stroke_pattern=ReadImage(pattern_info, exception); CatchException(exception); 
pattern_info=DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias= StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2UL*x+2UL), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } for (j=0; j < x; j++) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) 
status=MagickFalse; else graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) status=MagickFalse; else graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; graphic_context[n]->stroke.alpha=(MagickRealType) (QuantumRange- ClampToQuantum((MagickRealType) QuantumRange*(1.0-factor* StringToDouble(token,&next_token)))); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke-width",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) status=MagickFalse; else graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) status=MagickFalse; else graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= DrawEpsilon) || (fabs(affine.rx) >= DrawEpsilon) || (fabs(affine.ry) >= DrawEpsilon) || (fabs(affine.sy-1.0) >= DrawEpsilon) || (fabs(affine.tx) >= DrawEpsilon) || (fabs(affine.ty) >= DrawEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; 
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); continue; } /* Parse the primitive attributes. */ i=0; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; for (x=0; *q != '\0'; x++) { /* Define points. */ if (IsPoint(q) == MagickFalse) break; GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; i++; if (i < (ssize_t) number_points) continue; number_points<<=1; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points,sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; 
primitive_info[j].method=FloodfillMethod; primitive_info[j].text=(char *) NULL; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. */ points_extent=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { points_extent*=5; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); points_extent*=5; points_extent+=2*ceil((double) MagickPI*radius)+6*BezierQuantum+360; break; } case BezierPrimitive: { if (primitive_info[j].coordinates > 107) (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); points_extent=(double) (BezierQuantum*primitive_info[j].coordinates); break; } case PathPrimitive: { char *s, *t; GetNextToken(q,&q,extent,token); points_extent=1; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } points_extent++; } points_extent=points_extent*BezierQuantum; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); points_extent=2*ceil((double) MagickPI*radius)+6*BezierQuantum+360; break; } default: break; } if (((double) ((size_t) points_extent)) < points_extent) { (void) ThrowMagickException(exception,GetMagickModule(), 
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); break; } if (((MagickSizeType) (i+points_extent)) >= number_points) { /* Resize based on speculative points required by primitive. */ number_points+=points_extent+1; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points,sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } } switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } TraceRoundRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } TraceArc(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } 
TraceEllipse(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceCircle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: break; case PolygonPrimitive: { primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } TraceBezier(primitive_info+j,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { i=(ssize_t) (j+TracePath(primitive_info+j,token)); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) status=MagickFalse; else primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') GetNextToken(q,&q,extent,token); primitive_info[j].text=AcquireString(token); break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); primitive_info[j].text=AcquireString(token); break; } } if (primitive_info == (PrimitiveInfo *) NULL) break; if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); if (status == MagickFalse) break; primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } if (primitive_info->text != (char *) NULL) primitive_info->text=(char *) RelinquishMagickMemory( primitive_info->text); proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/ token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition", keyword); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGradientImage() draws a linear gradient on the image. % % The format of the DrawGradientImage method is: % % MagickBooleanType DrawGradientImage(Image *image, % const DrawInfo *draw_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  GetStopColorOffset() returns the gradient offset for pixel (x,y).

  For linear (and undefined) gradients the result is the scalar projection of
  the pixel onto the gradient vector; the caller divides by the vector length
  to normalize it into [0,1].  For radial gradients the result is a radial
  distance: the raw Euclidean distance from the center when the spread is
  RepeatSpread, otherwise the angle-corrected elliptical distance scaled by
  the gradient radii.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      /* PerceptibleReciprocal() guards against division by a zero-length
         gradient vector or a pixel coincident with the vector origin. */
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* Repeat spread: plain distance from center; the caller wraps it
             by the gradient radius. */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* Rotate the pixel into the gradient's frame and scale by the
         ellipse radii so the offset is 1.0 on the ellipse boundary. */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))/gradient->radii.x;
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))/gradient->radii.y;
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

/*
  StopInfoCompare() is the qsort() comparator that orders gradient stops by
  ascending offset; offsets within DrawEpsilon of each other compare equal.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo
    *stop_1,
    *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= DrawEpsilon)
    return(0);
  return(-1);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* Sort stops by offset so the interpolation loops below can scan them
     in ascending order. */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  /* NOTE(review): the loop bounds treat bounding_box.height/width as
     absolute end coordinates rather than extents — presumably the caller
     stores them that way; confirm against where bounding_box is set. */
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* Seed the row offset at x=0; the per-pixel branches below recompute
       it for every pixel except the gradient-vector origin. */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset/=length;
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* Pad: clamp offsets outside [0,1] to the first/last stop. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset/=length;
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Linear blend between the stops bracketing this offset. */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* Reflect: fold the offset into [0,1], mirroring on every other
             period. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset/=length;
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          double
            repeat;

          /* Repeat: wrap the raw offset by the period (vector length for
             linear, radius for radial) and antialias the seam where one
             period meets the next. */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=repeat/length;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* At the seam, blend first stop into last stop over the
                       remaining fraction of the period. */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      /* Composite the gradient color over the existing pixel. */
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType DrawPatternPath(Image *image, const DrawInfo *draw_info,const char *name,Image **pattern, ExceptionInfo *exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo *clone_info; ImageInfo *image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); assert(name != (const char *) NULL); (void) FormatLocaleString(property,MagickPathExtent,"%s",name); path=GetImageArtifact(image,property); if (path == (const char *) NULL) return(MagickFalse); (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name); geometry=GetImageArtifact(image,property); if (geometry == (const char *) NULL) return(MagickFalse); if ((*pattern) != (Image *) NULL) *pattern=DestroyImage(*pattern); image_info=AcquireImageInfo(); image_info->size=AcquireString(geometry); *pattern=AcquireImage(image_info,exception); image_info=DestroyImageInfo(image_info); (void) QueryColorCompliance("#000000ff",AllCompliance, &(*pattern)->background_color,exception); (void) SetImageBackgroundColor(*pattern,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), "begin pattern-path %s %s",name,geometry); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill_pattern=NewImageList(); clone_info->stroke_pattern=NewImageList(); (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name); type=GetImageArtifact(image,property); if (type != (const char *) NULL) clone_info->gradient.type=(GradientType) ParseCommandOption( MagickGradientOptions,MagickFalse,type); (void) CloneString(&clone_info->primitive,path); status=DrawImage(*pattern,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) 
LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) ResetMagickMemory(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) 
return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo *q; register EdgeInfo *p; register ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,(size_t) j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. */ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta < 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta > alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=1.0/alpha; beta=delta.x*(y-q->y)-delta.y*(x-q->x); distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. 
*/ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_alpha=1.0; else { beta=1.0; if (fabs(distance-1.0) >= DrawEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25))) *stroke_alpha=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < DrawEpsilon) { beta=1.0; if (fabs(distance-1.0) >= DrawEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_alpha < (alpha*alpha)) subpath_alpha=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_alpha >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ? 1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 
1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType fill, status; double mid; PolygonInfo **magick_restrict polygon_info; register EdgeInfo *p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; /* Compute bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates == 0) return(MagickTrue); polygon_info=AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); DisableMSCWarning(4127) if (0) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); RestoreMSCWarning if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; bounds=polygon_info[0]->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.x1=bounds.x1 < 0.0 ? 0.0 : (size_t) ceil(bounds.x1-0.5) >= image->columns ? 
(double) image->columns-1 : bounds.x1; bounds.y1-=(mid+1.0); bounds.y1=bounds.y1 < 0.0 ? 0.0 : (size_t) ceil(bounds.y1-0.5) >= image->rows ? (double) image->rows-1 : bounds.y1; bounds.x2+=(mid+1.0); bounds.x2=bounds.x2 < 0.0 ? 0.0 : (size_t) floor(bounds.x2+0.5) >= image->columns ? (double) image->columns-1 : bounds.x2; bounds.y2+=(mid+1.0); bounds.y2=bounds.y2 < 0.0 ? 0.0 : (size_t) floor(bounds.y2+0.5) >= image->rows ? (double) image->rows-1 : bounds.y2; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. */ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) && (y == (ssize_t) ceil(primitive_info->point.y-0.5))) { GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. 
*/ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; register Quantum *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { /* Fill and/or stroke. */ fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule, x,y,&stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0; stroke_alpha=stroke_alpha > 0.25 ? 
1.0 : 0.0; } GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception); fill_alpha=fill_alpha*fill_color.alpha; CompositePixelOver(image,&fill_color,fill_alpha,q,(double) GetPixelAlpha(image,q),q); GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception); stroke_alpha=stroke_alpha*stroke_color.alpha; CompositePixelOver(image,&stroke_color,stroke_alpha,q,(double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. % % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" 
}; PointInfo p, q, point; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= DrawEpsilon) || (fabs(q.y-point.y) >= DrawEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= DrawEpsilon) || (fabs(p.y-point.y) >= DrawEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open 
(%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace,exception); status=MagickTrue; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if 
(IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) 
GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_image=ReadInlineImage(clone_info,primitive_info->text, exception); else { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); composite_image=ReadImage(clone_info,exception); } clone_info=DestroyImageInfo(clone_info); if (composite_image == (Image *) NULL) break; (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. 
*/ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; (void) TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) (void) SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; status&=DrawAffineImage(image,composite_image,&affine,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) 
CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= DrawEpsilon) && (fabs(scale*draw_info->stroke_width) >= DrawEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); (void) DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; closed_path= (fabs(primitive_info[i-1].point.x-primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[i-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ? 
MagickTrue : MagickFalse; i=(ssize_t) primitive_info[0].coordinates; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { (void) DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. 
% % */ static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,ExceptionInfo *exception) { PrimitiveInfo linecap[5]; register ssize_t i; for (i=0; i < 4; i++) linecap[i]=(*primitive_info); linecap[0].coordinates=4; linecap[1].point.x+=2.0*DrawEpsilon; linecap[2].point.x+=2.0*DrawEpsilon; linecap[2].point.y+=2.0*DrawEpsilon; linecap[3].point.y+=2.0*DrawEpsilon; linecap[4].primitive=UndefinedPrimitive; (void) DrawPolygonPrimitive(image,draw_info,linecap,exception); } static MagickBooleanType DrawStrokePolygon(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { DrawInfo *clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo *stroke_polygon; register const PrimitiveInfo *p, *q; /* Draw stroked polygon. */ if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-stroke-polygon"); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill=draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; clone_info->stroke_width=0.0; clone_info->fill_rule=NonZeroRule; status=MagickTrue; for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates) { stroke_polygon=TraceStrokePolygon(draw_info,p); status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception); if (status == 0) break; stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); q=p+p->coordinates-1; closed_path=(fabs(q->point.x-p->point.x) < DrawEpsilon) && (fabs(q->point.y-p->point.y) < DrawEpsilon) ? 
MagickTrue : MagickFalse;
    /* open paths with round caps get an explicit cap at each end */
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        DrawRoundLinecap(image,draw_info,p,exception);
        DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* zero everything, then set the diagonal scale terms to 1 (identity) */
  (void) ResetMagickMemory(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t D r a w I n f o                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes: zero the structure, then install built-in
    defaults, then overlay any "define"/image options present in image_info.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) ResetMagickMemory(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* default fill is opaque black, default stroke is fully transparent */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#0000",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->debug=IsEventLogging();
  draw_info->stroke_antialias=clone_info->antialias;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= DrawEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* overlay per-image options; each silently ignores parse errors */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *)
NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept either a symbolic weight name or a raw numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   P e r m u t a t e                                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permutation of the (n,k).
%
% The format of the Permutate method is:
%
%      void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
%    o n:
%
%    o k:
%
*/
/* Binomial coefficient C(n,k) computed in floating point:
   prod(k+1..n)/prod(1..n-k).  Used as Bernstein weights by TraceBezier(). */
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a c e P r i m i t i v e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

/* Trace an arc as an ellipse centered between start and end. */
static void TraceArc(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radii;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radii.x=fabs(center.x-start.x);
  radii.y=fabs(center.y-start.y);
  TraceEllipse(primitive_info,center,radii,degrees);
}

/* Trace an SVG elliptical-arc path segment ('A'/'a') as a sequence of cubic
   bezier segments, following the SVG arc implementation notes (endpoint to
   center parameterization, radii correction, then <= 90-degree slices). */
static void TraceArcPath(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  /* degenerate cases: coincident endpoints => point; zero radius => line */
  if ((fabs(start.x-end.x) < DrawEpsilon) &&
      (fabs(start.y-end.y) < DrawEpsilon))
    {
      TracePoint(primitive_info,end);
      return;
    }
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((fabs(radii.x) < DrawEpsilon) || (fabs(radii.y) < DrawEpsilon))
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < DrawEpsilon)
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  /* scale radii up if they are too small to span the endpoints */
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* one bezier segment per <= 90 degrees of sweep */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    DrawEpsilon))));
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double)
(center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* first control point of each segment continues from the previous one */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    TraceBezier(p,4);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* stamp the primitive type on every generated coordinate (walk backward) */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}

/* Flatten a bezier of number_coordinates control points (stored in
   primitive_info[0..n-1].point) into a polyline written back in place. */
static void TraceBezier(PrimitiveInfo *primitive_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  /* choose a sampling density proportional to the curve's extent, capped at
     BezierQuantum points per control point */
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points: evaluate the Bernstein form at control_points
    evenly spaced parameter values.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    /* alpha starts at (1-t)^(n-1); each step multiplies by t/(1-t) to yield
       t^j*(1-t)^(n-1-j) */
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    TracePoint(p,points[i]);
    p+=p->coordinates;
  }
  TracePoint(p,end);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
}

/* Trace a circle: start is the center, end lies on the circumference, so the
   radius is the distance between them. */
static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  TraceEllipse(primitive_info,start,offset,degrees);
}

/* Trace an ellipse centered at start with radii stop, sweeping from
   degrees.x to degrees.y. */
static void TraceEllipse(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo stop,const PointInfo degrees)
{
  double
    delta,
    step,
    y;

  PointInfo
    angle,
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  /* zero radii degenerate to a single point */
  if ((fabs(stop.x) < DrawEpsilon) && (fabs(stop.y) < DrawEpsilon))
    {
      TracePoint(primitive_info,start);
      return;
    }
  /* angular step shrinks as the ellipse gets larger, bounded by pi/8 */
  delta=2.0/MagickMax(stop.x,stop.y);
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/(4*(MagickPI/delta/2+0.5));
  angle.x=DegreesToRadians(degrees.x);
  y=degrees.y;
  while (y < degrees.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*stop.x+start.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*stop.y+start.y;
    TracePoint(p,point);
    p+=p->coordinates;
  }
  /* always emit the exact end angle so the sweep closes precisely */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*stop.x+start.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*stop.y+start.y;
  TracePoint(p,point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}

/* Trace a line segment; coincident endpoints collapse to a point. */
static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  TracePoint(primitive_info,start);
  if ((fabs(start.x-end.x) < DrawEpsilon) &&
      (fabs(start.y-end.y) < DrawEpsilon))
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return;
    }
  TracePoint(primitive_info+1,end);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
}

/* Parse an SVG path string into primitive coordinates; returns the number of
   coordinates generated.  Uppercase commands are absolute, lowercase
   relative. */
static size_t TracePath(PrimitiveInfo *primitive_info,const char *path)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0,0.0}, {0.0,0.0}, {0.0,0.0}, {0.0,0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle;

        MagickBooleanType
          large_arc,
          sweep;

        PointInfo
          arc;

        /*
          Compute arc points: rx ry x-rotation large-arc-flag sweep-flag x y,
          repeated while more coordinate pairs follow.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          TraceArcPath(q,point,end,arc,angle,large_arc,sweep);
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Compute bezier points: cubic curve, 3 coordinate pairs per segment.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            end.x=(double) (attribute == (int) 'C' ?
x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(q,4);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /* horizontal line-to */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          TracePoint(q,point);
          q+=q->coordinates;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /* line-to */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          TracePoint(q,point);
          q+=q->coordinates;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /* move-to: closes the previous sub-path's coordinate accounting */
        if (q != primitive_info)
          {
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
          }
        i=0;
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;
          i++;
          TracePoint(q,point);
          q+=q->coordinates;
          if ((i != 0) && (attribute == (int) 'M'))
            {
              TracePoint(q,point);
              q+=q->coordinates;
            }
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Compute bezier points.
        */
        /* quadratic curve: 2 coordinate pairs per segment */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(q,3);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Compute bezier points: smooth cubic; first control point reflects
          the previous segment's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(q,4);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Compute bezier points: smooth quadratic; control point reflects the
          previous segment's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            end.x=(double) (attribute == (int) 'T' ?
x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(q,3);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /* vertical line-to */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          TracePoint(q,point);
          q+=q->coordinates;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /* close path: line back to the sub-path start */
        point=start;
        TracePoint(q,point);
        q+=q->coordinates;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        z_count++;
        break;
      }
      default:
      {
        if (isalpha((int) ((unsigned char) attribute)) != 0)
          (void) FormatLocaleFile(stderr,"attribute not recognized: %c\n",
            attribute);
        break;
      }
    }
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* stamp the primitive type on all coordinates; multiple closed sub-paths
     switch to the border fill method */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}

/* Trace an axis-aligned rectangle as a closed 5-point polygon. */
static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  PointInfo
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  p=primitive_info;
  TracePoint(p,start);
  p+=p->coordinates;
  point.x=start.x;
  point.y=end.y;
  TracePoint(p,point);
  p+=p->coordinates;
  TracePoint(p,end);
  p+=p->coordinates;
  point.x=end.x;
  point.y=start.y;
  TracePoint(p,point);
  p+=p->coordinates;
  TracePoint(p,start);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}

static void TraceRoundRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const
PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    offset,
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  p=primitive_info;
  offset.x=fabs(end.x-start.x);
  offset.y=fabs(end.y-start.y);
  /* clamp the corner radii so opposite corners cannot overlap */
  if (arc.x > (0.5*offset.x))
    arc.x=0.5*offset.x;
  if (arc.y > (0.5*offset.y))
    arc.y=0.5*offset.y;
  /* trace the four corner quarter-ellipses clockwise from the top-right */
  point.x=start.x+offset.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  point.x=start.x+offset.x-arc.x;
  point.y=start.y+offset.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+offset.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  /* close the path back to the first traced point */
  TracePoint(p,primitive_info->point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}

/* Extend both end-points of an open stroke outward by offset along the
   stroke direction so a square line cap covers the path end. */
static void TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /* find the first vertex that differs from vertex 0 to get a direction */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= DrawEpsilon) ||
        (fabs((double) dy) >= DrawEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  /* NOTE(review): if every vertex coincides, distance is 0 here and the
     division below divides by zero -- confirm callers exclude that case */
  distance=hypot((double) dx,(double) dy);
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /* repeat from the other end of the path */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= DrawEpsilon) || (fabs((double) dy) >= DrawEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); } static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info) { typedef struct _LineSegment { double p, q; } LineSegment; double delta_theta, dot_product, mid, miterlimit; LineSegment dx, dy, inverse_slope, slope, theta; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *path_p, *path_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, max_strokes, number_vertices; ssize_t j, n, p, q; /* Allocate paths. */ number_vertices=primitive_info->coordinates; max_strokes=2*number_vertices+6*BezierQuantum+360; path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_p)); path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_q)); polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) || (polygon_primitive == (PrimitiveInfo *) NULL)) return((PrimitiveInfo *) NULL); (void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t) number_vertices*sizeof(*polygon_primitive)); closed_path= (fabs(primitive_info[number_vertices-1].point.x-primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[number_vertices-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ? 
MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= DrawEpsilon) || (fabs(dy.p) >= DrawEpsilon)) break; } if (n == (ssize_t) number_vertices) n=(ssize_t) number_vertices-1L; slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < DrawEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else slope.p=dy.p < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else if (fabs(dy.p) < DrawEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/DrawEpsilon : -1.0/DrawEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; path_q[p++]=box_q[0]; path_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < DrawEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? 
-1.0/DrawEpsilon : 1.0/DrawEpsilon; else slope.q=dy.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else if (fabs(dy.q) < DrawEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < DrawEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360)) { if (~max_strokes < (6*BezierQuantum+360)) { path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); } 
else { max_strokes+=6*BezierQuantum+360; path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, sizeof(*path_p)); path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, sizeof(*path_q)); } if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) { if (path_p != (PointInfo *) NULL) path_p=(PointInfo *) RelinquishMagickMemory(path_p); if (path_q != (PointInfo *) NULL) path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return((PrimitiveInfo *) NULL); } } dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); path_q[q].x=box_q[1].x; path_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_q[q].x=(double) (center.x+mid*cos(fmod((double) 
(theta.p+delta_theta),DegreesToRadians(360.0)))); path_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } path_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); path_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } path_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } path_p[p++]=box_p[1]; path_q[q++]=box_q[1]; /* Trace stroked polygon. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
odd-even-merge-sort_hybrid.c
#include<stdio.h> #include<stdlib.h> #include<time.h> #include<omp.h> #include"mpi.h" #define MAX(a,b) ((a<b)?b:a) #define MIN(a,b) ((a>=b)?b:a) #define ODD(A,n,i) A[n+2*i] #define EVEN(A,n,i) A[n+2*i+1] void print_array(int *A,int l,int r) { printf("\n"); for(int i=l;i<r;i++) printf("%3d ",i); printf("\n"); for(int i=l;i<r;i++) printf("%3d ",A[i]); printf("\n"); } int *generate(int n) { int *A=(int *)malloc(sizeof(int)*n); srand(time(NULL)); for(int i=0;i<n;i++) A[i]=rand()%n; return A; } int *copy(int *A,int n) { int *C=(int *)malloc(sizeof(int)*n); for(int i=0;i<n;i++) C[i]=A[i]; return C; } int compare(const void *a,const void *b){return ( *(int *)a < *(int *)b )?0:1;} void validate(int *A1,int *A2,int n){for(int i=0;i<n;i++)if(A1[i]!=A2[i]){printf("Failure\n");return;}printf("Success!\n");} void odd_even_merge_sort(int *A,int l,int c,int r); void odd_even_merge(int *A,int l,int c,int r); void odd_even_merge2(int *A,int s); void odd_even_sort_omp(int *A,int l,int c,int r); //int unsort[32]={2,3,18,9,23,11,4,25,0,13,6,21,14,27,1,10,15,5,16,17,8,24,22,12,19,29,26,30,28,7,31,20}; //int unsort_array[16]={2,3,9,11,4,13,6,14,1,10,15,5,16,8,12,7}; //int unsort_array[8]={2,3,4,6,1,5,8,7}; int log_2(int n){int i=0;while(n!=0){n=n>>1;i++;}return i-1;} int main(int argc,char* argv[]) { char *n="32"; int N=atoi(argc==2?argv[1]:n); int *unpsort=generate(N); int *unqsort=copy(unpsort,N); int size,rank; MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD,&size); MPI_Comm_rank(MPI_COMM_WORLD,&rank); double odd_even_parallel_t = omp_get_wtime(); int *divA=(int *)malloc(sizeof(int)*N/size); int divAsize=N/size; MPI_Scatter(unpsort,divAsize,MPI_INT,divA,divAsize,MPI_INT,0,MPI_COMM_WORLD); odd_even_merge_sort(divA,0,divAsize/2,divAsize); MPI_Gather(divA,divAsize,MPI_INT,unpsort,divAsize,MPI_INT,0,MPI_COMM_WORLD); odd_even_merge(unpsort,0,N/2,N); odd_even_parallel_t = omp_get_wtime()-odd_even_parallel_t; MPI_Barrier(MPI_COMM_WORLD); if(rank==0) { double qsort_t = omp_get_wtime(); 
qsort(unqsort,N,sizeof(int),&compare); qsort_t=omp_get_wtime()-qsort_t; //print_array(unqsort,0,N); validate(unpsort,unqsort,N); printf("qsort=%lf,Parallel = %lf sec (%lf times speedup)\n", qsort_t, odd_even_parallel_t,(qsort_t/odd_even_parallel_t)); } MPI_Finalize(); return 0; } void odd_even_merge(int *A,int l,int c,int r) { /** printf("enter odd_even_merge(n=%d)\n",n); print_array(A,l,r); **/ int n=c-l; int *D=(int *)malloc(sizeof(int)*n); int *E=(int *)malloc(sizeof(int)*n); #pragma omp parallel sections num_threads(2) { #pragma omp section { int t0=0,t1=0; for(int i=0;i<n;i++) { if( t0 == n/2 || ( t1 != n/2 && ODD(A,l,t0) > ODD(A,c,t1) ) ) D[i]=ODD(A,c,t1++); else D[i]=ODD(A,l,t0++); } } #pragma omp section { int t2=0,t3=0; for(int i=0;i<n;i++) { if( t2 == n/2 || ( t3 != n/2 && EVEN(A,l,t2) > EVEN(A,c,t3)) ) E[i]=EVEN(A,c,t3++); else E[i]=EVEN(A,l,t2++); } } } //printf("D:");print_array(D,0,n); //printf("E:");print_array(E,0,n); A[l]=D[0]; for(int i=1;i<n;i++) { A[l+2*i-1]=MIN(D[i],E[i-1]); A[l+2*i]=MAX(D[i],E[i-1]); } A[r-1]=E[n-1]; //print_array(A,l,n); } void odd_even_merge2(int *A,int s) { int TMP[4]={A[s+0],A[s+1],A[s+2],A[s+3]}; A[s+0]=MIN(MIN(TMP[0],TMP[1]),MIN(TMP[2],TMP[3])); A[s+1]=MIN(MAX(MIN(TMP[0],TMP[1]),MIN(TMP[2],TMP[3])),MIN(MAX(TMP[0],TMP[1]),MAX(TMP[2],TMP[3]))); A[s+2]=MAX(MAX(MIN(TMP[0],TMP[1]),MIN(TMP[2],TMP[3])),MIN(MAX(TMP[0],TMP[1]),MAX(TMP[2],TMP[3]))); A[s+3]=MAX(MAX(TMP[0],TMP[1]),MAX(TMP[2],TMP[3])); } void odd_even_merge_sort(int *A,int l,int c,int r) { //printf("odd_even_merge_sort(%d,%d,%d)\n",l,c,r); //print_array(A,l,r); if(c-l==4) { odd_even_merge2(A,l); odd_even_merge2(A,c); odd_even_merge(A,l,c,r); return; } odd_even_merge_sort(A,l,(l+c)/2,c); odd_even_merge_sort(A,c,(c+r)/2,r); odd_even_merge(A,l,c,r); }
es1.h
#ifndef es1_h #define es1_h #include <iostream> #include <omp.h> #include <cmath> #define pi 3.14159 using namespace std; double es1Static(unsigned dim, unsigned nmt) { double *vec = new double [dim * dim]; double start = omp_get_wtime(); #pragma omp parallel num_threads(nmt) { omp_set_num_threads(nmt); #pragma omp for schedule(static) for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { vec[(i * dim) + j] = 15 * sin(j) * cos(i) * sqrt(2 * i) * pi * pow(j, 6); } } } double end = omp_get_wtime(); delete [] vec; return end - start; } double es1Dynamic(unsigned dim, unsigned nmt) { double *vec = new double [dim * dim]; double start = omp_get_wtime(); #pragma omp parallel num_threads(nmt) { #pragma omp for schedule(dynamic) for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { vec[(i * dim) + j] = 15 * sin(j) * cos(i) * sqrt(2 * i) * pi * pow(j, 6); } } } double end = omp_get_wtime(); delete [] vec; return end - start; } void es1() { cout << "Inserisci numero threads" << endl; unsigned nmt; cin >> nmt; cout << "Numero thread in utilizzo: " << nmt << endl; cout << endl <<"Inserisci dimensione della matrice" << endl; unsigned dim; cin >> dim; cout << "Dimensione della matrice " << dim << " x " << dim << endl; cout << "Inserisci numero positivo per scheduling statico, negativo per dinamico" << endl; double scelta; cin >> scelta; if (scelta > 0) { cout << "Tempo: " << es1Static(dim, nmt); } else if (scelta < 0) { cout << "Tempo: " << es1Dynamic(dim, nmt); } else { cout << "Non valido!" << endl; } } #endif
sample_nested.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */ /* * See LICENSE.txt in top-level directory. */ #include <omp.h> #include <stdio.h> #include <sys/time.h> #include <stdlib.h> int main(int argc, char * argv[]) { int size=(argc>1)?atoi(argv[1]):100; int i,j,k=0; int nthreads; struct timeval t_start, t_end; double time; double *a = (double *)malloc(sizeof(double)*size*size); #pragma omp parallel { nthreads=omp_get_num_threads(); } for(i=0;i<size*size;i++){ a[i]=i; } gettimeofday(&t_start,NULL); #pragma omp parallel for for(i=0;i<size;i++){ #pragma omp parallel for for(j=0;j<size;j++){ a[i*size+j]=a[i*size+j]*0.9; } } gettimeofday(&t_end,NULL); time=(t_end.tv_sec * 1000000 + t_end.tv_usec) - (t_start.tv_sec * 1000000 + t_start.tv_usec); printf("%d %f\n",nthreads,time/1000000.0); for(i=0;i<size*size;i++){ if(a[i]!=i*0.9){ printf("a[%d]=%f\n",i,a[i]); return 1; } } }
GB_binop__min_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_int64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__min_int64) // A.*B function (eWiseMult): GB (_AemultB_03__min_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_int64) // A*D function (colscale): GB (_AxD__min_int64) // D*A function (rowscale): GB (_DxB__min_int64) // C+=B function (dense accum): GB (_Cdense_accumB__min_int64) // C+=b function (dense accum): GB (_Cdense_accumb__min_int64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_int64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_int64) // C=scalar+B GB (_bind1st__min_int64) // C=scalar+B' GB (_bind1st_tran__min_int64) // C=A+scalar GB (_bind2nd__min_int64) // C=A'+scalar GB (_bind2nd_tran__min_int64) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IMIN (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_INT64 || GxB_NO_MIN_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__min_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__min_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__min_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__min_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__min_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__min_int64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__min_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__min_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__min_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// (tail of the preceding eWiseMult kernel, whose head lies above this chunk;
//  code left untouched)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Body is supplied by GB_emult_03_template.c, specialized via the GB_* macros
// defined earlier in this (auto-generated) file for the MIN operator on int64.
GrB_Info GB (_AemultB_03__min_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__min_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = min (x, Bx [p]) for every entry present in the bitmap Bb
// (GBB is true for all p when Bb is NULL, i.e. B is full).
GrB_Info GB (_bind1st__min_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = GB_IMIN (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = min (Ax [p], y) for every entry present in the bitmap Ab.
GrB_Info GB (_bind2nd__min_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = GB_IMIN (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = GB_IMIN (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__min_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = GB_IMIN (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__min_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
RBF_evaluate_Fast.c
/* This file is part of redbKIT.
 * Copyright (c) 2016, Ecole Polytechnique Federale de Lausanne (EPFL)
 * Author: Federico Negri <federico.negri@epfl.ch>
 */

#include "mex.h"
#include <stdio.h>
#include <math.h>
#include "blas.h"
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#else
#warning "OpenMP not enabled. Compile with mex RBF_evaluate_Fast.c CFLAGS="\$CFLAGS -fopenmp" LDFLAGS="\$LDFLAGS -fopenmp""
#endif

/*************************************************************************/
/* Evaluate the radial basis kernel named RBF_function_name at distance d
 * with shape parameter c.  Returns -1 when the name is not recognized
 * (callers should validate the name up front).                           */
double RBF_function(double d, double c, char *RBF_function_name)
{
    double val = -1;

    if (strcmp(RBF_function_name, "gaussian") == 0)
    {
        val = exp(-0.5*d*d/(c*c));
        return val;
    }

    if (strcmp(RBF_function_name, "thinplate") == 0)
    {
        /* log(d+1) instead of log(d) keeps the value finite at d = 0 */
        val = d*d*log(d+1);
        return val;
    }

    if (strcmp(RBF_function_name, "cubic") == 0)
    {
        val = (d*d*d);
        return val;
    }

    if (strcmp(RBF_function_name, "multiquadric") == 0)
    {
        val = sqrt(1+d*d/(c*c));
        return val;
    }

    return val;
}

/*************************************************************************/
/* I_f = RBF_evaluate_Fast(name, interp_points, x, c, coeff)
 *
 * Evaluates an RBF interpolant (radial terms + affine polynomial tail)
 * at the nPoints columns of x.  prhs layout:
 *   prhs[0]  kernel name (string)
 *   prhs[1]  dimX-by-nI interpolation points
 *   prhs[2]  dimX-by-nPoints evaluation points
 *   prhs[3]  scalar shape parameter
 *   prhs[4]  coefficients: nI radial weights, 1 constant, dimX linear terms
 */
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    /* Validate the argument counts BEFORE touching prhs[]: the previous
     * version called mxArrayToString(prhs[0]) first, which reads an
     * invalid pointer when fewer than 5 inputs are supplied. */
    if (nrhs != 5) {
        mexErrMsgTxt("5 inputs are required.");
    } else if (nlhs > 1) {
        mexErrMsgTxt("Too many output arguments.");
    }

    char *RBF_function_name = mxArrayToString(prhs[0]);
    if (RBF_function_name == NULL) {
        /* prhs[0] was not a char array (or conversion failed) */
        mexErrMsgTxt("First input must be the RBF kernel name (string).");
    }

    double* interp_points = mxGetPr(prhs[1]);
    int nI                = mxGetN(prhs[1]);

    double* x    = mxGetPr(prhs[2]);
    int dimX     = mxGetM(prhs[2]);
    int nPoints  = mxGetN(prhs[2]);

    double* tmpPtr  = mxGetPr(prhs[3]);
    double constant = tmpPtr[0];

    double* coeff = mxGetPr(prhs[4]);

    plhs[0] = mxCreateDoubleMatrix(nPoints,1, mxREAL);
    double* I_f = mxGetPr(plhs[0]);

    int i;
    /* one independent output entry per evaluation point */
    #pragma omp parallel for shared(I_f,x) private(i) firstprivate(coeff,interp_points,nI,dimX,constant,RBF_function_name)
    for (i = 0; i < nPoints; i++)
    {
        int l, k;
        I_f[i] = 0.0;

        /* radial part: sum_k coeff[k] * phi(||x_i - p_k||) */
        for (k = 0; k < nI; k++)
        {
            /* d = distance(x[:,i], interp_points(:,k)) */
            double tmp = 0;
            for (l = 0; l < dimX; l++)
            {
                double tmp2 = (x[l+dimX*i] - interp_points[l+dimX*k]);
                tmp += (tmp2*tmp2);
            }
            double d = sqrt(tmp);

            I_f[i] += coeff[k] * RBF_function(d, constant, RBF_function_name);
        }

        /* affine tail: constant term plus linear terms */
        I_f[i] += coeff[nI];
        for (k = 0; k < dimX; k++)
        {
            I_f[i] += coeff[k+nI+1]*x[k+dimX*i];
        }
    }

    mxFree(RBF_function_name);
}
/*************************************************************************/
IntSort.h
/*
 * IntSort.h
 *
 * Author: Manuel Penschuck (networkit@manuel.jetzt)
 */
#ifndef INTSORT_H_
#define INTSORT_H_

#include <limits>
#include <array>
#include <vector>
#include <memory>
#include <tuple>
#include <algorithm>
#include <type_traits>
#include <cassert>
#include <numeric>

#include <omp.h>

namespace intsort {
namespace IntSortInternal {

// Ceil of log2(x): smallest i with (1 << i) >= x; 0 for x == 0.
template<typename T>
uint32_t ilog2(T x) {
    if (!x) return 0;
    const typename std::make_unsigned<T>::type input = x;
    int i = 0;
    while (x >>= 1) i++;
    // round up when input is not an exact power of two
    i += (input > (1llu << i));
    assert((1llu << i) >= input);
    assert((1llu << i) / 2 < input);
    return i;
}

// Integer division rounding up: ceil(a / b).
template <typename T1, typename T2>
auto idiv_ceil(T1 a, T2 b) -> decltype(a / b) {
    return static_cast<T1>((static_cast<unsigned long long>(a)+b-1) / b);
}

// Parallel radix sort engine: one MSB pass partitions the input into
// independent chunks, which are then finished with LSB radix passes in
// parallel.  T is the element type, Key the unsigned key type, KeyExtract
// a functor mapping an element to its key.
template<typename T, typename Key, typename KeyExtract, size_t RADIX_WIDTH=8>
class IntSortImpl {
    // compute mask and shifts to later compute the queue index
    static_assert(RADIX_WIDTH >= 1, "Radix has to be at least 2"); // NOTE(review): message says 2 but the check is >= 1 — confirm intent
    static_assert(RADIX_WIDTH <= 8 * sizeof(T), "Radix is not allowed to exceed numer of bits in T");
    static constexpr size_t no_queues = 1llu << RADIX_WIDTH;
    using IndexArray = std::array<size_t, no_queues>;

public:
    // Precomputes all pass parameters from max_key: the first (MSB) pass
    // handles the top msb_radix_width bits; the remaining
    // lsb_remaining_width bits are split evenly over no_iters LSB passes
    // of adaptive_width bits each.
    IntSortImpl(KeyExtract key_extract, const Key max_key) :
        key_extract{key_extract},
        max_key{max_key},
        max_bits{ilog2(max_key)},
        msb_radix_width{std::min(max_bits, RADIX_WIDTH)},
        msb_radix{Key{1} << msb_radix_width},
        msb_shift{max_bits - msb_radix_width},
        lsb_remaining_width{max_bits - msb_radix_width},
        no_iters{static_cast<int>(idiv_ceil(std::max<size_t>(1llu, lsb_remaining_width), RADIX_WIDTH))},
        adaptive_width{idiv_ceil(lsb_remaining_width, no_iters)},
        adaptive_no_queues{1llu << adaptive_width},
        mask{(Key(1) << adaptive_width) - 1}
    { }

    // Sorts n elements starting at begin, using buf_begin as scratch of the
    // same length.  Returns true when the sorted result ended up in the
    // scratch buffer (the caller must then move it back), false when the
    // result is already at begin.
    template<typename Iter, typename IterBuf>
    bool sort(const Iter begin, const IterBuf buf_begin, const size_t n) {
        if (n < 2 || max_key < 1)
            return false; // in these cases the input is trivially sorted

        // at most one thread per ~128Ki elements
        const auto max_threads = std::min<int>(omp_get_max_threads(), idiv_ceil(n, 1 << 17));

        // compute how many iterations we need to sort numbers [0, ..., max_key], i.e. log(max_key, base=RADIX_WIDTH)
        std::array<size_t, no_queues + 1> splitter;
        splitter[no_queues] = n;

        // perform the first round as MSB radix sort which will yield indendent
        // chunks which then can be sorted pleasingly parallel. We add some padding
        // to thread_counter to avoid false sharing
        std::vector< std::array<size_t, no_queues + 64 / sizeof(size_t)> > thread_counters(max_threads);

        #pragma omp parallel num_threads(max_threads)
        {
            // symmetry breaking
            const auto tid = omp_get_thread_num();
            const auto no_threads = omp_get_num_threads();

            // figure out workload for each thread
            const size_t chunk_size = idiv_ceil(n, no_threads);
            const auto chunk = std::make_pair(chunk_size * tid,
                                              std::min(chunk_size * (tid + 1), n));

            // thread-local counters and iterators
            IndexArray queue_pointer;

            {
                const auto input_begin = begin + chunk.first;
                const auto input_end = begin + chunk.second;

                // pass 1: count keys per MSB bucket in this thread's chunk
                auto &counters = thread_counters[tid];
                counters.fill(0);

                for (auto it = input_begin; it != input_end; ++it) {
                    counters[key_extract(*it) >> msb_shift]++;
                }

                if (no_threads > 1) {
                    #pragma omp barrier
                }

                {
                    // prefix-sum all threads' counters to get this thread's
                    // write position for every bucket
                    size_t index = 0;
                    size_t tmp = 0; // avoid warnings
                    for (size_t qid = 0; qid != no_queues; ++qid) {
                        for (int ttid = 0; ttid < no_threads; ttid++) {
                            if (ttid == tid) tmp = index;
                            index += thread_counters[ttid][qid];
                        }
                        queue_pointer[qid] = tmp;
                    }

                    // store splitters which will be processed pleasingly parallel
                    if (0 == tid) {
                        std::copy(queue_pointer.cbegin(), queue_pointer.cend(), splitter.begin());
                    }
                }

                // pass 2: scatter elements into their MSB buckets in buf
                for (auto it = input_begin; it != input_end; ++it) {
                    const auto key = key_extract(*it);
                    const auto shifted = key >> msb_shift;
                    const auto index = queue_pointer[shifted]++;
                    buf_begin[index] = std::move(*it);
                }
            }

            if (lsb_remaining_width) {
                if (no_threads > 1) {
                    #pragma omp barrier
                }

                IndexArray counters;

                // Now solve parts independently
                #pragma omp for nowait
                for (int i = 0; i < msb_radix; ++i) {
                    if (splitter[i] == splitter[i + 1]) continue;

                    const size_t size = splitter[i + 1] - splitter[i];
                    auto input_base = buf_begin + splitter[i];
                    auto buffer_base = begin + splitter[i];

                    // iteration 0
                    {
                        const auto input_begin = input_base;
                        const auto input_end = input_base + size;

                        // in the first round we have to count the
                        // elements for each queue; later we do it while
                        // moving elements
                        std::fill_n(counters.begin(), adaptive_no_queues, 0);
                        for (auto it = input_begin; it != input_end; ++it) {
                            counters[get_queue_index(key_extract(*it), 0)]++;
                        }

                        move_to_queues(input_base, input_base + size, buffer_base, counters, 0, no_iters != 1);
                        std::swap(input_base, buffer_base);
                    }

                    // iterations 1 to no_iters-2
                    for(int iteration = 1; iteration < no_iters-1; iteration++) {
                        move_to_queues(input_base, input_base + size, buffer_base, counters, iteration, true);
                        std::swap(input_base, buffer_base);
                    }

                    // last iteration (no_iters - 1)
                    if (no_iters > 1)
                        move_to_queues(input_base, input_base + size, buffer_base, counters, no_iters-1, false);
                }
            }
        }

        // result lies in buf iff only the MSB pass ran, or an even number
        // of LSB ping-pong passes followed it
        return !lsb_remaining_width || !(no_iters % 2);
    }

private:
    // parameters
    KeyExtract key_extract;
    const Key max_key;
    const size_t max_bits;            // significant bits in max_key
    const size_t msb_radix_width;     // bits handled by the first (MSB) pass
    const size_t msb_radix;           // number of MSB buckets
    const size_t msb_shift;           // right shift isolating the MSB digit
    const size_t lsb_remaining_width; // bits left for the LSB passes
    const int no_iters;               // number of LSB passes
    const size_t adaptive_width;      // bits per LSB pass
    const size_t adaptive_no_queues;  // buckets per LSB pass
    const Key mask;                   // mask selecting one LSB digit

    // helpers

    // Digit of key used in the given LSB iteration.
    inline size_t get_queue_index(Key key, int iteration) const {
        return (key >> (iteration * adaptive_width)) & mask;
    };

    // One LSB distribution pass: scatter [begin, end) into buffer_base using
    // counters (bucket sizes of this pass); when count is set, also tally the
    // bucket sizes of the NEXT pass on the fly.
    template <typename IterT, typename BufT, typename CounterT>
    void move_to_queues (const IterT begin, const IterT end, const BufT buffer_base, CounterT& counters, int iteration, const bool count) {
        IndexArray pointers;
        pointers[0] = 0;
        std::partial_sum(counters.cbegin(), counters.cbegin() + (adaptive_no_queues - 1), pointers.begin() + 1);

        if (count)
            std::fill_n(counters.begin(), adaptive_no_queues, 0);

        for (auto it = begin; it != end; ++it) {
            const Key key = key_extract(*it);
            const auto index = pointers[get_queue_index(key, iteration)]++;
            if (count)
                counters[get_queue_index(key, iteration+1)]++;
            buffer_base[index] = std::move(*it);
        }
    }
};

} // ! namespace IntSortInternal

/**
 * Parallel Radix Sort of a sequence specified by the Random Access Iterators
 * @a begin and @a end. Each element must be convertable to an unsigned integer
 * Key using the functor @a key_extract (i.e. key_extract(*begin) must be implicitly
 * cast to Key).
 *
 * If the largest key in the data set is known, it should be provided to achieve
 * a potentially faster sorting. It is undefined behaviour if there exists a key
 * larger than max_key.
 *
 * In the first iteration the algorithm sorts the highest relevant bits of the key,
 * resulting in several sub problems which are then sorted pleasingly parallel with
 * an LSB Radix Sort variant (i.e. starting from the least significant bits).
 * The algorithm does not included explicit load balancing measures; in the worst
 * case all elements contain the same key, which results in a single active thread
 * after the first iteration. This is typically not an issue if the input is "somewhat"
 * uniformly distributed.
 *
 * @note RADIX_WIDTH specifies the number of bits sorted in a single iteration and
 * results in 2**RADIX_WIDTH many queues. Due to cache effects typically 7 or 8
 * yields the best performance.
*/ template<typename Iter, typename KeyExtract, typename Key, size_t RADIX_WIDTH = 8, typename T = typename std::iterator_traits<Iter>::value_type> inline void intsort(const Iter begin, const Iter end, KeyExtract key_extract, const Key max_key = std::numeric_limits<Key>::max()) { const size_t n = std::distance(begin, end); std::vector<T> buffer(n); IntSortInternal::IntSortImpl<T, Key, KeyExtract, RADIX_WIDTH> sorter(key_extract, max_key); bool need_buffer = sorter.sort(begin, buffer.begin(), n); if (need_buffer) std::copy(buffer.cbegin(), buffer.cend(), begin); } /** * If the data to be sorted is stored in a vector, it is beneficial to use this * specialisation, as it avoids (if necessary) copying the data from the temporary * buffer back into the input buffer in the last step. */ template<typename T, typename KeyExtract, typename Key, size_t RADIX_WIDTH = 8> inline void intsort(std::vector<T> &input, KeyExtract key_extract, const Key max_key = std::numeric_limits<Key>::max()) { auto begin = input.begin(); auto end = input.end(); const size_t n = std::distance(begin, end); std::vector<T> buffer(n); IntSortInternal::IntSortImpl<T, Key, KeyExtract, RADIX_WIDTH> sorter(key_extract, max_key); bool need_buffer = sorter.sort(begin, buffer.begin(), n); if (need_buffer) input.swap(buffer); } /** * If the data to be sorted is stored in a vector, it is beneficial to use this * specialisation, as it avoids (if necessary) copying the data from the temporary * buffer back into the input buffer in the last step. 
*/ template<typename T, typename KeyExtract, typename Key, size_t RADIX_WIDTH = 8> inline void intsort(std::shared_ptr<T[]> &input, const size_t n, KeyExtract key_extract, const Key max_key = std::numeric_limits<Key>::max()) { auto begin = input.get(); auto end = begin + n; std::shared_ptr<T[]> buffer{new T[n]}; IntSortInternal::IntSortImpl<T, Key, KeyExtract, RADIX_WIDTH> sorter(key_extract, max_key); bool need_buffer = sorter.sort(begin, buffer.get(), n); if (need_buffer) input.swap(buffer); } /** * If the data to be sorted is stored in a vector, it is beneficial to use this * specialisation, as it avoids (if necessary) copying the data from the temporary * buffer back into the input buffer in the last step. */ template<typename T, typename KeyExtract, typename Key, size_t RADIX_WIDTH = 8> inline void intsort(std::unique_ptr<T[]> &input, const size_t n, KeyExtract key_extract, const Key max_key = std::numeric_limits<Key>::max()) { auto begin = input.get(); auto end = begin + n; std::unique_ptr<T[]> buffer{new T[n]}; IntSortInternal::IntSortImpl<T, Key, KeyExtract, RADIX_WIDTH> sorter(key_extract, max_key); bool need_buffer = sorter.sort(begin, buffer.get(), n); if (need_buffer) input.swap(buffer); } } // namespace: intsort #endif // INTSORT_H_
optQCCAvgVals.c
#include <mex.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>

/* q_c.singleton = optQCMFC(condQB,prediction,Sigma_c,mu_c,c_c,mu_a_b,numColumnsPred,numColumnsShape,columnsPredShapeVec,columnsPredShapeFactorVec);
 *
 */

/* minimum of two ints */
int min(int A, int B)
{
  if (A < B) { return A; } else { return B; }
}

/* maximum of two ints */
int max(int A, int B)
{
  if (A > B) { return A; } else { return B; }
}

/* Bit-trick approximation of expf: writes a scaled/biased value directly
 * into the float's bit pattern via a union (fast but low precision).
 * NOTE(review): currently unused in this translation unit. */
float expf_fast(float a)
{
  union { float f; int x; } u;
  u.x = (int) (12102203 * a + 1064866805);
  return u.f;
}

/* Forward/backward (sum-product) message passing over the boundary chain,
 * one chain per prediction column j.
 *
 * Inputs (prhs):
 *   0 condQB        numRows x (numBounds*numColumnsPred) conditionals
 *   1 prediction    predictions, indexed like alpha
 *   2 mu_a_b        transition means
 *   3 factorsPrec   precision factors per (boundary, column)
 *   4 hashTable     precomputed exp() lookup table, index = -val*1000
 *   5 numColumnsPred scalar
 *   6 cmin, 7 cmax  int32 loop limits per (column, boundary)
 *
 * Outputs (plhs):
 *   0 q_c           normalized marginals, flattened
 *   1 boundaries    expected boundary positions (numBounds x numColumnsPred)
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
  /* Input variables */
  double *condQB = mxGetPr(prhs[0]);
  double *prediction = mxGetPr(prhs[1]);
  double *mu_a_b = mxGetPr(prhs[2]);
  double *factorsPrec = mxGetPr(prhs[3]);
  double *hashTable = mxGetPr(prhs[4]);
  int numColumnsPred = (int) mxGetScalar(prhs[5]);
  int *cmin = (int*) mxGetData(prhs[6]);
  int *cmax = (int*) mxGetData(prhs[7]);

  /* intern variables and pointers */
  double* q_c = NULL;
  double* boundaries = NULL;
  int i,j,k,i1,i2;
  int numRows = mxGetM(prhs[0]);
  int numBounds = mxGetN(prhs[0])/numColumnsPred;
  int alphaSize = numRows*numBounds*sizeof(double);
  double* alpha = malloc(alphaSize);   /* forward messages, per (boundary, row) */
  double* beta = malloc(alphaSize);    /* backward messages */
  double* c = malloc(numBounds*sizeof(double)); /* per-boundary normalizers */
  double alphaTotal,q_c_total,tmp,val,factor,cInv;
  double* preCalc = malloc(numRows*sizeof(double));
  int idxQC,idx,idxA,idxB,idxC,idxNumRows,idxCond,idxBounds;
  int* A = malloc(numBounds*sizeof(int)); /* lower loop limits per boundary */
  int* B = malloc(numBounds*sizeof(int)); /* upper loop limits per boundary */
  /* determines when to use the hash table */
  /*int limit = 10;*/
  int limit2 = -30;
  /* int counter = 0;*/

  /* switch from matlab indexing to C indexing */
  plhs[0] = mxCreateDoubleMatrix(1,numRows*numBounds*numColumnsPred,mxREAL);
  q_c = mxGetPr(plhs[0]);
  plhs[1] = mxCreateDoubleMatrix(numBounds,numColumnsPred,mxREAL);
  boundaries = mxGetPr(plhs[1]);

  /* ****** start sum-product ******** */
  for (j=0; j < numColumnsPred; j++) {
    /* for (j=0; j < 1; j++) {*/
    memset(alpha, 0, alphaSize);
    memset(beta, 0, alphaSize);

    /* calculate limits of for-loops corresponding to transition matrices */
    for (k=0; k < numBounds; k++) {
      A[k] = cmin[j + k*numColumnsPred];
      B[k] = cmax[j + k*numColumnsPred];
      /*printf("%d, %d: %d, %d\n",j,k,A[k],B[k]);*/
    }

    /* forward init: alpha for the first boundary */
    alphaTotal = 0;
    /* pred index for prediction */
    idxC = j*numRows*numBounds;
    for (i = A[0]; i <= B[0]; i++) {
      alpha[i] = condQB[j*numRows + i]*prediction[idxC + i];
      alphaTotal += alpha[i];
    }
    c[0] = alphaTotal;
    alphaTotal = 1/alphaTotal;
    /* normalize alpha */
    for (i=A[0]; i <= B[0]; i++) {
      alpha[i] *= alphaTotal;
    }

    /* make forward message passing over all boundaries */
    /* for boundaries 2 to numBounds */
    for (k=1; k < numBounds; k++) {
      /* for(k=1; k < 0; k++) { */
      /* preCalc index for inner loop */
      factor = -0.5*factorsPrec[(k-1)*numColumnsPred + j];
      idxNumRows = ((k-1)*numColumnsPred + j)*numRows;
      idxCond = (k*numColumnsPred + j)*numRows;
      idx = numRows*k;
      alphaTotal = 0;

      /* iterates over the columns of each transition matrix; corresponds to
         idxNonZeroA in matlab; determines the non-zero entries of the
         current alpha */
#pragma omp parallel for private(tmp,val,i2) reduction(+:alphaTotal)
      for (i1 = A[k]; i1 <= B[k]; i1++) {
        tmp = 0;
        /* iterates over the rows of transition matrices; corresponds to
           idxNonZeroB in matlab */
        /* upper triangular matrix --> ordering constraint on boundaries */
        for (i2 = A[k-1]; i2 <= min(i1,B[k-1]); i2++) {
          val = (i1 + 1 - mu_a_b[idxNumRows + i2]);
          val = val*val*factor;
          /* hashTable holds exp() values; skip negligible contributions */
          if (val > limit2) {tmp += alpha[idx - numRows + i2]*hashTable[(int)(-val*1000 + 0.5)];}
        }
        alpha[idx + i1] = prediction[idxC + idx + i1]*condQB[idxCond+i1]*tmp;
        alphaTotal += alpha[idx + i1];
      }

      c[k] = alphaTotal;
      alphaTotal = 1/alphaTotal;
      /* normalize alpha */
      for (i = A[k]; i <= B[k]; i++) {
        alpha[idx + i] *= alphaTotal;
      }
    } /* end for over bounds k */

    /* init beta for the last node */
    idxQC = j*numBounds*numRows;
    idxBounds = (j+1)*numBounds - 1;
    boundaries[idxBounds] = 0;
    for (i=(numBounds-1)*numRows;i<numRows*numBounds;i++) {
      beta[i] = 1;
      q_c[idxQC + i] = alpha[i];
      boundaries[idxBounds] += alpha[i]*((i+1)-(numBounds-1)*numRows);
    }

    /* message backward */
    for (k=numBounds-2; k >= 0; k--) {
      /* for (k = 0; k < 0; k++) {*/
      idxCond = j*numRows + (k+1)*numColumnsPred*numRows;
      idxB = numRows*(k+1);
      idxA = j*numRows*numBounds + (k+1)*numRows;

      /* precalculate entries for inner loop over z_{n+1}, that are
         independent of z_n */
      for (i=A[k+1]; i <= B[k+1]; i++) {
        preCalc[i] = beta[idxB + i]*prediction[idxA + i]*condQB[idxCond + i];
      }

      /* preCalc idx for inner loop */
      factor = -0.5*factorsPrec[k*numColumnsPred + j];
      idxNumRows = (k*numColumnsPred + j)*numRows;
      idx = numRows*k;

      /* the outer loop (over z_n) is constrained by alpha (and therefore
         condQB), the inner loop over (z_{n+1}) by condQB */
      q_c_total = 0;
      cInv = 1/c[k+1];
#pragma omp parallel for private(tmp,val,i2) reduction(+:q_c_total)
      for (i1 = A[k]; i1 <= B[k]; i1++) {
        tmp = 0;
        /* idxFinal */
        for (i2 = max(A[k+1],i1); i2 <= B[k+1]; i2++) {
          val = factor*(i2 + 1 - mu_a_b[idxNumRows + i1])*(i2 + 1 - mu_a_b[idxNumRows + i1]);
          if (val > limit2) {tmp += preCalc[i2]*hashTable[(int)(-val*1000 + 0.5)];}
        }
        beta[idx + i1] = tmp*cInv;
        /* marginal = forward * backward */
        q_c[idxQC + idx + i1] = alpha[idx + i1]*beta[idx + i1];
        q_c_total += q_c[idxQC + idx + i1];
      }

      idxBounds = j*numBounds + k;
      boundaries[idxBounds] = 0;
      /* convert to inverse */
      q_c_total = 1/q_c_total;
      /* normalize q_c distribution and accumulate the expected position */
      for (i1 = A[k]; i1 <= B[k]; i1++) {
        q_c[idxQC + idx + i1] *= q_c_total;
        boundaries[idxBounds] += q_c[idxQC + idx + i1]*(i1+1);
      }
    }
  }

  free(alpha);
  free(beta);
  free(c);
  free(preCalc);
  free(A);
  free(B);
}
GameOfLife.c
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <omp.h>

/*
 * Conway's Game of Life on an N x N grid for T steps, seeded with a glider.
 * The two grids are flat 1-D arrays indexed as iy*N + ix; edge cells are
 * never updated (fixed dead border). Prints the final state when show is set.
 */
int main(void){
  const int N=20, T=100;
  bool *grid, *tmp_grid, *tmp_ptr;
  int init_pos[2];
  bool show = true;

  // Allocate two 1-D arrays (2-D via indexing), zero-initialised by calloc
  grid = calloc((size_t)N*N, sizeof(bool));
  tmp_grid = calloc((size_t)N*N, sizeof(bool));
  if(grid == NULL || tmp_grid == NULL){
    fprintf(stderr, "GameOfLife: allocation failed\n");
    free(grid);
    free(tmp_grid);
    return EXIT_FAILURE;
  }

  // Place a simple structure (a glider) in the top left - it should move
  // and reform its shape as the simulation runs.
  // Note the indexing we use to get the position right
  init_pos[0] = 2;
  init_pos[1] = 4;
  grid[init_pos[1]*N + init_pos[0]+1] = 1;
  grid[init_pos[1]*N + init_pos[0]+2] = 1;
  grid[init_pos[1]*N + init_pos[0]+3] = 1;
  grid[(init_pos[1]+1)*N + init_pos[0]+3] = 1;
  grid[(init_pos[1]+2)*N + init_pos[0]+2] = 1;

  // Iterate over time
  for(int it = 1; it<= T; it++){
    // Loop over interior cells; write the updated state into tmp_grid.
    // Edge cells are left unchanged (both buffers keep a dead border).
#pragma omp parallel for
    for(int ix=1; ix< N-1; ix++){
      for(int iy=1; iy< N-1; iy++){
        // Count live neighbours of the current cell
        // (the cell itself isn't included in the sum)
        int cnt = 0;
        if(grid[(iy-1)*N + ix-1]) cnt = cnt + 1;
        if(grid[(iy-1)*N + ix  ]) cnt = cnt + 1;
        if(grid[(iy-1)*N + ix+1]) cnt = cnt + 1;
        if(grid[iy*N + ix-1]) cnt = cnt + 1;
        if(grid[iy*N + ix+1]) cnt = cnt + 1;
        if(grid[(iy+1)*N + ix-1]) cnt = cnt + 1;
        if(grid[(iy+1)*N + ix  ]) cnt = cnt + 1;
        if(grid[(iy+1)*N + ix+1]) cnt = cnt + 1;

        // The automaton rules reduce to: alive next step iff
        // (2 neighbours and currently alive) or exactly 3 neighbours
        if((cnt == 2 && grid[iy*N + ix]) || cnt == 3){
          tmp_grid[iy*N + ix] = 1;
        }else{
          tmp_grid[iy*N + ix] = 0;
        }
      }
    }

    // Swap the pointers using a temporary
    tmp_ptr = tmp_grid;
    tmp_grid = grid;
    grid = tmp_ptr;
  }

  // Show final state to screen
  if(show){
    for(int iy=0; iy<N; iy++){
      for(int ix=0; ix<N; ix++){
        if(grid[iy*N + ix]){
          printf("*");
        }else{
          printf(" ");
        }
      }
      printf("\n");
    }
  }

  // Release both buffers (previously leaked)
  free(grid);
  free(tmp_grid);
  return 0;
}
SE_fg_int_kaiser_mex.c
#include "mex.h"
#include "../SE_fgg.h"
#include "../SE_fkg.h"

void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int);

/* MEX input/output aliases */
#define X   prhs[0]   /* particle coordinates (N x dim) */
#define HH  prhs[1]   /* periodic grid data */
#define OPT prhs[2]   /* options struct, parsed by SE_FGG_MEX_params */

#define PHI_OUT plhs[0] // Output

#ifndef VERBOSE
#define VERBOSE 0
#endif

/* phi = SE_fg_int_kaiser_mex(x, H, opt)
 *
 * Gathers (interpolates) grid values back to the N particle positions.
 * The grid-extension function is selected at compile time by the
 * THREE_/TWO_/ONE_PERIODIC macros; the extend + gather step runs inside
 * one OpenMP parallel region when built with _OPENMP. */
void mexFunction(int nlhs,       mxArray *plhs[],
                 int nrhs, const mxArray *prhs[] )
{
    const int N = mxGetM(X);
    double* restrict x = mxGetPr(X);
    const double* H_per = mxGetPr(HH);

    SE_FGG_params params;
    SE_FGG_MEX_params(&params, OPT, N);

    // scratch arrays
    SE_FGG_work work;
    SE_FKG_allocate_workspace(&work, &params,false);

    // output vector
    PHI_OUT = mxCreateDoubleMatrix(N,1,mxREAL);
    double* phi = mxGetPr(PHI_OUT);

    // coordinates and charges (charges unused for interpolation)
    const SE_state st = {.x = x, .q = NULL};

    if(VERBOSE)
        mexPrintf("[SE%s FG(I)] N=%d, P=%d\n",PER_STR,N,params.P);

#ifdef _OPENMP
#pragma omp parallel default(shared)
#endif
    {
        /* extend the periodic grid, then gather at particle positions */
#ifdef THREE_PERIODIC
        SE_FGG_extend_fcn(&work, H_per, &params);
#endif
#ifdef TWO_PERIODIC
        SE2P_FGG_extend_fcn(&work, H_per, &params);
#endif
#ifdef ONE_PERIODIC
        SE1P_FGG_extend_fcn(&work, H_per, &params);
#endif
        SE_FKG_int(phi, &work, &st, &params);
    }

    // done
    SE_FGG_free_workspace(&work);
}
GB_unop__identity_int32_uint8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_int32_uint8
// op(A') function:  GB_unop_tran__identity_int32_uint8

// C type:   int32_t
// A type:   uint8_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = (int32_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: uint8_t -> int32_t requires a cast, so no memcpy fast path)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_int32_uint8
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Body supplied by GB_unop_transpose.c, specialized via the macros above.
GrB_Info GB_unop_tran__identity_int32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
calc_dnn.c
/* * Copyright (c) 1991-2013 Kawahara Lab., Kyoto University * Copyright (c) 2000-2005 Shikano Lab., Nara Institute of Science and Technology * Copyright (c) 2005-2013 Julius project team, Nagoya Institute of Technology * All rights reserved */ /* define this to test disabling expsum computation at softmax */ #undef NO_SUM_COMPUTATION #ifdef _WIN32 #include <intrin.h> #else #if defined(__arm__) || TARGET_OS_IPHONE || defined(__aarch64__) || defined(__EMSCRIPTEN__) #else #include <cpuid.h> #endif #endif /* _WIN32 */ #include <sent/stddefs.h> #include <sent/htk_hmm.h> #include <sent/htk_param.h> #include <sent/hmm.h> #include <sent/hmm_calc.h> #if defined(HAS_SIMD_FMA) || defined(HAS_SIMD_AVX) || defined(HAS_SIMD_SSE) || defined(HAS_SIMD_NEON) || defined(HAS_SIMD_NEONV2) #define SIMD_ENABLED #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #endif static int use_simd = USE_SIMD_NONE; /************************************************************************/ /* determine which SIMD code to run */ #ifdef SIMD_ENABLED static void cpu_id_check() { int cpuinfo[4]; boolean sse = FALSE, avx = FALSE, fma = FALSE; use_simd = USE_SIMD_NONE; #if defined(__arm__) || TARGET_OS_IPHONE /* on ARM NEON */ #if defined(HAS_SIMD_NEONV2) use_simd = USE_SIMD_NEONV2; #elif defined(HAS_SIMD_NEON) use_simd = USE_SIMD_NEON; #else use_simd = USE_SIMD_NONE; #endif #else /* ~__arm__ */ #ifdef _WIN32 __cpuid(cpuinfo, 0x00000001); if(cpuinfo[3] & (1 << 25)) { sse = TRUE; } if(cpuinfo[2] & (1 << 28)) { avx = TRUE; } if(cpuinfo[2] & (1 << 12)) { fma = TRUE; } #else /* ~_WIN32 */ unsigned int eax, ebx, ecx, edx; if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) == 0) return; if (edx & bit_SSE) { sse = TRUE; } if (ecx & bit_AVX) { avx = TRUE; } if (ecx & bit_FMA) { fma = TRUE; } #endif /* _WIN32 */ #ifdef HAS_SIMD_FMA if (fma == TRUE) { use_simd = USE_SIMD_FMA; return; } #endif #ifdef HAS_SIMD_AVX if (avx == TRUE) { use_simd = USE_SIMD_AVX; return; } #endif #ifdef HAS_SIMD_SSE if (sse == TRUE) { use_simd 
= USE_SIMD_SSE; return; } #endif #endif /* ~__arm__ */ } static void *mymalloc_simd_aligned(size_t size) { void *ptr; switch(use_simd) { case USE_SIMD_FMA: case USE_SIMD_AVX: ptr = mymalloc_aligned(size, 32); break; case USE_SIMD_SSE: case USE_SIMD_NEON: case USE_SIMD_NEONV2: ptr = mymalloc_aligned(size, 16); break; default: ptr = mymalloc(size); break; } return ptr; } static void myfree_simd_aligned(void *ptr) { switch(use_simd) { case USE_SIMD_FMA: case USE_SIMD_AVX: case USE_SIMD_SSE: case USE_SIMD_NEON: case USE_SIMD_NEONV2: if (ptr != NULL) myfree_aligned(ptr); break; default: if (ptr != NULL) free(ptr); break; } } #endif /* SIMD_ENABLED */ void get_builtin_simd_string(char *buf) { buf[0] = '\0'; #ifdef HAS_SIMD_NEON strcat(buf, " NEON"); #endif #ifdef HAS_SIMD_NEONV2 strcat(buf, " NEONv2"); #endif #ifdef HAS_SIMD_SSE strcat(buf, " SSE"); #endif #ifdef HAS_SIMD_AVX strcat(buf, " AVX"); #endif #ifdef HAS_SIMD_FMA strcat(buf, " FMA"); #endif } int check_avail_simd() { #ifdef SIMD_ENABLED cpu_id_check(); #endif return use_simd; } static void output_use_simd() { #ifdef SIMD_ENABLED #ifdef HAS_SIMD_NEON jlog("Stat: calc_dnn: ARM NEON instructions built-in\n"); #endif #ifdef HAS_SIMD_NEONV2 jlog("Stat: calc_dnn: ARM NEONv2 instructions built-in\n"); #endif #ifdef HAS_SIMD_FMA jlog("Stat: calc_dnn: FMA instructions built-in\n"); #endif #ifdef HAS_SIMD_AVX jlog("Stat: calc_dnn: AVX instructions built-in\n"); #endif #ifdef HAS_SIMD_SSE jlog("Stat: calc_dnn: SSE instructions built-in\n"); #endif #else /* ~SIMD_ENABLED */ jlog("Warning: NO built-in SIMD support, DNN computation may be too slow!\n"); return; #endif /* ~SIMD_ENABLED */ #ifdef SIMD_ENABLED if (use_simd == USE_SIMD_SSE) { jlog("Stat: clac_dnn: use SSE SIMD instruction (128bit)\n"); } else if (use_simd == USE_SIMD_AVX) { jlog("Stat: clac_dnn: use AVX SIMD instruction (256bit)\n"); } else if (use_simd == USE_SIMD_FMA) { jlog("Stat: clac_dnn: use FMA SIMD instruction (256bit)\n"); } else if (use_simd == 
USE_SIMD_NEON) { jlog("Stat: use ARM NEON instruction\n"); } else if (use_simd == USE_SIMD_NEONV2) { jlog("Stat: use ARM NEONv2 instruction\n"); } else { jlog("Warning: clac_dnn: no SIMD support, DNN computation may be too slow!\n"); } #endif /* SIMD_ENABLED */ } /************************************************************************/ /* .npy file load */ static boolean load_npy(float *array, char *filename, int x, int y) { FILE *fp; unsigned char code; char magic[6]; unsigned char major_version; unsigned char minor_version; unsigned short header_len; char *header; size_t len; boolean fortran_order; if ((fp = fopen_readfile(filename)) == NULL) { jlog("Error: load_npy: unable to open: %s\n", filename); return FALSE; } if ((len = myfread(&code, 1, 1, fp)) < 1) { jlog("Error: load_npy: failed to read header: %s\n", filename); fclose_readfile(fp); return FALSE; } if (code != 0x93) { jlog("Error: load_npy: wrong magic number, not an npy file: %s\n", filename); return FALSE; } if ((len = myfread(magic, 1, 5, fp)) < 5) { jlog("Error: load_npy: failed to read header: %s\n", filename); fclose_readfile(fp); return FALSE; } magic[5] = '\0'; if (strmatch(magic, "NUMPY") == FALSE) { jlog("Error: load_npy: wrong magic header, not an npy file: %s\n", filename); return FALSE; } if ((len = myfread(&major_version, 1, 1, fp)) < 1) { jlog("Error: load_npy: failed to read header: %s\n", filename); fclose_readfile(fp); return FALSE; } /* we only assume Version 1.x format */ /* not check subversion x */ if (major_version != 1) { jlog("Error: load_npy: can read only Version 1.0 but this file is Version %d\n", major_version); fclose_readfile(fp); return FALSE; } if ((len = myfread(&minor_version, 1, 1, fp)) < 1) { jlog("Error: load_npy: failed to read header: %s\n", filename); fclose_readfile(fp); return FALSE; } /* currently not support all conversion */ /* accept only littlen endian 4byte float, with fortran order */ /* produce error if the file has other format */ if ((len = 
myfread(&header_len, 2, 1, fp)) < 1) { jlog("Error: load_npy: failed to read header length: %s\n", filename); fclose_readfile(fp); return FALSE; } #ifdef WORDS_BIGENDIAN swap_bytes(&header_len, 2, 1); #endif header = (char *)mymalloc(header_len + 1); if ((len = myfread(header, 1, header_len, fp)) < header_len) { jlog("Error: load_npy: failed to read header (%d bytes): %s\n", header_len, filename); free(header); fclose_readfile(fp); return FALSE; } header[header_len] = '\0'; if (strstr(header, "'descr': '<f4'") == NULL) { jlog("Error: load_npy: not a little-endian float array: %s\n", filename); free(header); fclose_readfile(fp); return FALSE; } /* fortran order: data are stored per columns */ /* C order: data are stored per row */ if (strstr(header, "'fortran_order': True")) { fortran_order = TRUE; } else { fortran_order = FALSE; } { char buf[100]; sprintf(buf, "'shape': (%d, %d)", x, y); if (strstr(header, buf) == NULL) { sprintf(buf, "'shape': (%d, %d)", y, x); if (strstr(header, buf) == NULL) { jlog("Error: load_npy: not a (%d, %d) array? 
%s\n", x, y, filename); free(header); fclose_readfile(fp); return FALSE; } } } free(header); /* just read them in the order */ if ((len = myfread(array, 4, x * y, fp)) < x * y) { jlog("Error: load_npy: failed to read %d bytes: %s\n", x * y, filename); fclose_readfile(fp); return FALSE; } fclose_readfile(fp); return TRUE; } /************************************************************************/ /* standard logistic function value table: take range x[-6,6] */ /* table size: LOGISTIC_TABLE_FACTOR * 12 * 4 (bytes) */ #define LOGISTIC_TABLE_FACTOR 20000 #define LOGISTIC_TABLE_MAX (16 * LOGISTIC_TABLE_FACTOR) #define LOGISTIC_MIN 0.000334 #define LOGISTIC_MAX 0.999666 static float logistic_table[LOGISTIC_TABLE_MAX+1]; /* logistic value table */ /* build logistic function value table */ static void logistic_table_build() { int i; double d; double x; for (i = 0; i <= LOGISTIC_TABLE_MAX; i++) { x = (double)i / (double)LOGISTIC_TABLE_FACTOR - 8.0; d = 1.0 / (1.0 + exp(-x)); logistic_table[i] = (float)d; } } /* return logistic function value, consulting table */ static float logistic_func(float x) { if (x <= -8.0f) return LOGISTIC_MIN; if (x >= 8.0f) return LOGISTIC_MAX; return logistic_table[(int)((x + 8.0f) * LOGISTIC_TABLE_FACTOR + 0.5)]; } /* initialize dnn layer */ static void dnn_layer_init(DNNLayer *l) { l->w = NULL; l->b = NULL; l->in = 0; l->out = 0; #ifdef _OPENMP l->begin = NULL; l->end = NULL; #endif /* _OPENMP */ #ifdef __NVCC__ l->dw = NULL; l->db = NULL; #endif /* __NVCC__ */ } /* load dnn layer parameter from files */ static boolean dnn_layer_load(DNNLayer *l, int in, int out, char *wfile, char *bfile, int thread_num) { l->in = in; l->out = out; #ifdef SIMD_ENABLED if (use_simd == USE_SIMD_AVX && l->in % 8 != 0) { jlog("Error: dnn_layer_load: input vector length is not 8-element aligned (%d)\n", l->in); return FALSE; } if (use_simd == USE_SIMD_SSE && l->in % 4 != 0) { jlog("Error: dnn_layer_load: input vector length is not 4-element aligned (%d)\n", l->in); 
return FALSE; } l->w = (float *)mymalloc_simd_aligned(sizeof(float) * l->out * l->in); l->b = (float *)mymalloc_simd_aligned(sizeof(float) * l->out); #else l->w = (float *)mymalloc(sizeof(float) * l->out * l->in); l->b = (float *)mymalloc(sizeof(float) * l->out); #endif /* SIMD_ENABLED */ if (! load_npy(l->w, wfile, l->in, l->out)) return FALSE; jlog("Stat: dnn_layer_load: loaded %s\n", wfile); if (! load_npy(l->b, bfile, l->out, 1)) return FALSE; jlog("Stat: dnn_layer_load: loaded %s\n", bfile); #ifdef _OPENMP /* divide into thread chunks */ if (l->begin == NULL) { l->begin = (int *)mymalloc(sizeof(int) * thread_num); } if (l->end == NULL) { l->end = (int *)mymalloc(sizeof(int) * thread_num); } int num = l->out / thread_num; /* padding base chunk size to factor of 4 for better SIMD processing */ num = ((num + 3) / 4) * 4; int i; for (i = 0; i < thread_num; i++) { l->begin[i] = num * i; l->end[i] = num * i + num; if (l->end[i] > l->out) l->end[i] = l->out; } #endif /* _OPENMP */ return TRUE; } /* clear dnn layer */ static void dnn_layer_clear(DNNLayer *l) { #ifdef SIMD_ENABLED if (l->w != NULL) myfree_simd_aligned(l->w); if (l->b != NULL) myfree_simd_aligned(l->b); #else if (l->w != NULL) free(l->w); if (l->b != NULL) free(l->b); #endif /* SIMD_ENABLED */ #ifdef _OPENMP if (l->begin != NULL) free(l->begin); if (l->end != NULL) free(l->end); #endif /* _OPENMP */ #ifdef __NVCC__ cuda_layer_free(l); #endif /* __NVCC__ */ dnn_layer_init(l); } /*********************************************************************/ DNNData *dnn_new() { DNNData *d; d = (DNNData *)mymalloc(sizeof(DNNData)); memset(d, 0, sizeof(DNNData)); return d; } void dnn_clear(DNNData *dnn) { int i; #ifdef __NVCC__ cuda_dnn_clear(dnn); #endif /* __NVCC__ */ if (dnn->h) { for (i = 0; i < dnn->hnum; i++) { dnn_layer_clear(&(dnn->h[i])); } free(dnn->h); } dnn_layer_clear(&(dnn->o)); if (dnn->state_prior) free(dnn->state_prior); for (i = 0; i < dnn->hnum; i++) { if (dnn->work[i]) { #ifdef SIMD_ENABLED 
myfree_simd_aligned(dnn->work[i]); #else free(dnn->work[i]); #endif } } free(dnn->work); #ifdef SIMD_ENABLED if (dnn->invec) myfree_simd_aligned(dnn->invec); if (dnn->accum) myfree_aligned(dnn->accum); #endif memset(dnn, 0, sizeof(DNNData)); } void dnn_free(DNNData *dnn) { dnn_clear(dnn); free(dnn); } /************************************************************************/ static void sub1(float *dst, float *src, float *w, float *b, int out, int in, float *fstore) { float *s; int i, j; for (i = 0; i < out; i++) { float x = 0.0f; s = src; for (j = 0; j < in; j++) { x += *(w++) * *(s++); } *(dst++) = x + *(b++); } } /************************************************************************/ /* initialize dnn */ boolean dnn_setup(DNNData *dnn, int veclen, int contextlen, int inputnodes, int outputnodes, int hiddennodes, int hiddenlayernum, char **wfile, char **bfile, char *output_wfile, char *output_bfile, char *priorfile, float prior_factor, boolean state_prior_log10nize, int batchsize, int num_threads, char *cuda_mode) { int i; /* check if CPU has SIMD instruction support */ #ifdef SIMD_ENABLED cpu_id_check(); #endif if (dnn == NULL) return FALSE; /* clear old data if exist */ dnn_clear(dnn); /* build logistic table */ logistic_table_build(); /* set values */ dnn->batch_size = batchsize; dnn->veclen = veclen; dnn->contextlen = contextlen; dnn->inputnodenum = inputnodes; dnn->hiddennodenum = hiddennodes; dnn->outputnodenum = outputnodes; dnn->prior_factor = prior_factor; dnn->num_threads = num_threads; #ifdef __NVCC__ dnn->blocksize1 = 0; dnn->blocksize2 = 0; if (cuda_mode == NULL) { dnn->use_cuda = TRUE; dnn->use_cuda_shared = FALSE; } else if (strmatch(cuda_mode, "disable")) { dnn->use_cuda = FALSE; dnn->use_cuda_shared = FALSE; } else if (strnmatch(cuda_mode, "global", 6)) { dnn->use_cuda = TRUE; dnn->use_cuda_shared = FALSE; if (strlen(cuda_mode) > 6) { char *buf = strdup(cuda_mode + 6); char *p, *save; int n = 0; for (p = mystrtok_safe(buf, ",", &save); p; p = 
mystrtok_safe(NULL, ",", &save)) { switch(n) { case 0: dnn->blocksize1 = atoi(p); break; default: jlog("Error: dnn_init: too many CUDA mode parameter: %s\n", cuda_mode); return FALSE; } n++; } free(buf); } } else if (strnmatch(cuda_mode, "shared", 6)) { dnn->use_cuda = TRUE; dnn->use_cuda_shared = TRUE; if (strlen(cuda_mode) > 6) { #if 1 jlog("Error: dnn_init: CUDA shared mode block parameters are fixed to 16x8, remove the parameters: %s\n", cuda_mode); return FALSE; #else char *buf = strdup(cuda_mode + 6); char *p, *save; int n = 0; for (p = mystrtok_safe(buf, ",", &save); p; p = mystrtok_safe(NULL, ",", &save)) { switch(n) { case 0: dnn->blocksize1 = atoi(p); break; case 1: dnn->blocksize2 = atoi(p); break; default: jlog("Error: dnn_init: too many CUDA mode parameter: %s\n", cuda_mode); return FALSE; } n++; } free(buf); #endif } } #else if (cuda_mode != NULL && strmatch(cuda_mode, "disable") == FALSE) { jlog("Error: dnn_init: CUDA mode specified as \"%s\" but no CUDA support is built-in\n", cuda_mode); return FALSE; } #endif /* __NVCC__ */ #ifdef _OPENMP /* set number of threads */ int max_num_threads = omp_get_max_threads(); if (dnn->num_threads > max_num_threads) { jlog("Warning: dnn_init: %d threads requested but available max is %d\n", dnn->num_threads, max_num_threads); dnn->num_threads = max_num_threads; } jlog("Stat: dnn_init: use %d threads for DNN computation (max %d cores)\n", dnn->num_threads, max_num_threads); #endif /* OPENMP */ #ifdef __NVCC__ // copy logistic_table to GPU if (dnn->use_cuda) { cuda_copy_logistic_table(logistic_table, LOGISTIC_TABLE_MAX + 1); jlog("Stat: dnn_init: logistic table copied to GPU\n"); } #endif /* __NVCC__ */ /* check for input length */ { int inputlen = veclen * contextlen; if (inputnodes != inputlen) { jlog("Error: dnn_init: veclen(%d) * contextlen(%d) != inputnodes(%d)\n", veclen, contextlen, inputnodes); return FALSE; } jlog("Stat: dnn_init: input: vec %d * context %d = %d dim\n", veclen, contextlen, inputlen); 
jlog("Stat: dnn_init: input layer: %d dim\n", inputnodes); jlog("Stat: dnn_init: %d hidden layer(s): %d dim\n", hiddenlayernum, hiddennodes); jlog("Stat: dnn_init: output layer: %d dim\n", outputnodes); } /* initialize layers */ dnn->hnum = hiddenlayernum; dnn->h = (DNNLayer *)mymalloc(sizeof(DNNLayer) * dnn->hnum); for (i = 0; i < dnn->hnum; i++) { dnn_layer_init(&(dnn->h[i])); } dnn_layer_init(&(dnn->o)); /* load layer parameters */ if (dnn_layer_load(&(dnn->h[0]), inputnodes, hiddennodes, wfile[0], bfile[0], dnn->num_threads) == FALSE) return FALSE; for (i = 1; i < dnn->hnum; i++) { if (dnn_layer_load(&(dnn->h[i]), hiddennodes, hiddennodes, wfile[i], bfile[i], dnn->num_threads) == FALSE) return FALSE; } if (dnn_layer_load(&(dnn->o), hiddennodes, outputnodes, output_wfile, output_bfile, dnn->num_threads) == FALSE) return FALSE; #ifdef __NVCC__ // load DNN layer definitions to GPU if (dnn->use_cuda) { for (i = 0; i < dnn->hnum; i++) { cuda_layer_load(&(dnn->h[i])); jlog("Stat: dnn_init: layer #%d loaded to GPU\n", i); } cuda_layer_load(&(dnn->o)); jlog("Stat: dnn_init: output layer loaded to GPU\n"); } #endif /* __NVCC__ */ /* load state prior */ { FILE *fp; int id; float val; dnn->state_prior_num = outputnodes; dnn->state_prior = (float *)mymalloc(sizeof(float) * dnn->state_prior_num); for (i = 0; i < dnn->state_prior_num; i++) { dnn->state_prior[i] = 0.0f; } if ((fp = fopen(priorfile, "r")) == NULL) { jlog("Error: cannot open %s\n", priorfile); return FALSE; } while (fscanf(fp, "%d %e", &id, &val) != EOF){ if (id < 0 || id >= dnn->state_prior_num) { jlog("Error: wrong state id in prior file (%d)\n", id); fclose_readfile(fp); return FALSE; } dnn->state_prior[id] = val * prior_factor; if (state_prior_log10nize) { // log10-nize prior dnn->state_prior[id] = log10(dnn->state_prior[id]); } } fclose(fp); jlog("Stat: dnn_init: state prior loaded: %s\n", priorfile); } /* allocate work area */ dnn->work = (float **)mymalloc(sizeof(float *) * dnn->hnum); for (i = 0; i < 
dnn->hnum; i++) { #ifdef SIMD_ENABLED dnn->work[i] = (float *)mymalloc_simd_aligned(sizeof(float) * dnn->hiddennodenum); #else dnn->work[i] = (float *)mymalloc(sizeof(float) * dnn->hiddennodenum); #endif } #ifdef SIMD_ENABLED dnn->invec = (float *)mymalloc_simd_aligned(sizeof(float) * inputnodes); #ifdef _OPENMP dnn->accum = (float *)mymalloc_simd_aligned(32 * dnn->num_threads); #else dnn->accum = (float *)mymalloc_simd_aligned(32); #endif /* OPENMP */ #endif #ifdef __NVCC__ if (dnn->use_cuda) cuda_dnn_setup(dnn); if (dnn->use_cuda) { if (dnn->use_cuda_shared) { jlog("Stat: dnn_init: CUDA mode: shared, block size = %d x %d\n", dnn->blocksize1, dnn->blocksize2); } else { jlog("Stat: dnn_init: CUDA mode: global, block size = %d\n", dnn->blocksize1); } } else { jlog("Stat: dnn_init: disabled CUDA support for DNN computation\n"); } #else jlog("Stat: dnn_init: no CUDA support is built in, CUDA will not be used\n"); #endif /* __NVCC__ */ /* choose sub function */ #ifdef SIMD_ENABLED switch(use_simd) { case USE_SIMD_FMA: dnn->subfunc = calc_dnn_fma; break; case USE_SIMD_AVX: dnn->subfunc = calc_dnn_avx; break; case USE_SIMD_SSE: dnn->subfunc = calc_dnn_sse; break; case USE_SIMD_NEON: dnn->subfunc = calc_dnn_neon; break; case USE_SIMD_NEONV2: dnn->subfunc = calc_dnn_neonv2; break; default: dnn->subfunc = sub1; break; } #else dnn->subfunc = sub1; #endif /* SIMD_ENABLED */ /* output CPU related info */ output_use_simd(); return TRUE; } void dnn_calc_outprob(HMMWork *wrk) { float *src; DNNData *dnn = wrk->OP_dnn; #ifndef _OPENMP int n = 0; int hidx, i; float *dst; DNNLayer *h; #endif #ifdef __NVCC__ if (dnn->use_cuda) { cuda_calc_outprob(wrk); return; } #endif /* frame = wrk->OP_time */ /* param = wrk->OP_param */ /* input vector = wrk->OP_param[wrk->OP_time][] */ /* store state outprob to wrk->last_cache[] */ /* feed forward through hidden layers by standard logistic function */ #ifdef SIMD_ENABLED memcpy(dnn->invec, &(wrk->OP_param->parvec[wrk->OP_time][0]), sizeof(float) * 
dnn->inputnodenum); src = dnn->invec; #else src = &(wrk->OP_param->parvec[wrk->OP_time][0]); #endif /* SIMD_ENABLED */ #ifdef _OPENMP #pragma omp parallel num_threads(dnn->num_threads) { int hidx, i; float *lsrc, *dst; DNNLayer *h; int id = omp_get_thread_num(); int j; lsrc = src; for (hidx = 0; hidx < dnn->hnum; hidx++) { dst = dnn->work[hidx]; h = &(dnn->h[hidx]); (*dnn->subfunc)(dst + h->begin[id] , lsrc, h->w + h->begin[id] * h->in, h->b + h->begin[id], h->end[id] - h->begin[id], h->in, dnn->accum + id * 8); for (j = h->begin[id] ; j < h->end[id]; j++) { if (dst[j] <= -8.0f) dst[j] = LOGISTIC_MIN; else if (dst[j] >= 8.0f) dst[j] = LOGISTIC_MAX; else dst[j] = logistic_table[(int)((dst[j] + 8.0f) * LOGISTIC_TABLE_FACTOR + 0.5)]; } #pragma omp barrier lsrc = dst; } /* compute output layer */ (*dnn->subfunc)(wrk->last_cache + dnn->o.begin[id] , lsrc, dnn->o.w + dnn->o.begin[id] * dnn->o.in, dnn->o.b + dnn->o.begin[id], dnn->o.end[id] - dnn->o.begin[id], dnn->o.in, dnn->accum + id * 8); } #else /* ~_OPENMP */ for (hidx = 0; hidx < dnn->hnum; hidx++) { dst = dnn->work[hidx]; h = &(dnn->h[hidx]); (*dnn->subfunc)(dst, src, h->w, h->b, h->out, h->in, dnn->accum); for (i = 0; i < h->out; i++) { dst[i] = logistic_func(dst[i]); } src = dst; } /* compute output layer */ (*dnn->subfunc)(wrk->last_cache, src, dnn->o.w, dnn->o.b, dnn->o.out, dnn->o.in, dnn->accum); #endif /* _OPENMP */ /* do softmax */ /* INV_LOG_TEN * (x - addlogarray(x)) - log10(state_prior)) */ #ifdef NO_SUM_COMPUTATION /* not compute sum */ for (i = 0; i < wrk->statenum; i++) { wrk->last_cache[i] = INV_LOG_TEN * wrk->last_cache[i] - dnn->state_prior[i]; } #else /* compute sum */ { int i; float logprob = addlog_array(wrk->last_cache, wrk->statenum); for (i = 0; i < wrk->statenum; i++) { wrk->last_cache[i] = INV_LOG_TEN * (wrk->last_cache[i] - logprob) - dnn->state_prior[i]; } } #endif /* NO_SUM_COMPUTATION */ }
omp_task_red_taskloop.c
// RUN: %libomp-compile-and-run // Parsing error until gcc8: // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8 // Parsing error until clang11: // UNSUPPORTED: clang-10, clang-9, clang-8, clang-7 // No icc compiler support yet // XFAIL: icc #include <stdio.h> #include <omp.h> int r; int work(int k, int l) { return k + l + 1; } void bar(int i) { #pragma omp taskgroup task_reduction(+:r) { int th_gen = omp_get_thread_num(); #pragma omp task in_reduction(+:r) firstprivate(i, th_gen) { r += work(i, 0); printf("executing task (%d, 0), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen); } #pragma omp task in_reduction(+:r) firstprivate(i, th_gen) { r += work(i, 1); printf("executing task (%d, 1), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen); } } } int foo() { int i; int th_gen = omp_get_thread_num(); #pragma omp taskgroup task_reduction(+:r) { bar(0); } printf("th %d passed bar0\n", th_gen); #pragma omp taskloop reduction(+:r) firstprivate(th_gen) for (i = 1; i < 4; ++i) { bar(i); printf("th %d (gen by th %d) passed bar%d in taskloop\n", omp_get_thread_num(), th_gen, i); #pragma omp task in_reduction(+:r) r += i; } return 0; } // res = ((1+2)+(2+3)+(3+4)+(4+5)+1+2+3) = 30 #define res 30 int main() { r = 0; #pragma omp parallel num_threads(2) { // barrier ensures threads have started before tasks creation #pragma omp barrier // single ensures no race condition between taskgroup reductions #pragma omp single nowait foo(); } if (r == res) { return 0; } else { printf("error r = %d (!= %d)\n", r, res); return 1; } }
pcpaes_cbcdecrypt.c
/*******************************************************************************
* Copyright 2013-2019 Intel Corporation
* All Rights Reserved.
*
* If this software was obtained under the Intel Simplified Software License,
* the following terms apply:
*
* The source code, information and material ("Material") contained herein is
* owned by Intel Corporation or its suppliers or licensors, and title to such
* Material remains with Intel Corporation or its suppliers or licensors. The
* Material contains proprietary information of Intel or its suppliers and
* licensors. The Material is protected by worldwide copyright laws and treaty
* provisions. No part of the Material may be used, copied, reproduced,
* modified, published, uploaded, posted, transmitted, distributed or disclosed
* in any way without Intel's prior express written permission. No license under
* any patent, copyright or other intellectual property rights in the Material
* is granted to or conferred upon you, either expressly, by implication,
* inducement, estoppel or otherwise. Any license under such intellectual
* property rights must be express and approved by Intel in writing.
*
* Unless otherwise agreed by Intel in writing, you may not remove or alter this
* notice or any other notice embedded in Materials by Intel or Intel's
* suppliers or licensors in any way.
*
*
* If this software was obtained under the Apache License, Version 2.0 (the
* "License"), the following terms apply:
*
* You may not use this file except in compliance with the License. You may
* obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

/*
//
//  Purpose:
//     Cryptography Primitive.
//     AES encryption/decryption (CBC mode)
//     AES encryption/decryption (CBC-CS mode)
//
//  Contents:
//     ippsAESDecryptCBC()
//
*/

#include "owndefs.h"
#include "owncp.h"
#include "pcpaesm.h"
#include "pcptool.h"
#include "pcpaes_cbc.h"

#if defined( _OPENMP )
#include <omp.h>
#endif

/*F*
//    Name: ippsAESDecryptCBC
//
// Purpose: AES-CBC decryption.
//
// Returns:                Reason:
//    ippStsNullPtrErr        pCtx == NULL
//                            pSrc == NULL
//                            pDst == NULL
//                            pIV  == NULL
//    ippStsContextMatchErr   !VALID_AES_ID()
//    ippStsLengthErr         len <1
//    ippStsUnderRunErr       0!=(dataLen%MBS_RIJ128)
//    ippStsNoErr             no errors
//
// Parameters:
//    pSrc        pointer to the source data buffer
//    pDst        pointer to the target data buffer
//    len         input/output buffer length (in bytes)
//    pCtx        pointer to the AES context
//    pIV         pointer to the initialization vector
//
// Note: with _OPENMP the ciphertext is split into per-thread chunks.
// Because CBC decryption of block i needs only ciphertext block i-1 as
// its "IV", the chunks are independent once each thread is seeded with
// the ciphertext block that precedes its chunk (pIV for the first one).
*F*/
IPPFUN(IppStatus, ippsAESDecryptCBC,(const Ipp8u* pSrc, Ipp8u* pDst, int len,
                                     const IppsAESSpec* pCtx,
                                     const Ipp8u* pIV))
{
   /* test context */
   IPP_BAD_PTR1_RET(pCtx);
   /* use aligned AES context */
   pCtx = (IppsAESSpec*)( IPP_ALIGNED_PTR(pCtx, AES_ALIGNMENT) );
   /* test the context ID */
   IPP_BADARG_RET(!VALID_AES_ID(pCtx), ippStsContextMatchErr);

   /* test source, target buffers and initialization pointers */
   IPP_BAD_PTR3_RET(pSrc, pIV, pDst);
   /* test stream length */
   IPP_BADARG_RET((len<1), ippStsLengthErr);
   /* test stream integrity: length must be a whole number of 16-byte blocks */
   IPP_BADARG_RET((len&(MBS_RIJ128-1)), ippStsUnderRunErr);

   /* do decryption */
   {
      int nBlocks = len / MBS_RIJ128;
#if !defined(_OPENMP)
      cpDecryptAES_cbc(pIV, pSrc, pDst, nBlocks, pCtx);
#else
      /* thread only when each thread gets enough blocks to amortize overhead;
         AES-NI contexts need more blocks per thread than the table-based path */
      int blk_per_thread = AES_NI_ENABLED==RIJ_AESNI(pCtx)? AESNI128_MIN_BLK_PER_THREAD : RIJ128_MIN_BLK_PER_THREAD;
      int nThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), IPP_MAX(nBlocks/blk_per_thread, 1));

      if(1==nThreads)
         cpDecryptAES_cbc(pIV, pSrc, pDst, nBlocks, pCtx);

      else {
         int blksThreadReg;   /* blocks per regular thread */
         int blksThreadTail;  /* blocks for the last thread (regular + remainder) */

         /* per-thread IVs: stack buffer when thread count fits, heap otherwise */
         Ipp8u locIV[MBS_RIJ128*DEFAULT_CPU_NUM];
         #if defined(__INTEL_COMPILER)
         Ipp8u* pLocIV = nThreads>DEFAULT_CPU_NUM? kmp_malloc(nThreads*MBS_RIJ128) : locIV;
         #else
         Ipp8u* pLocIV = nThreads>DEFAULT_CPU_NUM ? malloc(nThreads*MBS_RIJ128) : locIV;
         #endif

         if(pLocIV) {

            #pragma omp parallel IPPCP_OMP_LIMIT_MAX_NUM_THREADS(nThreads)
            {
               #pragma omp master
               {
                  int nt;
                  nThreads = omp_get_num_threads();
                  blksThreadReg = nBlocks / nThreads;
                  blksThreadTail = blksThreadReg + nBlocks % nThreads;

                  /* seed thread 0 with the caller's IV; every other thread is
                     seeded with the last ciphertext block of the previous chunk */
                  CopyBlock16(pIV, pLocIV+0);
                  for(nt=1; nt<nThreads; nt++)
                     CopyBlock16(pSrc+nt*blksThreadReg*MBS_RIJ128-MBS_RIJ128, pLocIV+nt*MBS_RIJ128);
               }
               /* all threads must wait until the master has filled pLocIV */
               #pragma omp barrier
               {
                  int id = omp_get_thread_num();
                  Ipp8u* pThreadIV  = (Ipp8u*)pLocIV +id*MBS_RIJ128;
                  Ipp8u* pThreadSrc = (Ipp8u*)pSrc + id*blksThreadReg * MBS_RIJ128;
                  Ipp8u* pThreadDst = (Ipp8u*)pDst + id*blksThreadReg * MBS_RIJ128;
                  /* the last thread also takes the remainder blocks */
                  int blkThread = (id==(nThreads-1))? blksThreadTail : blksThreadReg;
                  cpDecryptAES_cbc(pThreadIV, pThreadSrc, pThreadDst, blkThread, pCtx);
               }
            }

            /* release the IV array only if it was heap-allocated */
            if(pLocIV != locIV)
               #if defined(__INTEL_COMPILER)
               kmp_free(pLocIV);
               #else
               free(pLocIV);
               #endif
         }
         else
            return ippStsMemAllocErr;
      }
#endif

      return ippStsNoErr;
   }
}
MINDSSCbox.h
/// @brief Applies a box filter to image data /// @note Box filters are often used to efficiently approximate Gaussian filters /// /// @param [out] input is the image to filter /// @param [out] temp1 is a temporary buffer the same size as the input, provided by the caller to provide the function with memory necessary to perform the filter calculations /// @param [out] temp2 is a temporary buffer the same size as the input, provided by the caller to provide the function with memory necessary to perform the filter calculations /// @param [in] hw is /// @param [in] m is dimension #1 of the input image /// @param [in] n is dimension #2 of the input image /// @param [in] o is dimension #3 of the input image /// void boxfilter(float* input,float* temp1,float* temp2,int hw,int m,int n,int o){ // calculate length of the 'input' and 'temp1' vectors int sz=m*n*o; for(int i=0;i<sz;i++){ temp1[i]=input[i]; } for(int k=0;k<o;k++){ for(int j=0;j<n;j++){ for(int i=1;i<m;i++){ temp1[i+j*m+k*m*n]+=temp1[(i-1)+j*m+k*m*n]; } } } for(int k=0;k<o;k++){ for(int j=0;j<n;j++){ for(int i=0;i<(hw+1);i++){ temp2[i+j*m+k*m*n]=temp1[(i+hw)+j*m+k*m*n]; } for(int i=(hw+1);i<(m-hw);i++){ temp2[i+j*m+k*m*n]=temp1[(i+hw)+j*m+k*m*n]-temp1[(i-hw-1)+j*m+k*m*n]; } for(int i=(m-hw);i<m;i++){ temp2[i+j*m+k*m*n]=temp1[(m-1)+j*m+k*m*n]-temp1[(i-hw-1)+j*m+k*m*n]; } } } for(int k=0;k<o;k++){ for(int j=1;j<n;j++){ for(int i=0;i<m;i++){ temp2[i+j*m+k*m*n]+=temp2[i+(j-1)*m+k*m*n]; } } } for(int k=0;k<o;k++){ for(int i=0;i<m;i++){ for(int j=0;j<(hw+1);j++){ temp1[i+j*m+k*m*n]=temp2[i+(j+hw)*m+k*m*n]; } for(int j=(hw+1);j<(n-hw);j++){ temp1[i+j*m+k*m*n]=temp2[i+(j+hw)*m+k*m*n]-temp2[i+(j-hw-1)*m+k*m*n]; } for(int j=(n-hw);j<n;j++){ temp1[i+j*m+k*m*n]=temp2[i+(n-1)*m+k*m*n]-temp2[i+(j-hw-1)*m+k*m*n]; } } } for(int k=1;k<o;k++){ for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ temp1[i+j*m+k*m*n]+=temp1[i+j*m+(k-1)*m*n]; } } } for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ for(int k=0;k<(hw+1);k++){ 
input[i+j*m+k*m*n]=temp1[i+j*m+(k+hw)*m*n]; } for(int k=(hw+1);k<(o-hw);k++){ input[i+j*m+k*m*n]=temp1[i+j*m+(k+hw)*m*n]-temp1[i+j*m+(k-hw-1)*m*n]; } for(int k=(o-hw);k<o;k++){ input[i+j*m+k*m*n]=temp1[i+j*m+(o-1)*m*n]-temp1[i+j*m+(k-hw-1)*m*n]; } } } } /// @brief Shifts an image by the specified vectorial displacement /// /// @param [in] input is the image to shift /// @param [out] output is where the shifted image will reside /// @param [out] dx is the amount to shift the image along the horizontal axis /// @param [in] dy is the amount to shift the image along the vertical axis /// @param [in] dz is the amount to shift the image along the depth axis /// @param [in] m is dimension #1 of the input image /// @param [in] n is dimension #2 of the input image /// @param [in] o is dimension #3 of the input image /// void imshift(float* input,float* output,int dx,int dy,int dz,int m,int n,int o){ for(int k=0;k<o;k++){ for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ if(i+dy>=0&&i+dy<m&&j+dx>=0&&j+dx<n&&k+dz>=0&&k+dz<o) output[i+j*m+k*m*n]=input[i+dy+(j+dx)*m+(k+dz)*m*n]; else output[i+j*m+k*m*n]=input[i+j*m+k*m*n]; } } } } /*void *distances(void *threadarg) { struct mind_data *my_data; my_data = (struct mind_data *) threadarg; float* im1=my_data->im1; float* d1=my_data->d1; int qs=my_data->qs; int ind_d1=my_data->ind_d1; int m=image_m; int n=image_n; int o=image_o;*/ /// @brief /// /// @param [in] im1 is the image /// @param [out] d1 is /// @param [in] m is dimension #1 of the input image /// @param [in] n is dimension #2 of the input image /// @param [in] o is dimension #3 of the input image /// @param [in] qs is the quantisation /// @param [in] l /// void distances(float* im1,float* d1,int m,int n,int o,int qs,int l){ // calculates the total number of elements in the 3D im1 matrix // creates three temporary buffers the same size as the 3D im1 matrix int sz1=m*n*o; float* w1=new float[sz1]; int len1=6; float* temp1=new float[sz1]; float* temp2=new float[sz1]; int 
dx[6]={+qs,+qs,-qs,+0,+qs,+0}; int dy[6]={+qs,-qs,+0,-qs,+0,+qs}; int dz[6]={0,+0,+qs,+qs,+qs,+qs}; imshift(im1,w1,dx[l],dy[l],dz[l],m,n,o); for(int i=0;i<sz1;i++){ w1[i]=(w1[i]-im1[i])*(w1[i]-im1[i]); } boxfilter(w1,temp1,temp2,qs,m,n,o); for(int i=0;i<sz1;i++){ d1[i+l*sz1]=w1[i]; } delete temp1; delete temp2; delete w1; } /// @brief /// /// @param [in] qs stands for 'quantisation' /// //__builtin_popcountll(left[i]^right[i]); absolute hamming distances void descriptor(uint64_t* mindq,float* im1,int m,int n,int o,int qs){ timeval time1,time2; //MIND with self-similarity context int dx[6]={+qs,+qs,-qs,+0,+qs,+0}; int dy[6]={+qs,-qs,+0,-qs,+0,+qs}; int dz[6]={0,+0,+qs,+qs,+qs,+qs}; int sx[12]={-qs,+0,-qs,+0,+0,+qs,+0,+0,+0,-qs,+0,+0}; int sy[12]={+0,-qs,+0,+qs,+0,+0,+0,+qs,+0,+0,+0,-qs}; int sz[12]={+0,+0,+0,+0,-qs,+0,-qs,+0,-qs,+0,-qs,+0}; int index[12]={0,0,1,1,2,2,3,3,4,4,5,5}; float sigma=0.75;//1.0;//0.75;//1.5; int rho=ceil(sigma*1.5)*2+1; int len1=6; const int len2=12; image_d=12; int d=12; int sz1=m*n*o; pthread_t thread1, thread2, thread3; //============== DISTANCES USING BOXFILTER =================== float* d1=new float[sz1*len1]; gettimeofday(&time1, NULL); #pragma omp parallel for for(int l=0;l<len1;l++){ distances(im1,d1,m,n,o,qs,l); } gettimeofday(&time2, NULL); float timeMIND1=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6); gettimeofday(&time1, NULL); //quantisation table const int val=6; const unsigned long long power=32; #pragma omp parallel for for(int k=0;k<o;k++){ unsigned int tablei[6]={0,1,3,7,15,31}; float compare[val-1]; for(int i=0;i<val-1;i++){ compare[i]=-log((i+1.5f)/val); } float mind1[12]; for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ for(int l=0;l<len2;l++){ if(i+sy[l]>=0&&i+sy[l]<m&&j+sx[l]>=0&&j+sx[l]<n&&k+sz[l]>=0&&k+sz[l]<o){ mind1[l]=d1[i+sy[l]+(j+sx[l])*m+(k+sz[l])*m*n+index[l]*sz1]; } else{ mind1[l]=d1[i+j*m+k*m*n+index[l]*sz1]; } } float minval=*min_element(mind1,mind1+len2); float sumnoise=0.0f; for(int 
l=0;l<len2;l++){ mind1[l]-=minval; sumnoise+=mind1[l]; } float noise1=max(sumnoise/(float)len2,1e-6f); for(int l=0;l<len2;l++){ mind1[l]/=noise1; } unsigned long long accum=0; unsigned long long tabled1=1; for(int l=0;l<len2;l++){ //mind1[l]=exp(-mind1[l]); int mind1val=0; for(int c=0;c<val-1;c++){ mind1val+=compare[c]>mind1[l]?1:0; } //int mind1val=min(max((int)(mind1[l]*val-0.5f),0),val-1); accum+=tablei[mind1val]*tabled1; tabled1*=power; } mindq[i+j*m+k*m*n]=accum; } } } gettimeofday(&time2, NULL); float timeMIND2=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6); delete d1; }
pt_to_pt_haloexchange.c
/***************************************************************************** * * * Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 * * * * produced by * * * * Mark Bull, Jim Enright and Fiona Reid * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk * * * * * * Copyright 2012, The University of Edinburgh * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ /*-----------------------------------------------------------*/ /* Contains the point-to-point halo exchange mixed mode */ /* OpenMP/MPI benchmarks. */ /* This includes: -masteronly haloexchange */ /* -funnelled haloexchange */ /* -multiple haloexchange */ /*-----------------------------------------------------------*/ #include "pt_to_pt_haloexchange.h" /*-----------------------------------------------------------*/ /* haloExchange */ /* */ /* Driver subroutine for the haloExchange benchmark. 
*/
/*-----------------------------------------------------------*/
/* Driver for the halo-exchange benchmark: loops over data   */
/* sizes from minDataSize to maxDataSize (doubling each      */
/* pass), warms up, verifies, then times the selected        */
/* benchmark variant until the target run time is reached.   */
/* Returns 0.                                                */
/*-----------------------------------------------------------*/
int haloExchange(int benchmarkType){
   int dataSizeIter;

   /* find the ranks of the left and right neighbour */
   findNeighbours();

   /* initialise repsToDo to defaultReps */
   repsToDo = defaultReps;

   /* Start loop over data sizes */
   dataSizeIter = minDataSize; /* Initialise dataSizeIter */
   while (dataSizeIter <= maxDataSize){
      /* set sizeofBuffer: each thread owns dataSizeIter elements */
      sizeofBuffer = dataSizeIter * numThreads;

      /*Allocate space for the main data arrays */
      allocateHaloexchangeData(sizeofBuffer);

      /* perform benchmark warm-up */
      if (benchmarkType == MASTERONLY){
         masteronlyHaloexchange(warmUpIters, dataSizeIter);
      }
      else if (benchmarkType == FUNNELLED){
         funnelledHaloexchange(warmUpIters, dataSizeIter);
      }
      else if (benchmarkType == MULTIPLE){
         multipleHaloexchange(warmUpIters, dataSizeIter);
      }

      /* Each process performs a verification test */
      testHaloexchange(sizeofBuffer, dataSizeIter);

      /*Initialise the benchmark */
      benchComplete = FALSE;

      /*Execute benchmark until target time is reached */
      while (benchComplete != TRUE){
         /*Start timer */
         MPI_Barrier(comm);
         startTime = MPI_Wtime();

         /*Execute benchmarkType for repsToDo repetitions*/
         if (benchmarkType == MASTERONLY){
            masteronlyHaloexchange(repsToDo, dataSizeIter);
         }
         else if (benchmarkType == FUNNELLED){
            funnelledHaloexchange(repsToDo, dataSizeIter);
         }
         else if (benchmarkType == MULTIPLE){
            multipleHaloexchange(repsToDo, dataSizeIter);
         }

         /*Stop timer */
         MPI_Barrier(comm);
         finishTime = MPI_Wtime();
         totalTime = finishTime - startTime;

         /* Test if target time is reached with the number of reps;
          * only rank 0 decides, then the decision is broadcast. */
         if (myMPIRank==0){
            benchComplete = repTimeCheck(totalTime, repsToDo);
         }
         /* Ensure all procs have the same value of benchComplete */
         /* and repsToDo */
         MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
         MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
      }

      /* Master process sets benchmark results */
      if (myMPIRank == 0 ){
         setReportParams(dataSizeIter, repsToDo, totalTime);
         printReport();
      }

      /* Free allocated data */
      freeHaloexchangeData();

      /* Double dataSize and loop again */
      dataSizeIter = dataSizeIter * 2;
   }

   return 0;
}

/*-----------------------------------------------------------*/
/* masteronlyHaloexchange                                    */
/*                                                           */
/* Each process exchanges a message with its left and        */
/* right neighbour.                                          */
/* Communication takes place outside of the parallel         */
/* region.                                                   */
/*-----------------------------------------------------------*/
int masteronlyHaloexchange(int totalReps, int dataSize){
   int repIter, i;

   for (repIter=0; repIter<totalReps; repIter++){
      /* Each thread writes its globalID to rightSendBuf
       * and leftSendBuf using a parallel for directive.
       * NOTE(review): myThreadID is used under default(none) without
       * appearing in a data-sharing clause; presumably it is declared
       * threadprivate elsewhere in the suite -- confirm. */
#pragma omp parallel for default(none) \
   private(i) \
   shared(leftSendBuf,rightSendBuf,dataSize) \
   shared(sizeofBuffer,globalIDarray) \
   schedule(static,dataSize)
      for (i=0; i<sizeofBuffer; i++){
         leftSendBuf[i] = globalIDarray[myThreadID];
         rightSendBuf[i] = globalIDarray[myThreadID];
      }

      /* Process starts send of data to leftNeighbour and
       * rightNeighbour using non-blocking send...
       */
      MPI_Isend(leftSendBuf, sizeofBuffer, MPI_INT, leftNeighbour, \
                TAG, commCart, &requestArray[0]);
      MPI_Isend(rightSendBuf, sizeofBuffer, MPI_INT, rightNeighbour, \
                TAG, commCart, &requestArray[1]);

      /* Process then waits for messages from leftNeighbour and
       * rightNeighbour. */
      MPI_Irecv(leftRecvBuf, sizeofBuffer, MPI_INT, leftNeighbour, \
                TAG, commCart, &requestArray[2]);
      MPI_Irecv(rightRecvBuf, sizeofBuffer, MPI_INT, rightNeighbour, \
                TAG, commCart, &requestArray[3]);

      /* Finish the sends and receives with an MPI_Waitall on all
       * four requests */
      MPI_Waitall(4, requestArray, statusArray);

      /* Each thread now reads its part of the left and right
       * received buffers. */
#pragma omp parallel for default(none) \
   private(i) \
   shared(leftRecvBuf,rightRecvBuf,dataSize,sizeofBuffer) \
   shared(finalLeftBuf,finalRightBuf) \
   schedule(static,dataSize)
      for (i=0; i<sizeofBuffer; i++){
         finalLeftBuf[i] = leftRecvBuf[i];
         finalRightBuf[i] = rightRecvBuf[i];
      }
   }

   return 0;
}

/*-----------------------------------------------------------*/
/* funnelledHaloexchange                                     */
/*                                                           */
/* Each process exchanges a message with its left and        */
/* right neighbour.                                          */
/* Communication takes place by one thread inside of the     */
/* parallel region.                                          */
/*-----------------------------------------------------------*/
int funnelledHaloexchange(int totalReps, int dataSize){
   int repIter, i;

   /* Open the parallel region; it stays open for all repetitions
    * so only the buffer fill / drain loops are worksharing. */
#pragma omp parallel \
   private(i,repIter) \
   shared(dataSize,sizeofBuffer,leftSendBuf,rightSendBuf) \
   shared(rightRecvBuf,leftRecvBuf,finalLeftBuf,finalRightBuf) \
   shared(globalIDarray,commCart,totalReps,requestArray,statusArray) \
   shared(leftNeighbour,rightNeighbour)
   {
   for (repIter=0; repIter<totalReps; repIter++){

      /* Each thread writes its globalID to rightSendBuf
       * and leftSendBuf. */
#pragma omp for schedule(static,dataSize)
      for (i=0; i<sizeofBuffer; i++){
         leftSendBuf[i] = globalIDarray[myThreadID];
         rightSendBuf[i] = globalIDarray[myThreadID];
      }
      /* Implicit barrier here takes care of necessary synchronisation */

#pragma omp master
      {
         /* Master thread starts send of data to left and right neighbours
          * with a non-blocking send. */
         MPI_Isend(leftSendBuf, sizeofBuffer, MPI_INT, leftNeighbour, \
                   TAG, commCart, &requestArray[0]);
         MPI_Isend(rightSendBuf, sizeofBuffer, MPI_INT, rightNeighbour, \
                   TAG, commCart, &requestArray[1]);

         /* Thread then starts receive of messages from leftNeighbour
          * and rightNeighbour. */
         MPI_Irecv(leftRecvBuf, sizeofBuffer, MPI_INT, leftNeighbour, \
                   TAG, commCart, &requestArray[2]);
         MPI_Irecv(rightRecvBuf, sizeofBuffer, MPI_INT, rightNeighbour, \
                   TAG, commCart, &requestArray[3]);

         /* Finish the sends and receives with an MPI_Waitall
          * on the requests */
         MPI_Waitall(4, requestArray, statusArray);
      }

      /*Barrier to ensure master thread has completed transfer
       * (omp master has no implied barrier of its own). */
#pragma omp barrier

      /* Each thread now reads its part of the left and right
       * received buffers. */
#pragma omp for schedule(static,dataSize)
      for(i=0; i<sizeofBuffer; i++){
         finalLeftBuf[i] = leftRecvBuf[i];
         finalRightBuf[i] = rightRecvBuf[i];
      }
   }
   }

   return 0;
}

/*-----------------------------------------------------------*/
/* multipleHaloexchange                                      */
/*                                                           */
/* Each process exchanges a message with its left and        */
/* right neighbour.                                          */
/* All threads take part in the inter-porcess                */
/* communication.                                            */
/*-----------------------------------------------------------*/
int multipleHaloexchange(int totalReps, int dataSize){
   int repIter, i;
   int lBound;

   /* Open the parallel region; requestArray/statusArray are private
    * because every thread posts its own four requests, distinguished
    * on the wire by using myThreadID as the message tag. */
#pragma omp parallel \
   private(i,requestArray,statusArray,lBound,repIter) \
   shared(dataSize,sizeofBuffer,leftSendBuf,rightSendBuf) \
   shared(rightRecvBuf,leftRecvBuf,finalLeftBuf,finalRightBuf) \
   shared(leftNeighbour,rightNeighbour,globalIDarray,commCart,totalReps)
   {
   for (repIter=0; repIter<totalReps; repIter++){
      /* Calculate lower bound of this thread's chunk in the buffers */
      lBound = (myThreadID * dataSize);

      /* Each thread writes its globalID to rightSendBuf
       * and leftSendBuf. */
#pragma omp for nowait schedule(static,dataSize)
      for (i=0; i<sizeofBuffer; i++){
         leftSendBuf[i] = globalIDarray[myThreadID];
         rightSendBuf[i] = globalIDarray[myThreadID];
      }

      /* Each thread starts send of dataSize items to leftNeighbour
       * and to rightNeighbour. */
      MPI_Isend(&leftSendBuf[lBound], dataSize, MPI_INT, leftNeighbour, \
                myThreadID, commCart, &requestArray[0]);
      MPI_Isend(&rightSendBuf[lBound], dataSize, MPI_INT, rightNeighbour, \
                myThreadID, commCart, &requestArray[1]);

      /* Each Thread then starts receive of messages from leftNeighbour
       * and rightNeighbour. */
      MPI_Irecv(&leftRecvBuf[lBound], dataSize, MPI_INT, leftNeighbour, \
                myThreadID, commCart, &requestArray[2]);
      MPI_Irecv(&rightRecvBuf[lBound], dataSize, MPI_INT, rightNeighbour, \
                myThreadID, commCart, &requestArray[3]);

      /* Finish the sends and receives with an MPI_Waitall
       * on the requests */
      MPI_Waitall(4, requestArray, statusArray);

      /* Each thread now reads its part of the left and
       * right received buffers. */
#pragma omp for nowait schedule(static,dataSize)
      for (i=0; i<sizeofBuffer; i++){
         finalLeftBuf[i] = leftRecvBuf[i];
         finalRightBuf[i] = rightRecvBuf[i];
      }
   }
   }

   return 0;
}

/*-----------------------------------------------------------*/
/* allocateHaloexchangeData                                  */
/*                                                           */
/* Allocate memory for the main data arrays in the           */
/* haloexchange.                                             */
/* NOTE(review): malloc results are not checked; an          */
/* allocation failure would surface as a crash in the        */
/* benchmark loops.                                          */
/*-----------------------------------------------------------*/
int allocateHaloexchangeData(int sizeofBuffer){

   leftSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));
   leftRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));
   rightSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));
   rightRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));

   finalLeftBuf = (int *)malloc(sizeofBuffer * sizeof(int));
   finalRightBuf = (int *)malloc(sizeofBuffer * sizeof(int));

   return 0;
}

/*-----------------------------------------------------------*/
/* freeHaloexchangeData                                      */
/*                                                           */
/* Deallocates the storage space for the main data arrays.   */
/*-----------------------------------------------------------*/
int freeHaloexchangeData(){

   free(leftSendBuf);
   free(leftRecvBuf);
   free(rightSendBuf);
   free(rightRecvBuf);
   free(finalLeftBuf);
   free(finalRightBuf);

   return 0;
}

/*-----------------------------------------------------------*/
/* testHaloexchange                                          */
/*                                                           */
/* Verifies that the halo exchange benchmark worked          */
/* correctly: rebuilds the expected buffers from the         */
/* neighbour ranks, compares element-wise, and reduces the   */
/* per-process result to rank 0 with a logical AND.          */
/*-----------------------------------------------------------*/
int testHaloexchange(int sizeofBuffer, int dataSize){
   int i;
   int testFlag, reduceFlag;
   int *testLeftBuf, *testRightBuf;

   /* set testFlag to true */
   testFlag = TRUE;

   /*allocate space for testLeftBuf and testRightBuf */
   testLeftBuf = (int *)malloc(sizeofBuffer * sizeof(int));
   testRightBuf = (int *)malloc(sizeofBuffer * sizeof(int));

   /*construct testLeftBuf and testRightBuf with correct values */
#pragma omp parallel for default(none) \
   private(i) \
   shared(leftNeighbour,rightNeighbour,numThreads) \
   shared(dataSize,sizeofBuffer,testLeftBuf,testRightBuf) \
   schedule(static,dataSize)
   for (i=0; i<sizeofBuffer; i++){
      /* Calculate globalID of thread expected in finalLeftBuf.. */
      testLeftBuf[i] = (leftNeighbour * numThreads) + myThreadID;
      /* ..and in finalRightBuf. */
      testRightBuf[i] = (rightNeighbour * numThreads) + myThreadID;
   }

   /* Compare.. */
   for (i=0; i<sizeofBuffer; i++){
      /* 1) values from left neighbour */
      if (testLeftBuf[i] != finalLeftBuf[i]){
         testFlag = FALSE;
      }
      /* 2) values from right neighbour */
      if (testRightBuf[i] != finalRightBuf[i]){
         testFlag = FALSE;
      }
   }

   /* Logical AND across all processes: every rank must have passed */
   MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);

   /* Master then sets testOutcome flag */
   if (myMPIRank == 0){
      setTestOutcome(reduceFlag);
   }

   /* free space for testLeftBuf and testRightBuf */
   free(testLeftBuf);
   free(testRightBuf);

   return 0;
}
dropout-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file dropout-inl.h
 * \brief Dropout operator: CPU/GPU implementations with optional MKL and
 *        cuDNN fast paths, plus axis-shared ("variational") dropout.
 * \author Bing Xu, Da Zheng, Hang Zhang
 */

#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"
#include "../tensor/elemwise_binary_broadcast_op.h"

/* The MKL path needs OpenMP and is host-only (excluded under nvcc). */
#if (MSHADOW_USE_MKL == 1) && defined(_OPENMP) && !defined(__CUDACC__)
#define MXNET_USE_MKL_DROPOUT 1
#endif

#if MXNET_USE_MKL_DROPOUT
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif  // MXNET_USE_MKL_DROPOUT

#define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7

namespace dropout {
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};
enum DropoutOpForwardResource {kRandom};
enum DropoutOpMode {kTraining, kAlways};
}  // namespace dropout

namespace mxnet {
namespace op {

const int MAX_DIM = 5;

/*! \brief Operator parameters: drop probability, mode, shared axes, cudnn toggle. */
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float p;                         // fraction of input units dropped (0..1)
  int mode;                        // kTraining (train only) or kAlways
  mxnet::TShape axes;              // axes over which the mask is shared
  dmlc::optional<bool> cudnn_off;  // force-disable the cuDNN path
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
    DMLC_DECLARE_FIELD(mode)
    .add_enum("training", dropout::kTraining)
    .add_enum("always", dropout::kAlways)
    .set_default(dropout::kTraining)
    .describe("Whether to only turn on dropout during training or to also turn on for inference.");
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0))
    .describe("Axes for variational dropout kernel.");
    DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false))
    .describe("Whether to turn off cudnn in dropout operator. "
              "This option is ignored if axes is specified.");
  }
};  // struct DropoutParam

/*!
 * \brief Stateful dropout operator. Forward writes both the scaled output
 *        and the mask (kMask); Backward multiplies the incoming gradient by
 *        the saved mask. When dropout is inactive the op is a pass-through.
 */
template<typename xpu, typename DType>
class DropoutOp {
#if MXNET_USE_MKL_DROPOUT
  /* Fill r[0..n) with Bernoulli(p) draws using MKL VSL, partitioning the
   * range across OpenMP threads; vslSkipAheadStream keeps the per-thread
   * sub-streams consistent with a single logical stream. */
  static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
                                int n, double p, int* r) {
    typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
    const int seed = 17 + abs(genImpl.rand() % 4096);
    CHECK_GE(seed, 0);
    const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
    {
      const int ithr = omp_get_thread_num();
      const int avg_amount = (n + nthr - 1) / nthr;
      const int my_offset = ithr * avg_amount;
      const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
      if (my_amount > 0) {
        VSLStreamStatePtr stream;
        vslNewStream(&stream, VSL_BRNG_MCG31, seed);
        vslSkipAheadStream(stream, my_offset);
        viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
        vslDeleteStream(&stream);
      }
    }
  }
  static inline bool MKLAvailable() {
    // BernoulliGenerate expects an array of int, so for types smaller than
    // int the mask buffer would be too small; we can't use MKL in those cases.
    return sizeof(DType) >= sizeof(int);
  }

  // MKL forward pass
  inline void MKLForward(const OpContext &ctx,
                         const std::vector<TBlob> &in_data,
                         const std::vector<TBlob> &out_data) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
    CHECK_NOTNULL(pgen);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
    DType *outptr = out.dptr_;
    DType *dataptr = data.dptr_;
    /* The int Bernoulli draws are written over the mask storage when
     * sizeof(DType) == sizeof(int); otherwise a scratch buffer is used. */
    auto maskptr = reinterpret_cast<int *>(mask.dptr_);
    int count = mask.shape_[0] * mask.shape_[1];
    if (sizeof(DType) > sizeof(int)) {
      // allocating new buffer to avoid memory overlap between `mask.dptr_` and `maskptr`
      Tensor<xpu, 1, int> temp = ctx.requested[1].get_space_typed<xpu, 1, int>(Shape1(count), s);
      maskptr = temp.dptr_;
    }
    BernoulliGenerate(*pgen, count, this->pkeep_, maskptr);
    const float pk_1 = 1.0f / this->pkeep_;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (int i = 0; i < count; ++i) {
      /* inverted-dropout scaling: kept elements are multiplied by 1/pkeep */
      const DType maskVal = static_cast<DType>(maskptr[i]) * pk_1;
      outptr[i] = dataptr[i] * maskVal;
      mask.dptr_[i] = maskVal;
    }
  }

  // MKL backward pass: grad_in = grad_out * mask (mask already carries 1/pkeep)
  inline void MKLBackward(const OpContext &ctx, const std::vector<TBlob> &in_grad,
                          const std::vector<TBlob> &out_data,
                          const std::vector<TBlob> &out_grad) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
    DType *ingradptr = gdata.dptr_;
    const DType *outgradptr = grad.dptr_;
    const DType *maskptr = mask.dptr_;
    const int count = mask.shape_[0] * mask.shape_[1];
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (int i = 0; i < count; ++i) {
      ingradptr[i] = outgradptr[i] * maskptr[i];
    }
  }
#endif  // #if MXNET_USE_MKL_DROPOUT

 public:
  /*!
   * \brief Dropout kernel, compute dropout tensor
   */
  struct DropoutKernel {
    /*!
     * \brief Dropout kernel function
     * \param id Thread number (0-based representing count)
     * \param gen Random number generator
     * \param N Total number of items in the output
     * \param step Step between items, related to parallelism
     * \param dropout_out Output dropout values
     * \param mask_out  Output mask (is multiplied to create dropout output, may be 0)
     * \param input_data Input data to perform the dropout on
     * \param pkeep Dropout rate (keep when the generated random number is less than this value)
     */
    MSHADOW_XINLINE static void Map(index_t id,
                                    RandGenerator<xpu, DType> gen,
                                    const index_t N,
                                    const index_t step,
                                    DType *dropout_out,
                                    DType *mask_out,
                                    const DType *input_data,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
        dropout_out[i] = input_data[i] * mask_out[i];
      });
    }
  };
  struct BernoulliKernel {
    /*! \brief Bernoulli kernel for generating mask (used by axis-shared dropout) */
    MSHADOW_XINLINE static void Map(index_t id,
                                    RandGenerator<xpu, DType> gen,
                                    const index_t N,
                                    const index_t step,
                                    DType *mask_out,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
      });
    }
  };

  explicit DropoutOp(const DropoutParam &param, Context ctx) {
    this->pkeep_ = 1.0f - param.p;
    this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
    this->axes_ = param.axes;
    this->dropout_passthrough_ = true;
#if MXNET_USE_CUDNN_DROPOUT
    this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value();
    this->ctx_ = ctx;
    /* cuDNN descriptors are created only when the cuDNN path can actually
     * be taken (GPU context, pkeep > 0, not explicitly disabled); the
     * destructor mirrors this condition. */
    if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
      dtype_ = mshadow::DataType<DType>::kCudnnFlag;
      CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_));
    }
#endif  // MXNET_USE_CUDNN_DROPOUT
  }

  ~DropoutOp() {
#if MXNET_USE_CUDNN_DROPOUT
    if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
      CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_));
    }
#endif  // MXNET_USE_CUDNN_DROPOUT
  }

#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
  inline bool CuDNNAvailable() {
    return this->pkeep_ > 0 && !this->cudnn_off_;
  }

  inline void CuDNNForward(const OpContext &ctx,
                           const TBlob &in,
                           const TBlob &mask,
                           const TBlob &out) {
    Stream<xpu> *s = ctx.get_stream<xpu>();

    // set dropout state.
    ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_, seed_);

    // describe input/output tensor as a flat 1 x 1 x 1 x Size() shape
    int dim[4], stride[4];
    dim[0] = 1;
    dim[1] = 1;
    dim[2] = 1;
    dim[3] = out.Size();
    stride[0] = out.Size();
    stride[1] = out.Size();
    stride[2] = out.Size();
    stride[3] = 1;
    CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));
    CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));

    // perform dropout with cudnn
    CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_));
    // cudnn uses bits to record the positions that are dropped, so reserve bytes is always
    // 1/8 of input size.
    CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) <<
      "The size of the mask space is smaller than the required cudnn reserved space.";
    CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_,
                                   dropout_desc_,
                                   x_desc_,
                                   in.dptr<DType>(),
                                   y_desc_,
                                   out.dptr<DType>(),
                                   mask.dptr<DType>(),
                                   dropout_reserve_byte_));
  }

  inline void CuDNNBackward(const OpContext &ctx,
                            const TBlob &out_grad,
                            const TBlob &mask,
                            const TBlob &in_grad) {
    Stream<xpu> *s = ctx.get_stream<xpu>();

    // describe input/output tensor
    int dim[4], stride[4];
    dim[0] = 1;
    dim[1] = 1;
    dim[2] = 1;
    dim[3] = in_grad.Size();
    stride[0] = in_grad.Size();
    stride[1] = in_grad.Size();
    stride[2] = in_grad.Size();
    stride[3] = 1;
    CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));
    CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));

    // perform dropout with cudnn
    CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_,
                                    dropout_desc_,
                                    dy_desc_,
                                    out_grad.dptr<DType>(),
                                    dx_desc_,
                                    in_grad.dptr<DType>(),
                                    mask.dptr<DType>(),
                                    dropout_reserve_byte_));
  }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)

  void Forward(const OpContext &ctx,
               const std::vector<TBlob> &in_data,
               const std::vector<OpReqType> &req,
               const std::vector<TBlob> &out_data) {
    this->dropout_passthrough_ = true;
    if (req[dropout::kOut] != kNullOp) {
      CHECK_EQ(in_data.size(), 1U);
      if (ctx.is_train) {
        CHECK_EQ(out_data.size(), 2U);
      }
      Stream<xpu> *s = ctx.get_stream<xpu>();
      const TBlob &in = in_data[dropout::kData];
      const TBlob &out = out_data[dropout::kOut];
      const TBlob &mask = out_data[dropout::kMask];
      /* Dropout is active only when pkeep < 1 and we are training (or
       * mode is kAlways); otherwise fall through to identity copy below. */
      if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) {
        this->dropout_passthrough_ = false;
        if (this->axes_.ndim() == 0) {
          /* Element-wise dropout: try MKL, then cuDNN, then generic kernel. */
#if MXNET_USE_MKL_DROPOUT
          if (MKLAvailable()) {
            MKLForward(ctx, in_data, out_data);
            return;
          }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
          if (CuDNNAvailable()) {
            CuDNNForward(ctx, in, mask, out);
            return;
          }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
          RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
          CHECK_NOTNULL(pgen);
          CHECK(req[dropout::kOut] != kAddTo);
          LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
                                        out.dptr<DType>(),
                                        mask.dptr<DType>(),
                                        in.dptr<DType>(),
                                        this->pkeep_);
          return;
        } else {
          /* Axis-shared ("variational") dropout: generate a smaller mask,
           * then broadcast-multiply it over the input. */
          RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
          CHECK_NOTNULL(pgen);
          // initialize the mask
          LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(),
                                          mask.dptr<DType>(),
                                          this->pkeep_);
          // broadcast mul
          mxnet::TShape new_lshape, new_rshape, new_oshape;
          int ndim = BinaryBroadcastShapeCompact(in.shape_, mask.shape_, out.shape_,
                                                 &new_lshape, &new_rshape, &new_oshape);
          if (!ndim) {
            MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
              mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
                s, out.Size(), out.dptr<DType>(), in.dptr<DType>(), mask.dptr<DType>());
            });
          } else {
            BROADCAST_NDIM_SWITCH(ndim, NDim, {
              mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
              mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
              mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
              mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>::
              template LaunchEx(s, new_oshape.Size(), req[dropout::kOut],
                                lstride, rstride, oshape,
                                in.dptr<DType>(), mask.dptr<DType>(), out.dptr<DType>());
            });
          }
        }
      } else {
        /* Pass-through: copy input to output (skip if in-place). */
        if (req[dropout::kOut] == kWriteInplace) return;
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
            s, out.Size(), out.dptr<DType>(), in.dptr<DType>());
        });
      }
    }
  }

  void Backward(const OpContext &ctx,
                const std::vector<TBlob> &out_grad,
                const std::vector<TBlob> &out_data,
                const std::vector<OpReqType> &req,
                const std::vector<TBlob> &in_grad) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    /* dropout_passthrough_ records whether the last Forward actually
     * applied dropout; when it did not, the gradient is passed through. */
    if (!this->dropout_passthrough_) {
      this->dropout_passthrough_ = true;
      const TBlob &gdata = in_grad[dropout::kData];
      const TBlob &grad = out_grad[dropout::kOut];
      const TBlob &mask = out_data[dropout::kMask];
      if (this->axes_.ndim() == 0) {
#if MXNET_USE_MKL_DROPOUT
        if (MKLAvailable()) {
          MKLBackward(ctx, in_grad, out_data, out_grad);
          return;
        }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        if (CuDNNAvailable()) {
          CuDNNBackward(ctx, grad, mask, gdata);
          return;
        }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        // standard case for dropout
        CHECK_EQ(grad.Size(), mask.Size());
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
        return;
      } else {
        // broadcast mul
        mxnet::TShape new_lshape, new_rshape, new_oshape;
        int ndim = BinaryBroadcastShapeCompact(grad.shape_, mask.shape_, gdata.shape_,
                                               &new_lshape, &new_rshape, &new_oshape);
        if (!ndim) {
          MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
              s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
          });
        } else {
          BROADCAST_NDIM_SWITCH(ndim, NDim, {
            mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
            mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
            mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
            mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>::
            template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape,
                              grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>());
          });
        }
      }
    } else {
      const TBlob& gdata = in_grad[dropout::kData];
      const TBlob& grad = out_grad[dropout::kOut];
      MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
          s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
      });
    }
  }

 private:
  /*! \brief Dropout rate (keep when the generated random number is less than this value) */
  real_t pkeep_;
  /*! \brief Dropout mode */
  dropout::DropoutOpMode mode_;
  /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */
  mxnet::TShape axes_;
  /*! \brief Flag to record whether forward is executed in pass-through mode */
  bool dropout_passthrough_;
#if MXNET_USE_CUDNN_DROPOUT
  bool cudnn_off_;
  Context ctx_;
  cudnnDataType_t dtype_;
  cudnnDropoutDescriptor_t dropout_desc_;
  uint64_t seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  size_t dropout_reserve_byte_;
  cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_;
#endif  // MXNET_USE_CUDNN_DROPOUT
};  // class DropoutOp

/*! \brief Stateful-op forward entry point: dispatches on the input dtype. */
template<typename xpu>
void DropoutCompute(const OpStatePtr& state,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Forward(ctx, inputs, req, outputs);
  });
}

/*! \brief Stateful-op backward entry point: repackages inputs as
 *  (out_grad, saved mask) before delegating to DropoutOp::Backward. */
template<typename xpu>
void DropoutGradCompute(const OpStatePtr& state,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1);
  CHECK_EQ(req.size(), 1);
  std::vector<TBlob> out_grads(2);
  std::vector<TBlob> out_data(2);
  out_grads[dropout::kOut] = inputs[0];
  out_data[dropout::kMask] = inputs[1];

  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Backward(ctx, out_grads, out_data, req, outputs);
  });
}

}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_NN_DROPOUT_INL_H_
par_nongalerkin.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "../HYPRE.h"

/* This file contains the routines for constructing non-Galerkin coarse grid
 * operators, based on the original Galerkin coarse grid
 */

/* Gather array[ indices[k] ] for k = start, start+1, ..., end (inclusive)
 * into output, preserving order.
 * Assumptions:
 *    output is of length end-start+1
 *    indices never contains an index that goes out of bounds in array
 * Returns 0.
 */
HYPRE_Int
hypre_GrabSubArray(HYPRE_Int * indices,
                   HYPRE_Int start,
                   HYPRE_Int end,
                   HYPRE_BigInt * array,
                   HYPRE_BigInt * output)
{
   HYPRE_Int k;
   HYPRE_Int count = end - start + 1;

   for (k = 0; k < count; k++)
   {
      output[k] = array[ indices[start + k] ];
   }

   return 0;
}

/* Merge-style intersection of the sorted arrays x and y, written to z.
 * Whenever x[k] lands in z[m], the companion value x_data[k] is copied to
 * output_x_data[m].  On return, *intersect_length holds the number of
 * common entries found.
 *
 * Assumptions:
 *    z is of length min(x_length, y_length)
 *    x and y are sorted
 *    x_length and y_length are similar in size; otherwise, looping over
 *    the smaller array with binary search in the longer one is faster.
 * Returns 1.
 */
HYPRE_Int
hypre_IntersectTwoArrays(HYPRE_Int *x,
                         HYPRE_Real *x_data,
                         HYPRE_Int x_length,
                         HYPRE_Int *y,
                         HYPRE_Int y_length,
                         HYPRE_Int *z,
                         HYPRE_Real *output_x_data,
                         HYPRE_Int *intersect_length)
{
   HYPRE_Int ix = 0;
   HYPRE_Int iy = 0;
   HYPRE_Int nz = 0;

   /* Standard two-pointer merge: advance the cursor holding the smaller
    * value; on a match, record it and advance both. */
   while ( (ix < x_length) && (iy < y_length) )
   {
      if (x[ix] < y[iy])
      {
         ix++;
      }
      else if (x[ix] > y[iy])
      {
         iy++;
      }
      else
      {
         z[nz] = x[ix];
         output_x_data[nz] = x_data[ix];
         ix++;
         iy++;
         nz++;
      }
   }

   *intersect_length = nz;
   return 1;
}

/* Identical to hypre_IntersectTwoArrays, except the keys being
 * intersected are HYPRE_BigInt. */
HYPRE_Int
hypre_IntersectTwoBigArrays(HYPRE_BigInt *x,
                            HYPRE_Real *x_data,
                            HYPRE_Int x_length,
                            HYPRE_BigInt *y,
                            HYPRE_Int y_length,
                            HYPRE_BigInt *z,
                            HYPRE_Real *output_x_data,
                            HYPRE_Int *intersect_length)
{
   HYPRE_Int ix = 0;
   HYPRE_Int iy = 0;
   HYPRE_Int nz = 0;

   while ( (ix < x_length) && (iy < y_length) )
   {
      if (x[ix] < y[iy])
      {
         ix++;
      }
      else if (x[ix] > y[iy])
      {
         iy++;
      }
      else
      {
         z[nz] = x[ix];
         output_x_data[nz] = x_data[ix];
         ix++;
         iy++;
         nz++;
      }
   }

   *intersect_length = nz;
   return 1;
}

/* Copy CSR matrix A to CSR matrix B.  The column indices are
 * assumed to be sorted, and the sparsity pattern of B is a subset
 * of the sparsity pattern of A.
* * Assumptions: * Column indices of A and B are sorted * Sparsity pattern of B is a subset of A's * A and B are the same size and have same data layout **/ HYPRE_Int hypre_SortedCopyParCSRData(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B) { /* Grab off A and B's data structures */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Real *B_diag_data = hypre_CSRMatrixData(B_diag); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Real *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *temp_int_array = NULL; HYPRE_Int temp_int_array_length=0; HYPRE_Int i, length, offset_A, offset_B; for(i = 0; i < num_variables; i++) { /* Deal with the first row entries, which may be diagonal elements */ if( A_diag_j[A_diag_i[i]] == i) { offset_A = 1; } else { offset_A = 0; } if( B_diag_j[B_diag_i[i]] == i) { offset_B = 1; } else { offset_B = 0; } if( (offset_B == 1) && (offset_A == 1) ) { B_diag_data[B_diag_i[i]] = A_diag_data[A_diag_i[i]]; } /* This finds the intersection of the column indices, and * also copies the matching data in A to the data array in B **/ if( (A_diag_i[i+1] - A_diag_i[i] - offset_A) > temp_int_array_length ) { hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST); temp_int_array_length = (A_diag_i[i+1] - A_diag_i[i] - offset_A); temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length, 
HYPRE_MEMORY_HOST); } hypre_IntersectTwoArrays(&(A_diag_j[A_diag_i[i] + offset_A]), &(A_diag_data[A_diag_i[i] + offset_A]), A_diag_i[i+1] - A_diag_i[i] - offset_A, &(B_diag_j[B_diag_i[i] + offset_B]), B_diag_i[i+1] - B_diag_i[i] - offset_B, temp_int_array, &(B_diag_data[B_diag_i[i] + offset_B]), &length); if( (A_offd_i[i+1] - A_offd_i[i]) > temp_int_array_length ) { hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST); temp_int_array_length = (A_offd_i[i+1] - A_offd_i[i]); temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length, HYPRE_MEMORY_HOST); } hypre_IntersectTwoArrays(&(A_offd_j[A_offd_i[i]]), &(A_offd_data[A_offd_i[i]]), A_offd_i[i+1] - A_offd_i[i], &(B_offd_j[B_offd_i[i]]), B_offd_i[i+1] - B_offd_i[i], temp_int_array, &(B_offd_data[B_offd_i[i]]), &length); } if(temp_int_array) { hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST); } return 1; } /* * Equivalent to hypre_BoomerAMGCreateS, except, the data array of S * is not Null and contains the data entries from A. */ HYPRE_Int hypre_BoomerAMG_MyCreateS(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix 
*S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; HYPRE_Real *S_diag_data; hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; HYPRE_Real *S_offd_data; HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *dof_func_offd; HYPRE_Int num_sends; HYPRE_Int *int_buf_data; HYPRE_Int index, start, j; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = aij, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. *----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; /* Initialize S */ S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); /* row_starts is owned by A, col_starts = row_starts */ hypre_ParCSRMatrixSetRowStartsOwner(S,0); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); hypre_CSRMatrixData(S_diag) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); S_diag_j = hypre_CSRMatrixJ(S_diag); S_diag_data = hypre_CSRMatrixData(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(S_offd) = 
HYPRE_MEMORY_HOST; dof_func_offd = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); hypre_CSRMatrixData(S_offd) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_offd_j = hypre_CSRMatrixJ(S_offd); S_offd_data = hypre_CSRMatrixData(S_offd); hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /* give S same nonzero structure as A */ hypre_ParCSRMatrixCopy(A,S,1); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_variables; i++) { diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_max(row_scale, A_diag_data[jA]); 
row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } } else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* compute row entries of S */ S_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_offd_j[jA] = -1; } } else { if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_offd_j[jA] = -1; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= strength_threshold * row_scale || 
dof_func[i] != dof_func[A_diag_j[jA]]) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_offd_j[jA] = -1; } } } } else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] <= strength_threshold * row_scale) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] <= strength_threshold * row_scale) { S_offd_j[jA] = -1; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= strength_threshold * row_scale) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= strength_threshold * row_scale) { S_offd_j[jA] = -1; } } } } } } /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may not be removed, the * non-Galerkin routine depends on it. 
*----------------------------------------------------------------*/

   /* Compress S_diag in place: keep only surviving entries (column index
    * still > -1), shifting them forward and rebuilding the row pointer. */
   /* RDF: not sure if able to thread this loop */
   jS = 0;
   for (i = 0; i < num_variables; i++)
   {
      S_diag_i[i] = jS;
      for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
      {
         if (S_diag_j[jA] > -1)
         {
            S_diag_j[jS] = S_diag_j[jA];
            S_diag_data[jS] = S_diag_data[jA];
            jS++;
         }
      }
   }
   S_diag_i[num_variables] = jS;
   hypre_CSRMatrixNumNonzeros(S_diag) = jS;

   /* Compress S_offd the same way. */
   /* RDF: not sure if able to thread this loop */
   jS = 0;
   for (i = 0; i < num_variables; i++)
   {
      S_offd_i[i] = jS;
      for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
      {
         if (S_offd_j[jA] > -1)
         {
            S_offd_j[jS] = S_offd_j[jA];
            S_offd_data[jS] = S_offd_data[jA];
            jS++;
         }
      }
   }
   S_offd_i[num_variables] = jS;
   hypre_CSRMatrixNumNonzeros(S_offd) = jS;

   /* S gets a fresh comm pkg built on demand later */
   hypre_ParCSRMatrixCommPkg(S) = NULL;

   *S_ptr = S;

   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);

   return (ierr);
}

/**
 * Initialize the IJBuffer counters: zero the entry count, point the
 * row counter at the first (next) row, and mark that row as empty.
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferInit( HYPRE_Int     *ijbuf_cnt,        /* See NonGalerkinIJBufferWrite for parameter descriptions */
                               HYPRE_Int     *ijbuf_rowcounter,
                               HYPRE_Int     *ijbuf_numcols )
{
   HYPRE_Int   ierr = 0;

   (*ijbuf_cnt)        = 0;
   (*ijbuf_rowcounter) = 1; /*Always points to the next row*/
   ijbuf_numcols[0]    = 0;

   return ierr;
}

/**
 * Initialize the IJBuffer counters
 * (variant whose numcols array is HYPRE_BigInt rather than HYPRE_Int)
 **/
HYPRE_Int
hypre_NonGalerkinIJBigBufferInit( HYPRE_Int     *ijbuf_cnt,       /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                  HYPRE_Int     *ijbuf_rowcounter,
                                  HYPRE_BigInt  *ijbuf_numcols )
{
   HYPRE_Int   ierr = 0;

   (*ijbuf_cnt)        = 0;
   (*ijbuf_rowcounter) = 1; /*Always points to the next row*/
   ijbuf_numcols[0]    = 0;

   return ierr;
}

/**
 * Update the buffer counters when a new row begins.
 * Reuses the current row slot if that row never received an entry.
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferNewRow(HYPRE_BigInt  *ijbuf_rownums,    /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                HYPRE_Int     *ijbuf_numcols,
                                HYPRE_Int     *ijbuf_rowcounter,
                                HYPRE_BigInt   new_row)
{
   HYPRE_Int   ierr = 0;

   /* First check to see if the previous row was empty, and if so, overwrite that row */
   if( ijbuf_numcols[(*ijbuf_rowcounter)-1] == 0 )
   {
ijbuf_rownums[(*ijbuf_rowcounter)-1] = new_row;
   }
   else
   {
      /* Move to the next row */
      ijbuf_rownums[(*ijbuf_rowcounter)] = new_row;
      ijbuf_numcols[(*ijbuf_rowcounter)] = 0;
      (*ijbuf_rowcounter)++;
   }

   return ierr;
}

/**
 * Compress the current row in an IJ Buffer by removing duplicate entries.
 * The row's entries are sorted by column; duplicate columns have their
 * values summed and the trailing slots reclaimed.
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferCompressRow( HYPRE_Int      *ijbuf_cnt,      /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                      HYPRE_Int       ijbuf_rowcounter,
                                      HYPRE_Real     *ijbuf_data,
                                      HYPRE_BigInt   *ijbuf_cols,
                                      HYPRE_BigInt   *ijbuf_rownums,
                                      HYPRE_Int      *ijbuf_numcols)
{
   HYPRE_Int   ierr = 0;
   HYPRE_Int   nentries, i, nduplicate;

   /* Compress the current row by removing any repeat entries,
    * making sure to decrement ijbuf_cnt by nduplicate */
   nentries = ijbuf_numcols[ ijbuf_rowcounter-1 ];
   nduplicate = 0;
   /* Sort this row's slice [cnt-nentries, cnt-1] by column index,
    * carrying the data array along */
   hypre_BigQsort1(ijbuf_cols, ijbuf_data, (*ijbuf_cnt)-nentries, (*ijbuf_cnt)-1 );

   for(i =(*ijbuf_cnt)-nentries+1; i <= (*ijbuf_cnt)-1; i++)
   {
      if( ijbuf_cols[i] == ijbuf_cols[i-1] )
      {
         /* Shift duplicate entry down */
         nduplicate++;
         ijbuf_data[i - nduplicate] += ijbuf_data[i];
      }
      else if(nduplicate > 0)
      {
         ijbuf_data[i - nduplicate] = ijbuf_data[i];
         ijbuf_cols[i - nduplicate] = ijbuf_cols[i];
      }
   }

   (*ijbuf_cnt) -= nduplicate;
   ijbuf_numcols[ ijbuf_rowcounter-1 ] -= nduplicate;

   return ierr;
}

/**
 * Compress the entire buffer, removing duplicate rows (a row number can
 * appear twice when the same row was re-opened by later writes).
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferCompress( HYPRE_Int       ijbuf_size,
                                   HYPRE_Int      *ijbuf_cnt,      /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                   HYPRE_Int      *ijbuf_rowcounter,
                                   HYPRE_Real    **ijbuf_data,
                                   HYPRE_BigInt  **ijbuf_cols,
                                   HYPRE_BigInt  **ijbuf_rownums,
                                   HYPRE_Int     **ijbuf_numcols)
{
   HYPRE_Int      ierr   = 0;
   HYPRE_Int     *indys  = hypre_CTAlloc(HYPRE_Int, (*ijbuf_rowcounter) , HYPRE_MEMORY_HOST);

   HYPRE_Int      i, j, duplicate, cnt_new, rowcounter_new, prev_row;
   HYPRE_Int      row_loc;
   HYPRE_BigInt   row_start, row_stop, row;

   HYPRE_Real    *data_new;
   HYPRE_BigInt  *cols_new;
   HYPRE_BigInt  *rownums_new;
   HYPRE_Int     *numcols_new;

   /* Do a sort on the
original order in indys.
    * Then see if there are any duplicate rows */
   for(i = 0; i < (*ijbuf_rowcounter); i++)
   {
      indys[i] = i;
   }
   hypre_BigQsortbi((*ijbuf_rownums), indys, 0, (*ijbuf_rowcounter)-1);
   duplicate = 0;
   /* After sorting, a gap in the original-position sequence means some row
    * number appeared more than once in the buffer */
   for(i = 1; i < (*ijbuf_rowcounter); i++)
   {
      if(indys[i] != (indys[i-1]+1))
      {
         duplicate = 1;
         break;
      }
   }

   /* Compress duplicate rows */
   if(duplicate)
   {
      /* Accumulate numcols, so that it functions like a CSR row-pointer */
      for(i = 1; i < (*ijbuf_rowcounter); i++)
      {
         (*ijbuf_numcols)[i] += (*ijbuf_numcols)[i-1];
      }

      /* Initialize new buffer */
      prev_row       = -1;
      rowcounter_new = 0;
      cnt_new        = 0;
      data_new       = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
      cols_new       = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
      rownums_new    = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
      numcols_new    = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
      numcols_new[0] = 0;

      /* Cycle through each row */
      for(i = 0; i < (*ijbuf_rowcounter); i++)
      {
         /* Find which row this is in local and global numberings, and where
          * this row's data starts and stops in the buffer*/
         row_loc = indys[i];
         row     = (*ijbuf_rownums)[i];
         if(row_loc > 0)
         {
            row_start = (*ijbuf_numcols)[row_loc-1];
            row_stop  = (*ijbuf_numcols)[row_loc];
         }
         else
         {
            row_start = 0;
            row_stop  = (*ijbuf_numcols)[row_loc];
         }

         /* Is this a new row?  If so, compress previous row, and add a new
          * one.  Noting that prev_row = -1 is a special value */
         if(row != prev_row)
         {
            if(prev_row != -1)
            {
               /* Compress previous row */
               hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
                                                    cols_new, rownums_new, numcols_new);
            }
            prev_row = row;
            numcols_new[rowcounter_new] = 0;
            rownums_new[rowcounter_new] = row;
            rowcounter_new++;
         }

         /* Copy row into new buffer */
         for(j = row_start; j < row_stop; j++)
         {
            data_new[cnt_new] = (*ijbuf_data)[j];
            cols_new[cnt_new] = (*ijbuf_cols)[j];
            numcols_new[rowcounter_new-1]++;
            cnt_new++;
         }
      }

      /* Compress the final row.
       * NOTE(review): the guard is on the loop index i (== *ijbuf_rowcounter
       * after the loop), not rowcounter_new -- confirm that is intended. */
      if(i > 1)
      {
         hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
                                              cols_new, rownums_new, numcols_new);
      }

      *ijbuf_cnt        = cnt_new;
      *ijbuf_rowcounter = rowcounter_new;

      /* Point to the new buffer */
      hypre_TFree(*ijbuf_data, HYPRE_MEMORY_DEVICE);
      hypre_TFree(*ijbuf_cols, HYPRE_MEMORY_DEVICE);
      hypre_TFree(*ijbuf_rownums, HYPRE_MEMORY_DEVICE);
      hypre_TFree(*ijbuf_numcols, HYPRE_MEMORY_DEVICE);
      (*ijbuf_data)    = data_new;
      (*ijbuf_cols)    = cols_new;
      (*ijbuf_rownums) = rownums_new;
      (*ijbuf_numcols) = numcols_new;
   }

   hypre_TFree(indys, HYPRE_MEMORY_HOST);

   return ierr;
}

/**
 * Do a buffered write to an IJ matrix.
 * That is, write to the buffer, until the buffer is full.
Then when the
 * buffer is full, write to the IJ matrix and reset the buffer counters
 *
 * In effect, this buffers this operation
 *   A[row_to_write, col_to_write] += val_to_write
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferWrite( HYPRE_IJMatrix B,                /* Unassembled matrix to add an entry to */
                                HYPRE_Int     *ijbuf_cnt,        /* current buffer size */
                                HYPRE_Int      ijbuf_size,       /* max buffer size */
                                HYPRE_Int     *ijbuf_rowcounter, /* num of rows in rownums, (i.e., size of rownums) */
                                                                 /* This counter will increase as you call this function for multiple rows */
                                HYPRE_Real   **ijbuf_data,       /* Array of values, of size ijbuf_size */
                                HYPRE_BigInt **ijbuf_cols,       /* Array of col indices, of size ijbuf_size */
                                HYPRE_BigInt **ijbuf_rownums,    /* Holds row-indices that with numcols makes for a CSR-like data structure*/
                                HYPRE_Int    **ijbuf_numcols,    /* rownums[i] is the row num, and numcols holds the number of entries being added */
                                                                 /* for that row.  Note numcols is not cumulative like an actual CSR data structure*/
                                HYPRE_BigInt   row_to_write,     /* Entry to add to the buffer */
                                HYPRE_BigInt   col_to_write,     /* Ditto */
                                HYPRE_Real     val_to_write )    /* Ditto */
{
   HYPRE_Int   ierr = 0;

   if( (*ijbuf_cnt) == 0 )
   {
      /* brand new buffer: increment buffer structures for the new row */
      hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }
   else if((*ijbuf_rownums)[ (*ijbuf_rowcounter)-1 ] != row_to_write)
   {
      /* If this is a new row, compress the previous row */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      /* increment buffer structures for the new row */
      hypre_NonGalerkinIJBufferNewRow( (*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }

   /* Add new entry to buffer */
   (*ijbuf_cols)[(*ijbuf_cnt)] = col_to_write;
   (*ijbuf_data)[(*ijbuf_cnt)] = val_to_write;
   (*ijbuf_numcols)[ (*ijbuf_rowcounter)-1 ]++;
   (*ijbuf_cnt)++;

   /* Buffer is full, write to the matrix object */
   if ( (*ijbuf_cnt) == (ijbuf_size-1) )
   {
      /* If the last row is empty, decrement rowcounter */
      if( (*ijbuf_numcols)[ (*ijbuf_rowcounter)-1 ] == 0)
      {
         (*ijbuf_rowcounter)--;
      }

      /* Compress and Add Entries */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, ijbuf_rowcounter, ijbuf_data,
                                        ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
      ierr += HYPRE_IJMatrixAddToValues(B, *ijbuf_rowcounter, (*ijbuf_numcols),
                                        (*ijbuf_rownums), (*ijbuf_cols), (*ijbuf_data));

      /* Reinitialize the buffer */
      hypre_NonGalerkinIJBufferInit( ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }

   return ierr;
}

/**
 * Empty the IJ Buffer with a final AddToValues.
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferEmpty(HYPRE_IJMatrix B,                 /* See NonGalerkinIJBufferWrite for parameter descriptions */
                               HYPRE_Int      ijbuf_size,
                               HYPRE_Int     *ijbuf_cnt,
                               HYPRE_Int      ijbuf_rowcounter,
                               HYPRE_Real   **ijbuf_data,
                               HYPRE_BigInt **ijbuf_cols,
                               HYPRE_BigInt **ijbuf_rownums,
                               HYPRE_Int    **ijbuf_numcols)
{
   HYPRE_Int   ierr = 0;

   if( (*ijbuf_cnt) > 0)
   {
      /* Compress the last row and then write */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, &ijbuf_rowcounter, ijbuf_data,
                                        ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
      ierr += HYPRE_IJMatrixAddToValues(B, ijbuf_rowcounter, (*ijbuf_numcols),
                                        (*ijbuf_rownums), (*ijbuf_cols), (*ijbuf_data));
   }
   (*ijbuf_cnt = 0);

   return ierr;
}

/*
 * Construct sparsity pattern based on R_I A P, plus entries required by drop tolerance
 */
hypre_ParCSRMatrix *
hypre_NonGalerkinSparsityPattern(hypre_ParCSRMatrix *R_IAP,
                                 hypre_ParCSRMatrix *RAP,
                                 HYPRE_Int * CF_marker,
                                 HYPRE_Real droptol,
                                 HYPRE_Int sym_collapse,
                                 HYPRE_Int collapse_beta )
{
   /* MPI Communicator */
   MPI_Comm             comm =
hypre_ParCSRMatrixComm(RAP);

   /* Declare R_IAP */
   hypre_CSRMatrix     *R_IAP_diag         = hypre_ParCSRMatrixDiag(R_IAP);
   HYPRE_Int           *R_IAP_diag_i       = hypre_CSRMatrixI(R_IAP_diag);
   HYPRE_Int           *R_IAP_diag_j       = hypre_CSRMatrixJ(R_IAP_diag);

   hypre_CSRMatrix     *R_IAP_offd         = hypre_ParCSRMatrixOffd(R_IAP);
   HYPRE_Int           *R_IAP_offd_i       = hypre_CSRMatrixI(R_IAP_offd);
   HYPRE_Int           *R_IAP_offd_j       = hypre_CSRMatrixJ(R_IAP_offd);
   HYPRE_BigInt        *col_map_offd_R_IAP = hypre_ParCSRMatrixColMapOffd(R_IAP);

   /* Declare RAP */
   hypre_CSRMatrix     *RAP_diag           = hypre_ParCSRMatrixDiag(RAP);
   HYPRE_Int           *RAP_diag_i         = hypre_CSRMatrixI(RAP_diag);
   HYPRE_Real          *RAP_diag_data      = hypre_CSRMatrixData(RAP_diag);
   HYPRE_Int           *RAP_diag_j         = hypre_CSRMatrixJ(RAP_diag);
   HYPRE_BigInt         first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
   HYPRE_Int            num_cols_diag_RAP  = hypre_CSRMatrixNumCols(RAP_diag);
   HYPRE_BigInt         last_col_diag_RAP  = first_col_diag_RAP + (HYPRE_BigInt)num_cols_diag_RAP - 1;

   hypre_CSRMatrix     *RAP_offd           = hypre_ParCSRMatrixOffd(RAP);
   HYPRE_Int           *RAP_offd_i         = hypre_CSRMatrixI(RAP_offd);
   HYPRE_Real          *RAP_offd_data      = NULL;
   HYPRE_Int           *RAP_offd_j         = hypre_CSRMatrixJ(RAP_offd);
   HYPRE_BigInt        *col_map_offd_RAP   = hypre_ParCSRMatrixColMapOffd(RAP);
   HYPRE_Int            num_cols_RAP_offd  = hypre_CSRMatrixNumCols(RAP_offd);

   HYPRE_Int            num_variables      = hypre_CSRMatrixNumRows(RAP_diag);

   /* Declare A */
   HYPRE_Int            num_fine_variables = hypre_CSRMatrixNumRows(R_IAP_diag);

   /* Declare IJ matrices */
   HYPRE_IJMatrix       Pattern;
   hypre_ParCSRMatrix  *Pattern_CSR        = NULL;

   /* Buffered IJAddToValues */
   HYPRE_Int            ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
   HYPRE_Real          *ijbuf_data;
   HYPRE_BigInt        *ijbuf_cols, *ijbuf_rownums;
   HYPRE_Int           *ijbuf_numcols;

   /* Buffered IJAddToValues for Symmetric Entries */
   HYPRE_Int            ijbuf_sym_cnt, ijbuf_sym_rowcounter;
   HYPRE_Real          *ijbuf_sym_data;
   HYPRE_BigInt        *ijbuf_sym_cols, *ijbuf_sym_rownums;
   HYPRE_Int           *ijbuf_sym_numcols;

   /* Other Declarations */
   HYPRE_Int            ierr           = 0;
   HYPRE_Real           max_entry      = 0.0;
   HYPRE_Real           max_entry_offd = 0.0;
   HYPRE_Int           *rownz          = NULL;
   HYPRE_Int            i, j, Cpt;
   HYPRE_BigInt         row_start, row_end, global_row, global_col;

   /* Other Setup */
   if (num_cols_RAP_offd)
   {
      RAP_offd_data = hypre_CSRMatrixData(RAP_offd);
   }

   /*
    * Initialize the IJ matrix, leveraging our rough knowledge of the
    * nonzero structure of Pattern based on RAP
    *
    *                          ilower,             iupper,            jlower,             jupper */
   ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP, first_col_diag_RAP, last_col_diag_RAP, &Pattern);
   ierr += HYPRE_IJMatrixSetObjectType(Pattern, HYPRE_PARCSR);
   /* Row-size estimate: 20% head room over the corresponding RAP row widths */
   rownz = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
   for(i = 0; i < num_variables; i++)
   {
      rownz[i] = 1.2*(RAP_diag_i[i+1] - RAP_diag_i[i]) + 1.2*(RAP_offd_i[i+1] - RAP_offd_i[i]);
   }
   HYPRE_IJMatrixSetRowSizes(Pattern, rownz);
   ierr += HYPRE_IJMatrixInitialize(Pattern);
   hypre_TFree(rownz, HYPRE_MEMORY_HOST);

   /*
    * For efficiency, we do a buffered IJAddToValues.
    * Here, we initialize the buffer and then initialize the buffer counters
    */
   ijbuf_size    = 1000;
   ijbuf_data    = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
   ijbuf_cols    = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
   ijbuf_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
   ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
   /* NOTE(review): ijbuf_cols is passed where the Init routine's parameter is
    * named ijbuf_numcols (it zeroes element 0) -- confirm this is intended. */
   hypre_NonGalerkinIJBigBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
   if(sym_collapse)
   {
      ijbuf_sym_data    = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
      ijbuf_sym_cols    = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
      ijbuf_sym_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
      ijbuf_sym_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
      hypre_NonGalerkinIJBigBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
   }

   /*
    * Place entries in R_IAP into Pattern
    */
   Cpt = -1; /* Cpt contains the fine grid index of the i-th Cpt */
   for(i = 0; i < num_variables; i++)
   {
      global_row = i+first_col_diag_RAP;

      /* Find the next Coarse Point
in CF_marker */
      for(j = Cpt+1; j < num_fine_variables; j++)
      {
         if(CF_marker[j] == 1)    /* Found Next C-point */
         {
            Cpt = j;
            break;
         }
      }

      /* Diag Portion */
      row_start = R_IAP_diag_i[Cpt];
      row_end   = R_IAP_diag_i[Cpt+1];
      for(j = row_start; j < row_end; j++)
      {
         global_col = R_IAP_diag_j[j] + first_col_diag_RAP;
         /* This call adds a 1 x 1 to i, j in Pattern's buffered data */
         hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                         &ijbuf_data, &ijbuf_cols, &ijbuf_rownums,
                                         &ijbuf_numcols, global_row, global_col, 1.0);
         if (sym_collapse)
         {
            /* Mirror entry (col, row) keeps the pattern symmetric */
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt, ijbuf_size,
                                            &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                            &ijbuf_sym_cols, &ijbuf_sym_rownums,
                                            &ijbuf_sym_numcols, global_col, global_row, 1.0);
         }
      }

      /* Offdiag Portion */
      row_start = R_IAP_offd_i[Cpt];
      row_end   = R_IAP_offd_i[Cpt+1];
      for(j = row_start; j < row_end; j++)
      {
         global_col = col_map_offd_R_IAP[ R_IAP_offd_j[j] ];
         /* This call adds a 1 x 1 to i, j in Pattern's buffered data */
         hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                         &ijbuf_data, &ijbuf_cols, &ijbuf_rownums,
                                         &ijbuf_numcols, global_row, global_col, 1.0);
         if (sym_collapse)
         {
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt, ijbuf_size,
                                            &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                            &ijbuf_sym_cols, &ijbuf_sym_rownums,
                                            &ijbuf_sym_numcols, global_col, global_row, 1.0);
         }
      }
   }

   /*
    * Use drop-tolerance to compute new entries for sparsity pattern
    */
   /*#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,max_entry,max_entry_offd,global_col,global_row) HYPRE_SMP_SCHEDULE
   #endif */
   for(i = 0; i < num_variables; i++)
   {
      global_row = i+first_col_diag_RAP;

      /* Compute the drop tolerance for this row, which is just
       * abs(max of row i)*droptol */
      max_entry = -1.0;
      for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)
      {
         /* skip the diagonal entry when finding the row max */
         if( (RAP_diag_j[j] != i) && (max_entry < fabs(RAP_diag_data[j]) ) )
         {
            max_entry = fabs(RAP_diag_data[j]);
         }
      }
      for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)
      {
         {  /* NOTE(review): this inner brace pair is redundant (no declarations
             * inside) -- harmless, kept as-is */
            if( max_entry < fabs(RAP_offd_data[j]) )
            {
               max_entry = fabs(RAP_offd_data[j]);
            }
         }
      }
      max_entry *= droptol;
      max_entry_offd = max_entry*collapse_beta;

      /* Loop over diag portion, adding all entries that are "strong" */
      for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)
      {
         if( fabs(RAP_diag_data[j]) > max_entry )
         {
            global_col = RAP_diag_j[j] + first_col_diag_RAP;
            /*#ifdef HYPRE_USING_OPENMP
            #pragma omp critical (IJAdd)
            #endif
            {*/
            /* For efficiency, we do a buffered IJAddToValues
             * A[global_row, global_col] += 1.0 */
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                            &ijbuf_data, &ijbuf_cols, &ijbuf_rownums,
                                            &ijbuf_numcols, global_row, global_col, 1.0 );
            if(sym_collapse)
            {
               hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt, ijbuf_size,
                                               &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                               &ijbuf_sym_cols, &ijbuf_sym_rownums,
                                               &ijbuf_sym_numcols, global_col, global_row, 1.0 );
            }
            /*}*/
         }
      }

      /* Loop over offd portion, adding all entries that are "strong" */
      for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)
      {
         if( fabs(RAP_offd_data[j]) > max_entry_offd )
         {
            global_col = col_map_offd_RAP[ RAP_offd_j[j] ];
            /*#ifdef HYPRE_USING_OPENMP
            #pragma omp critical (IJAdd)
            #endif
            {*/
            /* For efficiency, we do a buffered IJAddToValues
             * A[global_row, global_col] += 1.0 */
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                            &ijbuf_data, &ijbuf_cols, &ijbuf_rownums,
                                            &ijbuf_numcols, global_row, global_col, 1.0 );
            if(sym_collapse)
            {
               hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt, ijbuf_size,
                                               &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                               &ijbuf_sym_cols, &ijbuf_sym_rownums,
                                               &ijbuf_sym_numcols, global_col, global_row, 1.0 );
            }
            /*}*/
         }
      }
   }

   /* For efficiency, we do a buffered IJAddToValues.
* This empties the buffer of any remaining values */
   hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
                                  &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
   if(sym_collapse)
      hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
                                     &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
                                     &ijbuf_sym_numcols);

   /* Finalize Construction of Pattern */
   ierr += HYPRE_IJMatrixAssemble(Pattern);
   ierr += HYPRE_IJMatrixGetObject( Pattern, (void**) &Pattern_CSR );

   /* Deallocate (the ParCSR object is detached and returned to the caller) */
   ierr += HYPRE_IJMatrixSetObjectType(Pattern, -1);
   ierr += HYPRE_IJMatrixDestroy(Pattern);
   hypre_TFree(ijbuf_data, HYPRE_MEMORY_DEVICE);
   hypre_TFree(ijbuf_cols, HYPRE_MEMORY_DEVICE);
   hypre_TFree(ijbuf_rownums, HYPRE_MEMORY_DEVICE);
   hypre_TFree(ijbuf_numcols, HYPRE_MEMORY_DEVICE);
   if(sym_collapse)
   {
      hypre_TFree(ijbuf_sym_data, HYPRE_MEMORY_DEVICE);
      hypre_TFree(ijbuf_sym_cols, HYPRE_MEMORY_DEVICE);
      hypre_TFree(ijbuf_sym_rownums, HYPRE_MEMORY_DEVICE);
      hypre_TFree(ijbuf_sym_numcols, HYPRE_MEMORY_DEVICE);
   }

   return Pattern_CSR;
}

HYPRE_Int
hypre_BoomerAMGBuildNonGalerkinCoarseOperator( hypre_ParCSRMatrix **RAP_ptr,
                                               hypre_ParCSRMatrix *AP,
                                               HYPRE_Real strong_threshold,
                                               HYPRE_Real max_row_sum,
                                               HYPRE_Int num_functions,
                                               HYPRE_Int * dof_func_value,
                                               HYPRE_Real S_commpkg_switch,
                                               HYPRE_Int * CF_marker,
                                               HYPRE_Real droptol,
                                               HYPRE_Int sym_collapse,
                                               HYPRE_Real lump_percent,
                                               HYPRE_Int collapse_beta )
{
   /* Initializations */
   MPI_Comm             comm               = hypre_ParCSRMatrixComm(*RAP_ptr);
   hypre_ParCSRMatrix  *S                  = NULL;
   hypre_ParCSRMatrix  *RAP                = *RAP_ptr;
   HYPRE_Int           *col_offd_S_to_A    = NULL;
   HYPRE_Int            i, j, k, row_start, row_end, value, num_cols_offd_Sext, num_procs;
   HYPRE_Int            S_ext_diag_size, S_ext_offd_size, last_col_diag_RAP, cnt_offd, cnt_diag, cnt;
   HYPRE_Int            col_indx_Pattern, current_Pattern_j, col_indx_RAP;
   /* HYPRE_Real           start_time = hypre_MPI_Wtime(); */
   /* HYPRE_Real           end_time; */
   HYPRE_BigInt        *temp               = NULL;
   HYPRE_Int            ierr               = 0;
   char                 filename[256];

   /* Lumping related
variables */ HYPRE_IJMatrix ijmatrix; HYPRE_BigInt * Pattern_offd_indices = NULL; HYPRE_BigInt * S_offd_indices = NULL; HYPRE_BigInt * offd_intersection = NULL; HYPRE_Real * offd_intersection_data = NULL; HYPRE_Int * diag_intersection = NULL; HYPRE_Real * diag_intersection_data = NULL; HYPRE_Int Pattern_offd_indices_len = 0; HYPRE_Int Pattern_offd_indices_allocated_len= 0; HYPRE_Int S_offd_indices_len = 0; HYPRE_Int S_offd_indices_allocated_len = 0; HYPRE_Int offd_intersection_len = 0; HYPRE_Int offd_intersection_allocated_len = 0; HYPRE_Int diag_intersection_len = 0; HYPRE_Int diag_intersection_allocated_len = 0; HYPRE_Real intersection_len = 0; HYPRE_Int * Pattern_indices_ptr = NULL; HYPRE_Int Pattern_diag_indices_len = 0; HYPRE_Int global_row = 0; HYPRE_Int has_row_ended = 0; HYPRE_Real lump_value = 0.; HYPRE_Real diagonal_lump_value = 0.; HYPRE_Real neg_lump_value = 0.; HYPRE_Real sum_strong_neigh = 0.; HYPRE_Int * rownz = NULL; /* offd and diag portions of RAP */ hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP); HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag); HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag); HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag); HYPRE_BigInt first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP); HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag); hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP); HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd); HYPRE_Real *RAP_offd_data = NULL; HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd); HYPRE_BigInt *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP); HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(RAP); /* offd and diag portions of S */ hypre_CSRMatrix *S_diag = NULL; HYPRE_Int *S_diag_i = NULL; HYPRE_Real *S_diag_data = NULL; HYPRE_Int *S_diag_j = NULL; hypre_CSRMatrix *S_offd = NULL; HYPRE_Int *S_offd_i 
= NULL; HYPRE_Real *S_offd_data = NULL; HYPRE_Int *S_offd_j = NULL; HYPRE_BigInt *col_map_offd_S = NULL; HYPRE_Int num_cols_offd_S; /* HYPRE_Int num_nonzeros_S_diag; */ /* off processor portions of S */ hypre_CSRMatrix *S_ext = NULL; HYPRE_Int *S_ext_i = NULL; HYPRE_Real *S_ext_data = NULL; HYPRE_BigInt *S_ext_j = NULL; HYPRE_Int *S_ext_diag_i = NULL; HYPRE_Real *S_ext_diag_data = NULL; HYPRE_Int *S_ext_diag_j = NULL; HYPRE_Int *S_ext_offd_i = NULL; HYPRE_Real *S_ext_offd_data = NULL; HYPRE_Int *S_ext_offd_j = NULL; HYPRE_BigInt *col_map_offd_Sext = NULL; /* HYPRE_Int num_nonzeros_S_ext_diag; HYPRE_Int num_nonzeros_S_ext_offd; HYPRE_Int num_rows_Sext = 0; */ HYPRE_Int row_indx_Sext = 0; /* offd and diag portions of Pattern */ hypre_ParCSRMatrix *Pattern = NULL; hypre_CSRMatrix *Pattern_diag = NULL; HYPRE_Int *Pattern_diag_i = NULL; HYPRE_Real *Pattern_diag_data = NULL; HYPRE_Int *Pattern_diag_j = NULL; hypre_CSRMatrix *Pattern_offd = NULL; HYPRE_Int *Pattern_offd_i = NULL; HYPRE_Real *Pattern_offd_data = NULL; HYPRE_Int *Pattern_offd_j = NULL; HYPRE_BigInt *col_map_offd_Pattern = NULL; HYPRE_Int num_cols_Pattern_offd; HYPRE_Int my_id; /* Buffered IJAddToValues */ HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter; HYPRE_Real *ijbuf_data; HYPRE_BigInt *ijbuf_cols, *ijbuf_rownums; HYPRE_Int *ijbuf_numcols; /* Buffered IJAddToValues for Symmetric Entries */ HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter; HYPRE_Real *ijbuf_sym_data; HYPRE_BigInt *ijbuf_sym_cols, *ijbuf_sym_rownums; HYPRE_Int *ijbuf_sym_numcols; /* Further Initializations */ if (num_cols_RAP_offd) { RAP_offd_data = hypre_CSRMatrixData(RAP_offd); } hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* Compute Sparsity Pattern */ Pattern = hypre_NonGalerkinSparsityPattern(AP, RAP, CF_marker, droptol, sym_collapse, collapse_beta); Pattern_diag = hypre_ParCSRMatrixDiag(Pattern); Pattern_diag_i = hypre_CSRMatrixI(Pattern_diag); Pattern_diag_data = hypre_CSRMatrixData(Pattern_diag); 
Pattern_diag_j = hypre_CSRMatrixJ(Pattern_diag); Pattern_offd = hypre_ParCSRMatrixOffd(Pattern); Pattern_offd_i = hypre_CSRMatrixI(Pattern_offd); Pattern_offd_j = hypre_CSRMatrixJ(Pattern_offd); col_map_offd_Pattern = hypre_ParCSRMatrixColMapOffd(Pattern); num_cols_Pattern_offd = hypre_CSRMatrixNumCols(Pattern_offd); if (num_cols_Pattern_offd) { Pattern_offd_data = hypre_CSRMatrixData(Pattern_offd); } /** * Fill in the entries of Pattern with entries from RAP **/ /* First, sort column indices in RAP and Pattern */ for(i = 0; i < num_variables; i++) { /* The diag matrices store the diagonal as first element in each row. * We maintain that for the case of Pattern and RAP, because the * strength of connection routine relies on it and we need to ignore * diagonal entries in Pattern later during set intersections. * */ /* Sort diag portion of RAP */ row_start = RAP_diag_i[i]; if( RAP_diag_j[row_start] == i) { row_start = row_start + 1; } row_end = RAP_diag_i[i+1]; hypre_qsort1(RAP_diag_j, RAP_diag_data, row_start, row_end-1 ); /* Sort diag portion of Pattern */ row_start = Pattern_diag_i[i]; if( Pattern_diag_j[row_start] == i) { row_start = row_start + 1; } row_end = Pattern_diag_i[i+1]; hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end-1 ); /* Sort offd portion of RAP */ row_start = RAP_offd_i[i]; row_end = RAP_offd_i[i+1]; hypre_qsort1(RAP_offd_j, RAP_offd_data, row_start, row_end-1 ); /* Sort offd portion of Pattern */ /* Be careful to map coarse dof i with CF_marker into Pattern */ row_start = Pattern_offd_i[i]; row_end = Pattern_offd_i[i+1]; hypre_qsort1(Pattern_offd_j, Pattern_offd_data, row_start, row_end-1 ); } /* Create Strength matrix based on RAP or Pattern. If Pattern is used, * then the SortedCopyParCSRData(...) 
function call must also be commented * back in */ /* hypre_SortedCopyParCSRData(RAP, Pattern); */ if(0) { /* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum, */ hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum, num_functions, dof_func_value, &S); } else { /* Passing in "1, NULL" because dof_array is not needed * because we assume that the number of functions is 1 */ /* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum,*/ hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum, 1, NULL, &S); } /*if (0)*/ /*(strong_threshold > S_commpkg_switch)*/ /*{ hypre_BoomerAMG_MyCreateSCommPkg(RAP, S, &col_offd_S_to_A); }*/ /* Grab diag and offd parts of S */ S_diag = hypre_ParCSRMatrixDiag(S); S_diag_i = hypre_CSRMatrixI(S_diag); S_diag_j = hypre_CSRMatrixJ(S_diag); S_diag_data = hypre_CSRMatrixData(S_diag); S_offd = hypre_ParCSRMatrixOffd(S); S_offd_i = hypre_CSRMatrixI(S_offd); S_offd_j = hypre_CSRMatrixJ(S_offd); S_offd_data = hypre_CSRMatrixData(S_offd); col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S); num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); /* num_nonzeros_S_diag = S_diag_i[num_variables]; */ /* Grab part of S that is distance one away from the local rows * This is needed later for the stencil collapsing. This section * of the code mimics par_rap.c when it extracts Ps_ext. * When moving from par_rap.c, the variable name changes were: * A --> RAP * P --> S * Ps_ext --> S_ext * P_ext_diag --> S_ext_diag * P_ext_offd --> S_ext_offd * * The data layout of S_ext as returned by ExtractBExt gives you only global * column indices, and must be converted to the local numbering. This code * section constructs S_ext_diag and S_ext_offd, which are the distance 1 * couplings in S based on the sparsity structure in RAP. * --> S_ext_diag corresponds to the same column slice that RAP_diag * corresponds to. 
Thus, the column indexing is the same as in * RAP_diag such that S_ext_diag_j[k] just needs to be offset by * the RAP_diag first global dof offset. * --> S_ext_offd column indexing is a little more complicated, and * requires the computation below of col_map_S_ext_offd, which * maps the local 0,1,2,... column indexing in S_ext_offd to global * dof numbers. Note, that the num_cols_RAP_offd is NOT equal to * num_cols_offd_S_ext * --> The row indexing of S_ext_diag|offd is as follows. Use * col_map_offd_RAP, where the first index corresponds to the * first global row index in S_ext_diag|offd. Remember that ExtractBExt * grabs the information from S required for locally computing * (RAP*S)[proc_k row slice, :] */ if (num_procs > 1) { S_ext = hypre_ParCSRMatrixExtractBExt(S,RAP,1); S_ext_data = hypre_CSRMatrixData(S_ext); S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixBigJ(S_ext); } /* This uses the num_cols_RAP_offd to set S_ext_diag|offd_i, because S_ext * is the off-processor information needed to compute RAP*S. That is, * num_cols_RAP_offd represents the number of rows needed from S_ext for * the multiplication */ S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_RAP_offd+1, HYPRE_MEMORY_HOST); S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_RAP_offd+1, HYPRE_MEMORY_HOST); S_ext_diag_size = 0; S_ext_offd_size = 0; /* num_rows_Sext = num_cols_RAP_offd; */ last_col_diag_RAP = first_col_diag_RAP + num_cols_diag_RAP - 1; /* construct the S_ext_diag and _offd row-pointer arrays by counting elements * This looks to create offd and diag blocks related to the local rows belonging * to this processor...we may not need to split up S_ext this way...or we could. 
* It would make for faster binary searching and set intersecting later...this will * be the bottle neck so LETS SPLIT THIS UP Between offd and diag*/ for (i=0; i < num_cols_RAP_offd; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP) S_ext_offd_size++; else S_ext_diag_size++; S_ext_diag_i[i+1] = S_ext_diag_size; S_ext_offd_i[i+1] = S_ext_offd_size; } if (S_ext_diag_size) { S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); S_ext_diag_data = hypre_CTAlloc(HYPRE_Real, S_ext_diag_size, HYPRE_MEMORY_HOST); } if (S_ext_offd_size) { S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); S_ext_offd_data = hypre_CTAlloc(HYPRE_Real, S_ext_offd_size, HYPRE_MEMORY_HOST); } /* This copies over the column indices into the offd and diag parts. * The diag portion has it's local column indices shifted to start at 0. * The offd portion requires more work to construct the col_map_offd array * and a local column ordering. 
*/ cnt_offd = 0; cnt_diag = 0; cnt = 0; for (i=0; i < num_cols_RAP_offd; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP) { S_ext_offd_data[cnt_offd] = S_ext_data[j]; //S_ext_offd_j[cnt_offd++] = S_ext_j[j]; S_ext_j[cnt_offd++] = S_ext_j[j]; } else { S_ext_diag_data[cnt_diag] = S_ext_data[j]; S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(S_ext_j[j] - first_col_diag_RAP); } } /* This creates col_map_offd_Sext */ if (S_ext_offd_size || num_cols_offd_S) { temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size+num_cols_offd_S, HYPRE_MEMORY_HOST); for (i=0; i < S_ext_offd_size; i++) temp[i] = S_ext_j[i]; cnt = S_ext_offd_size; for (i=0; i < num_cols_offd_S; i++) temp[cnt++] = col_map_offd_S[i]; } if (cnt) { /* after this, the first so many entries of temp will hold the * unique column indices in S_ext_offd_j unioned with the indices * in col_map_offd_S */ hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_Sext = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_Sext++] = value; } } } else { num_cols_offd_Sext = 0; } /* num_nonzeros_S_ext_diag = cnt_diag; num_nonzeros_S_ext_offd = S_ext_offd_size; */ if (num_cols_offd_Sext) col_map_offd_Sext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Sext, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_Sext; i++) col_map_offd_Sext[i] = temp[i]; if (S_ext_offd_size || num_cols_offd_S) hypre_TFree(temp, HYPRE_MEMORY_HOST); /* look for S_ext_offd_j[i] in col_map_offd_Sext, and set S_ext_offd_j[i] * to the index of that column value in col_map_offd_Sext */ for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Sext, S_ext_j[i], num_cols_offd_Sext); if (num_procs > 1) { hypre_CSRMatrixDestroy(S_ext); S_ext = NULL; } /* Need to sort column indices in S and S_ext */ for(i = 0; i < num_variables; i++) { /* Re-Sort diag portion of Pattern, placing the diagonal entry in a * sorted position */ row_start 
= Pattern_diag_i[i]; row_end = Pattern_diag_i[i+1]; hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end-1 ); /* Sort diag portion of S, noting that no diagonal entry */ /* S has not "data" array...it's just NULL */ row_start = S_diag_i[i]; row_end = S_diag_i[i+1]; hypre_qsort1(S_diag_j, S_diag_data, row_start, row_end-1 ); /* Sort offd portion of S */ /* S has no "data" array...it's just NULL */ row_start = S_offd_i[i]; row_end = S_offd_i[i+1]; hypre_qsort1(S_offd_j, S_offd_data, row_start, row_end-1 ); } /* Sort S_ext * num_cols_RAP_offd equals num_rows for S_ext*/ for(i = 0; i < num_cols_RAP_offd; i++) { /* Sort diag portion of S_ext */ row_start = S_ext_diag_i[i]; row_end = S_ext_diag_i[i+1]; hypre_qsort1(S_ext_diag_j, S_ext_diag_data, row_start, row_end-1 ); /* Sort offd portion of S_ext */ row_start = S_ext_offd_i[i]; row_end = S_ext_offd_i[i+1]; hypre_qsort1(S_ext_offd_j, S_ext_offd_data, row_start, row_end-1 ); } /* * Now, for the fun stuff -- Computing the Non-Galerkin Operator */ /* Initialize the ijmatrix, leveraging our knowledge of the nonzero * structure in Pattern */ ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP, first_col_diag_RAP, last_col_diag_RAP, &ijmatrix); ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR); rownz = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST); for(i = 0; i < num_variables; i++) { rownz[i] = 1.2*(Pattern_diag_i[i+1] - Pattern_diag_i[i]) + 1.2*(Pattern_offd_i[i+1] - Pattern_offd_i[i]); } HYPRE_IJMatrixSetRowSizes(ijmatrix, rownz); ierr += HYPRE_IJMatrixInitialize(ijmatrix); hypre_TFree(rownz, HYPRE_MEMORY_HOST); /* *For efficiency, we do a buffered IJAddToValues. 
* Here, we initialize the buffer and then initialize the buffer counters */ ijbuf_size = 1000; ijbuf_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE); ijbuf_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE); ijbuf_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE); ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE); hypre_NonGalerkinIJBigBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols ); if(sym_collapse) { ijbuf_sym_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE); ijbuf_sym_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE); ijbuf_sym_rownums= hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE); ijbuf_sym_numcols= hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE); hypre_NonGalerkinIJBigBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols ); } /* * Eliminate Entries In RAP_diag * */ for(i = 0; i < num_variables; i++) { global_row = i+first_col_diag_RAP; row_start = RAP_diag_i[i]; row_end = RAP_diag_i[i+1]; has_row_ended = 0; /* Only do work if row has nonzeros */ if( row_start < row_end) { /* Grab pointer to current entry in Pattern_diag */ current_Pattern_j = Pattern_diag_i[i]; col_indx_Pattern = Pattern_diag_j[current_Pattern_j]; /* Grab this row's indices out of Pattern offd and diag. 
This will * be for computing index set intersections for lumping */ /* Ensure adequate length */ Pattern_offd_indices_len = Pattern_offd_i[i+1] - Pattern_offd_i[i]; if(Pattern_offd_indices_allocated_len < Pattern_offd_indices_len) { hypre_TFree(Pattern_offd_indices, HYPRE_MEMORY_HOST); Pattern_offd_indices = hypre_CTAlloc(HYPRE_BigInt, Pattern_offd_indices_len, HYPRE_MEMORY_HOST); Pattern_offd_indices_allocated_len = Pattern_offd_indices_len; } /* Grab sub array from col_map, corresponding to the slice of Pattern_offd_j */ hypre_GrabSubArray(Pattern_offd_j, Pattern_offd_i[i], Pattern_offd_i[i+1]-1, col_map_offd_Pattern, Pattern_offd_indices); /* No need to grab info out of Pattern_diag_j[...], here we just start from * Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to * ignore the diagonal entry in Pattern, because we don't lump entries there */ if( Pattern_diag_j[Pattern_diag_i[i]] == i ) { Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]+1]); Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i] - 1; } else { Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]); Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i]; } } for(j = row_start; j < row_end; j++) { col_indx_RAP = RAP_diag_j[j]; /* Ignore zero entries in RAP */ if( RAP_diag_data[j] != 0.0) { /* Don't change the diagonal, just write it */ if(col_indx_RAP == i) { /*#ifdef HY PRE_USING_OPENMP #pragma omp critical (IJAdd) #endif {*/ /* For efficiency, we do a buffered IJAddToValues. 
* A[global_row, global_row] += RAP_diag_data[j] */ hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row, RAP_diag_data[j] ); /*}*/ } /* The entry in RAP does not appear in Pattern, so LUMP it */ else if( (col_indx_RAP < col_indx_Pattern) || has_row_ended) { /* Lump entry (i, col_indx_RAP) in RAP */ /* Grab the indices for row col_indx_RAP of S_offd and diag. This will * be for computing lumping locations */ S_offd_indices_len = S_offd_i[col_indx_RAP+1] - S_offd_i[col_indx_RAP]; if(S_offd_indices_allocated_len < S_offd_indices_len) { hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST); S_offd_indices = hypre_CTAlloc(HYPRE_BigInt, S_offd_indices_len, HYPRE_MEMORY_HOST); S_offd_indices_allocated_len = S_offd_indices_len; } /* Grab sub array from col_map, corresponding to the slice of S_offd_j */ hypre_GrabSubArray(S_offd_j, S_offd_i[col_indx_RAP], S_offd_i[col_indx_RAP+1]-1, col_map_offd_S, S_offd_indices); /* No need to grab info out of S_diag_j[...], here we just start from * S_diag_i[col_indx_RAP] and end at index S_diag_i[col_indx_RAP+1] - 1 */ /* Intersect the diag and offd pieces, remembering that the * diag array will need to have the offset +first_col_diag_RAP */ cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len); if(offd_intersection_allocated_len < cnt) { hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST); hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST); offd_intersection = hypre_CTAlloc(HYPRE_BigInt, cnt, HYPRE_MEMORY_HOST); offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST); offd_intersection_allocated_len = cnt; } /* This intersection also tracks S_offd_data and assumes that * S_offd_indices is the first argument here */ hypre_IntersectTwoBigArrays(S_offd_indices, &(S_offd_data[ S_offd_i[col_indx_RAP] ]), S_offd_indices_len, Pattern_offd_indices, Pattern_offd_indices_len, offd_intersection, 
offd_intersection_data, &offd_intersection_len); /* Now, intersect the indices for the diag block. Note that S_diag_j does * not have a diagonal entry, so no lumping occurs to the diagonal. */ cnt = hypre_max(Pattern_diag_indices_len, S_diag_i[col_indx_RAP+1] - S_diag_i[col_indx_RAP] ); if(diag_intersection_allocated_len < cnt) { hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST); hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST); diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt, HYPRE_MEMORY_HOST); diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST); diag_intersection_allocated_len = cnt; } /* There is no diagonal entry in first position of S */ hypre_IntersectTwoArrays( &(S_diag_j[S_diag_i[col_indx_RAP]]), &(S_diag_data[ S_diag_i[col_indx_RAP] ]), S_diag_i[col_indx_RAP+1] - S_diag_i[col_indx_RAP], Pattern_indices_ptr, Pattern_diag_indices_len, diag_intersection, diag_intersection_data, &diag_intersection_len); /* Loop over these intersections, and lump a constant fraction of * RAP_diag_data[j] to each entry */ intersection_len = diag_intersection_len + offd_intersection_len; if(intersection_len > 0) { /* Sum the strength-of-connection values from row * col_indx_RAP in S, corresponding to the indices we are * collapsing to in row i This will give us our collapsing * weights. 
*/ sum_strong_neigh = 0.0; for(k = 0; k < diag_intersection_len; k++) { sum_strong_neigh += fabs(diag_intersection_data[k]); } for(k = 0; k < offd_intersection_len; k++) { sum_strong_neigh += fabs(offd_intersection_data[k]); } sum_strong_neigh = RAP_diag_data[j]/sum_strong_neigh; /* When lumping with the diag_intersection, must offset column index */ for(k = 0; k < diag_intersection_len; k++) { lump_value = lump_percent * fabs(diag_intersection_data[k])*sum_strong_neigh; diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k])*sum_strong_neigh; neg_lump_value = -1.0 * lump_value; cnt = diag_intersection[k]+first_col_diag_RAP; /*#ifdef HY PRE_USING_OPENMP #pragma omp critical (IJAdd) #endif {*/ /* For efficiency, we do a buffered IJAddToValues. * A[global_row, cnt] += RAP_diag_data[j] */ hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, cnt, lump_value ); if (lump_percent < 1.0) { /* Preserve row sum by updating diagonal */ hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row, diagonal_lump_value ); } /* Update mirror entries, if symmetric collapsing */ if(sym_collapse) { /* Update mirror entry */ hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols, cnt, global_row, lump_value ); /* Update mirror entry diagonal */ hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols, cnt, cnt, neg_lump_value ); } /*}*/ } /* The offd_intersection has global column indices, i.e., the * col_map arrays contain global indices */ for(k = 0; k < offd_intersection_len; k++) { lump_value = lump_percent * 
fabs(offd_intersection_data[k])*sum_strong_neigh; diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k])*sum_strong_neigh; neg_lump_value = -1.0 * lump_value; hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, offd_intersection[k], lump_value ); if (lump_percent < 1.0) { hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row, diagonal_lump_value ); } /* Update mirror entries, if symmetric collapsing */ if (sym_collapse) { hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols, offd_intersection[k], global_row, lump_value ); hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols, offd_intersection[k], offd_intersection[k], neg_lump_value ); } } } /* If intersection is empty, do not eliminate entry */ else { /* Don't forget to update mirror entry if collapsing symmetrically */ if (sym_collapse) { lump_value = 0.5*RAP_diag_data[j]; } else { lump_value = RAP_diag_data[j]; } cnt = col_indx_RAP+first_col_diag_RAP; hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, cnt, lump_value ); if (sym_collapse) { hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols, cnt, global_row, lump_value ); } } } /* The entry in RAP appears in Pattern, so keep it */ else if(col_indx_RAP == col_indx_Pattern) { cnt = col_indx_RAP+first_col_diag_RAP; hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, 
&ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, cnt, RAP_diag_data[j] ); /* Only go to the next entry in Pattern, if this is not the end of a row */ if( current_Pattern_j < Pattern_diag_i[i+1]-1 ) { current_Pattern_j += 1; col_indx_Pattern = Pattern_diag_j[current_Pattern_j]; } else { has_row_ended = 1;} } /* Increment col_indx_Pattern, and repeat this loop iter for current * col_ind_RAP value */ else if(col_indx_RAP > col_indx_Pattern) { for(; current_Pattern_j < Pattern_diag_i[i+1]; current_Pattern_j++) { col_indx_Pattern = Pattern_diag_j[current_Pattern_j]; if(col_indx_RAP <= col_indx_Pattern) { break;} } /* If col_indx_RAP is still greater (i.e., we've reached a row end), then * we need to lump everything else in this row */ if(col_indx_RAP > col_indx_Pattern) { has_row_ended = 1; } /* Decrement j, in order to repeat this loop iteration for the current * col_indx_RAP value */ j--; } } } } /* * Eliminate Entries In RAP_offd * Structure of this for-loop is very similar to the RAP_diag for-loop * But, not so similar that these loops should be combined into a single fuction. * */ if(num_cols_RAP_offd) { for(i = 0; i < num_variables; i++) { global_row = i+first_col_diag_RAP; row_start = RAP_offd_i[i]; row_end = RAP_offd_i[i+1]; has_row_ended = 0; /* Only do work if row has nonzeros */ if( row_start < row_end) { current_Pattern_j = Pattern_offd_i[i]; Pattern_offd_indices_len = Pattern_offd_i[i+1] - Pattern_offd_i[i]; if( (Pattern_offd_j != NULL) && (Pattern_offd_indices_len > 0) ) { col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ]; } else { /* if Pattern_offd_j is not allocated or this is a zero length row, then all entries need to be lumped. This is an analagous situation to has_row_ended=1. */ col_indx_Pattern = -1; has_row_ended = 1; } /* Grab this row's indices out of Pattern offd and diag. This will * be for computing index set intersections for lumping. 
The above * loop over RAP_diag ensures adequate length of Pattern_offd_indices */ /* Ensure adequate length */ hypre_GrabSubArray(Pattern_offd_j, Pattern_offd_i[i], Pattern_offd_i[i+1]-1, col_map_offd_Pattern, Pattern_offd_indices); /* No need to grab info out of Pattern_diag_j[...], here we just start from * Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to * ignore the diagonal entry in Pattern, because we don't lump entries there */ if( Pattern_diag_j[Pattern_diag_i[i]] == i ) { Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]+1]); Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i] - 1; } else { Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]); Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i]; } } for(j = row_start; j < row_end; j++) { /* Ignore zero entries in RAP */ if( RAP_offd_data[j] != 0.0) { /* In general for all the offd_j arrays, we have to indirectly * index with the col_map_offd array to get a global index */ col_indx_RAP = col_map_offd_RAP[ RAP_offd_j[j] ]; /* The entry in RAP does not appear in Pattern, so LUMP it */ if( (col_indx_RAP < col_indx_Pattern) || has_row_ended) { /* The row_indx_Sext would be found with: row_indx_Sext = hypre_BinarySearch(col_map_offd_RAP, col_indx_RAP, num_cols_RAP_offd); But, we already know the answer to this with, */ row_indx_Sext = RAP_offd_j[j]; /* Grab the indices for row row_indx_Sext from the offd and diag parts. 
This will * be for computing lumping locations */ S_offd_indices_len = S_ext_offd_i[row_indx_Sext+1] - S_ext_offd_i[row_indx_Sext]; if(S_offd_indices_allocated_len < S_offd_indices_len) { hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST); S_offd_indices = hypre_CTAlloc(HYPRE_BigInt, S_offd_indices_len, HYPRE_MEMORY_HOST); S_offd_indices_allocated_len = S_offd_indices_len; } /* Grab sub array from col_map, corresponding to the slice of S_ext_offd_j */ hypre_GrabSubArray(S_ext_offd_j, S_ext_offd_i[row_indx_Sext], S_ext_offd_i[row_indx_Sext+1]-1, col_map_offd_Sext, S_offd_indices); /* No need to grab info out of S_ext_diag_j[...], here we just start from * S_ext_diag_i[row_indx_Sext] and end at index S_ext_diag_i[row_indx_Sext+1] - 1 */ /* Intersect the diag and offd pieces, remembering that the * diag array will need to have the offset +first_col_diag_RAP */ cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len); if(offd_intersection_allocated_len < cnt) { hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST); hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST); offd_intersection = hypre_CTAlloc(HYPRE_BigInt, cnt, HYPRE_MEMORY_HOST); offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST); offd_intersection_allocated_len = cnt; } hypre_IntersectTwoBigArrays(S_offd_indices, &(S_ext_offd_data[ S_ext_offd_i[row_indx_Sext] ]), S_offd_indices_len, Pattern_offd_indices, Pattern_offd_indices_len, offd_intersection, offd_intersection_data, &offd_intersection_len); /* Now, intersect the indices for the diag block. 
*/ cnt = hypre_max(Pattern_diag_indices_len, S_ext_diag_i[row_indx_Sext+1] - S_ext_diag_i[row_indx_Sext] ); if(diag_intersection_allocated_len < cnt) { hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST); hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST); diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt, HYPRE_MEMORY_HOST); diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST); diag_intersection_allocated_len = cnt; } hypre_IntersectTwoArrays( &(S_ext_diag_j[S_ext_diag_i[row_indx_Sext]]), &(S_ext_diag_data[ S_ext_diag_i[row_indx_Sext] ]), S_ext_diag_i[row_indx_Sext+1] - S_ext_diag_i[row_indx_Sext], Pattern_indices_ptr, Pattern_diag_indices_len, diag_intersection, diag_intersection_data, &diag_intersection_len); /* Loop over these intersections, and lump a constant fraction of * RAP_offd_data[j] to each entry */ intersection_len = diag_intersection_len + offd_intersection_len; if(intersection_len > 0) { /* Sum the strength-of-connection values from row * row_indx_Sext in S, corresponding to the indices we are * collapsing to in row i. This will give us our collapsing * weights. 
*/ sum_strong_neigh = 0.0; for(k = 0; k < diag_intersection_len; k++) { sum_strong_neigh += fabs(diag_intersection_data[k]); } for(k = 0; k < offd_intersection_len; k++) { sum_strong_neigh += fabs(offd_intersection_data[k]); } sum_strong_neigh = RAP_offd_data[j]/sum_strong_neigh; /* When lumping with the diag_intersection, must offset column index */ for(k = 0; k < diag_intersection_len; k++) { lump_value = lump_percent * fabs(diag_intersection_data[k])*sum_strong_neigh; diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k])*sum_strong_neigh; neg_lump_value = -1.0 * lump_value; cnt = diag_intersection[k]+first_col_diag_RAP; hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, cnt, lump_value ); if (lump_percent < 1.0) { hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row, diagonal_lump_value ); } /* Update mirror entries, if symmetric collapsing */ if (sym_collapse) { hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols, cnt, global_row, lump_value); hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols, cnt, cnt, neg_lump_value ); } } /* The offd_intersection has global column indices, i.e., the * col_map arrays contain global indices */ for(k = 0; k < offd_intersection_len; k++) { lump_value = lump_percent * fabs(offd_intersection_data[k])*sum_strong_neigh; diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k])*sum_strong_neigh; neg_lump_value = -1.0 * lump_value; hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, 
&ijbuf_rownums, &ijbuf_numcols, global_row, offd_intersection[k], lump_value ); if (lump_percent < 1.0) { hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row, diagonal_lump_value ); } /* Update mirror entries, if symmetric collapsing */ if (sym_collapse) { hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols, offd_intersection[k], global_row, lump_value ); hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols, offd_intersection[k], offd_intersection[k], neg_lump_value ); } } } /* If intersection is empty, do not eliminate entry */ else { /* Don't forget to update mirror entry if collapsing symmetrically */ if (sym_collapse) { lump_value = 0.5*RAP_offd_data[j]; } else { lump_value = RAP_offd_data[j]; } hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP, lump_value ); if (sym_collapse) { hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols, col_indx_RAP, global_row, lump_value ); } } } /* The entry in RAP appears in Pattern, so keep it */ else if (col_indx_RAP == col_indx_Pattern) { /* For the offd structure, col_indx_RAP is a global dof number */ hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP, RAP_offd_data[j]); /* Only go to the next entry in Pattern, if this is not the end of a row */ if( current_Pattern_j < Pattern_offd_i[i+1]-1 ) { current_Pattern_j += 1; col_indx_Pattern = 
col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ]; } else { has_row_ended = 1;} } /* Increment col_indx_Pattern, and repeat this loop iter for current * col_ind_RAP value */ else if(col_indx_RAP > col_indx_Pattern) { for(; current_Pattern_j < Pattern_offd_i[i+1]; current_Pattern_j++) { col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ]; if(col_indx_RAP <= col_indx_Pattern) { break;} } /* If col_indx_RAP is still greater (i.e., we've reached a row end), then * we need to lump everything else in this row */ if(col_indx_RAP > col_indx_Pattern) { has_row_ended = 1; } /* Decrement j, in order to repeat this loop iteration for the current * col_indx_RAP value */ j--; } } } } } /* For efficiency, we do a buffered IJAddToValues. * This empties the buffer of any remaining values */ hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter, &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols); if(sym_collapse) hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter, &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols); /* Assemble non-Galerkin Matrix, and overwrite current RAP*/ ierr += HYPRE_IJMatrixAssemble (ijmatrix); ierr += HYPRE_IJMatrixGetObject( ijmatrix, (void**) RAP_ptr); /* Optional diagnostic matrix printing */ if (0) { hypre_sprintf(filename, "Pattern_%d.ij", global_num_vars); hypre_ParCSRMatrixPrintIJ(Pattern, 0, 0, filename); hypre_sprintf(filename, "Strength_%d.ij", global_num_vars); hypre_ParCSRMatrixPrintIJ(S, 0, 0, filename); hypre_sprintf(filename, "RAP_%d.ij", global_num_vars); hypre_ParCSRMatrixPrintIJ(RAP, 0, 0, filename); hypre_sprintf(filename, "RAPc_%d.ij", global_num_vars); hypre_ParCSRMatrixPrintIJ(*RAP_ptr, 0, 0, filename); hypre_sprintf(filename, "AP_%d.ij", global_num_vars); hypre_ParCSRMatrixPrintIJ(AP, 0, 0, filename); } /* Free matrices and variables and arrays */ hypre_TFree(ijbuf_data, HYPRE_MEMORY_DEVICE); 
hypre_TFree(ijbuf_cols, HYPRE_MEMORY_DEVICE); hypre_TFree(ijbuf_rownums, HYPRE_MEMORY_DEVICE); hypre_TFree(ijbuf_numcols, HYPRE_MEMORY_DEVICE); if(sym_collapse) { hypre_TFree(ijbuf_sym_data, HYPRE_MEMORY_DEVICE); hypre_TFree(ijbuf_sym_cols, HYPRE_MEMORY_DEVICE); hypre_TFree(ijbuf_sym_rownums, HYPRE_MEMORY_DEVICE); hypre_TFree(ijbuf_sym_numcols, HYPRE_MEMORY_DEVICE); } hypre_TFree(Pattern_offd_indices, HYPRE_MEMORY_HOST); hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST); hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST); hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST); hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST); hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST); hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(S_ext_diag_data, HYPRE_MEMORY_HOST); } if (S_ext_offd_size) { hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(S_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_Sext) { hypre_TFree(col_map_offd_Sext, HYPRE_MEMORY_HOST); } if (0) /*(strong_threshold > S_commpkg_switch)*/ { hypre_TFree(col_offd_S_to_A, HYPRE_MEMORY_HOST); } ierr += hypre_ParCSRMatrixDestroy(Pattern); ierr += hypre_ParCSRMatrixDestroy(RAP); ierr += hypre_ParCSRMatrixDestroy(S); ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, -1); ierr += HYPRE_IJMatrixDestroy(ijmatrix); /*end_time = hypre_MPI_Wtime(); if(my_id == 0) { fprintf(stdout, "NonGalerkin Time: %1.2e\n", end_time-start_time); } */ return ierr; }
/* ======== Begin file: RingSettlementCircuit.h ======== */
#ifndef _RINGSETTLEMENTCIRCUIT_H_
#define _RINGSETTLEMENTCIRCUIT_H_

#include "Circuit.h"
#include "../Utils/Constants.h"
#include "../Utils/Data.h"
#include "../Utils/Utils.h"
#include "../Gadgets/MatchingGadgets.h"
#include "../Gadgets/AccountGadgets.h"
#include "../Gadgets/TradingHistoryGadgets.h"
#include "../Gadgets/MathGadgets.h"

#include "ethsnarks.hpp"
#include "utils.hpp"
#include "gadgets/subadd.hpp"

using namespace ethsnarks;

namespace Loopring
{

// Transforms the DA (data-availability) data for ring settlements.
// Reorders the per-ring byte ranges so that like fields of all rings are
// grouped together (column-major layout), which makes the on-chain data
// more compressible.
class TransformRingSettlementDataGadget : public GadgetT
{
public:

    // Size of one ring's DA record in bits (21 bytes).
    const unsigned int ringSize = 21 * 8;

    VariableArrayT data;            // raw input bits, numRings * ringSize long
    Bitstream transformedData;      // reordered output bits
    unsigned int numRings;
    std::vector<XorArrayGadget> xorGadgets;

    TransformRingSettlementDataGadget(
        ProtoboardT& pb,
        const std::string& prefix
    ) :
        GadgetT(pb, prefix)
    {
        numRings = 0;
    }

    // Returns the transformed bits as a flat array.
    VariableArrayT result()
    {
        return flatten(transformedData.data);
    }

    void generate_r1cs_witness()
    {
        for (unsigned int i = 0; i < xorGadgets.size(); i++)
        {
            xorGadgets[i].generate_r1cs_witness();
        }
    }

    // Builds the transformation for `numRings` rings over `data`.
    // NOTE: despite the name, this currently adds no R1CS constraints of its
    // own (the XOR-compression step is commented out); it only performs the
    // deterministic bit reordering below.
    void generate_r1cs_constraints(unsigned int numRings, const VariableArrayT& data)
    {
        this->numRings = numRings;
        this->data = data;
        assert(numRings > 0);
        assert(numRings * ringSize == data.size());

        // XOR compress
        // (XOR-ing the first 5 bytes of each ring with the previous ring is
        // disabled; the data is currently passed through unmodified.)
        Bitstream compressedData;
        compressedData.add(subArray(data, 0, numRings * ringSize));
        /*for (unsigned int i = 1; i < numRings; i++)
        {
            unsigned int previousRingStart = (i - 1) * ringSize;
            unsigned int ringStart = i * ringSize;

            xorGadgets.emplace_back(pb, subArray(data, previousRingStart, 5 * 8),
                                    subArray(data, ringStart, 5 * 8),
                                    std::string("xor_") + std::to_string(i));
            xorGadgets.back().generate_r1cs_constraints();
            compressedData.add(xorGadgets.back().result());
            compressedData.add(subArray(data, ringStart + 5 * 8, ringSize - 5 * 8));
        }*/

        // Transform: emit each field range for ring 0..numRings-1 before
        // moving on to the next field range (struct-of-arrays layout).
        struct Range
        {
            unsigned int offset;    // bit offset within one ring record
            unsigned int length;    // length in bits
        };
        std::vector<std::vector<Range>> ranges;
        ranges.push_back({{0, 32}});       // orderA.tradeHistoryData + orderB.tradeHistoryData
        ranges.push_back({{32, 48}});      // orderA.accountID + orderB.accountID
        ranges.push_back({{80, 24}});      // orderA.tokenS + orderB.tokenS
        ranges.push_back({{104, 48}});     // orderA.fillS + orderB.fillS
        ranges.push_back({{152, 8}});      // orderA.data
        ranges.push_back({{160, 8}});      // orderB.data
        for (const std::vector<Range>& subRanges : ranges)
        {
            for (unsigned int i = 0; i < numRings; i++)
            {
                for (const Range& subRange : subRanges)
                {
                    unsigned int ringStart = i * ringSize;
                    transformedData.add(subArray(flatten(compressedData.data), ringStart + subRange.offset, subRange.length));
                }
            }
        }
    }
};

// Settles a single ring (a matched pair of orders A and B).
// Verifies the match, moves the traded amounts and fees/rebates between
// balances, and threads the Merkle-root updates for both users, the
// protocol fee pool, and the operator.
// NOTE(review): the order of the member declarations below matters — the
// constructor's initializer list builds each gadget from the results of the
// previous ones.
class RingSettlementGadget : public GadgetT
{
public:

    const Constants& constants;

    // Orders
    OrderGadget orderA;
    OrderGadget orderB;

    // Balances (dynamic: front() is the value before, back() after transfers)
    DynamicVariableGadget balanceS_A;
    DynamicVariableGadget balanceB_A;
    DynamicVariableGadget balanceS_B;
    DynamicVariableGadget balanceB_B;
    DynamicVariableGadget balanceA_P;
    DynamicVariableGadget balanceB_P;
    DynamicVariableGadget balanceA_O;
    DynamicVariableGadget balanceB_O;

    // Initial trading history roots of the operator's two balance leaves
    const VariableT tradingHistoryRootA_O;
    const VariableT tradingHistoryRootB_O;

    // Order fills (Float24-encoded amounts)
    FloatGadget fillS_A;
    FloatGadget fillS_B;

    // Match orders
    OrderMatchingGadget orderMatching;

    // Calculate fees
    FeeCalculatorGadget feeCalculatorA;
    FeeCalculatorGadget feeCalculatorB;

    /* Token Transfers */
    // Actual trade
    TransferGadget fillBB_from_balanceSA_to_balanceBB;
    TransferGadget fillSB_from_balanceSB_to_balanceBA;
    // Fees
    TransferGadget feeA_from_balanceBA_to_balanceAO;
    TransferGadget feeB_from_balanceBB_to_balanceBO;
    // Rebates
    TransferGadget rebateA_from_balanceAO_to_balanceBA;
    TransferGadget rebateB_from_balanceBO_to_balanceBB;
    // Protocol fees
    TransferGadget protocolFeeA_from_balanceAO_to_balanceAP;
    TransferGadget protocolFeeB_from_balanceBO_to_balanceBP;

    // Update UserA
    UpdateTradeHistoryGadget updateTradeHistory_A;
    UpdateBalanceGadget updateBalanceS_A;
    UpdateBalanceGadget updateBalanceB_A;
    UpdateAccountGadget updateAccount_A;

    // Update UserB
    UpdateTradeHistoryGadget updateTradeHistory_B;
    UpdateBalanceGadget updateBalanceS_B;
    UpdateBalanceGadget updateBalanceB_B;
    UpdateAccountGadget updateAccount_B;

    // Update Protocol pool
    UpdateBalanceGadget updateBalanceA_P;
    UpdateBalanceGadget updateBalanceB_P;

    // Update Operator
    UpdateBalanceGadget updateBalanceA_O;
    UpdateBalanceGadget updateBalanceB_O;

    RingSettlementGadget(
        ProtoboardT& pb,
        const jubjub::Params& params,
        const Constants& _constants,
        const VariableT& exchangeID,
        const VariableT& accountsRoot,
        const VariableT& timestamp,
        const VariableT& protocolTakerFeeBips,
        const VariableT& protocolMakerFeeBips,
        const VariableT& protocolBalancesRoot,
        const VariableT& operatorBalancesRoot,
        const std::string& prefix
    ) :
        GadgetT(pb, prefix),

        constants(_constants),

        // Orders
        orderA(pb, params, constants, exchangeID, FMT(prefix, ".orderA")),
        orderB(pb, params, constants, exchangeID, FMT(prefix, ".orderB")),

        // Balances
        balanceS_A(pb, orderA.balanceSBefore.balance, FMT(prefix, ".balanceS_A")),
        balanceB_A(pb, orderA.balanceBBefore.balance, FMT(prefix, ".balanceB_A")),
        balanceS_B(pb, orderB.balanceSBefore.balance, FMT(prefix, ".balanceS_B")),
        balanceB_B(pb, orderB.balanceBBefore.balance, FMT(prefix, ".balanceB_B")),
        balanceA_P(pb, FMT(prefix, ".balanceA_P")),
        balanceB_P(pb, FMT(prefix, ".balanceB_P")),
        balanceA_O(pb, FMT(prefix, ".balanceA_O")),
        balanceB_O(pb, FMT(prefix, ".balanceB_O")),

        // Initial trading history roots
        tradingHistoryRootA_O(make_variable(pb, FMT(prefix, ".tradingHistoryRootA_O"))),
        tradingHistoryRootB_O(make_variable(pb, FMT(prefix, ".tradingHistoryRootB_O"))),

        // Order fills
        fillS_A(pb, constants, Float24Encoding, FMT(prefix, ".fillS_A")),
        fillS_B(pb, constants, Float24Encoding, FMT(prefix, ".fillS_B")),

        // Match orders
        orderMatching(pb, constants, timestamp, orderA, orderB, fillS_A.value(), fillS_B.value(), FMT(prefix, ".orderMatching")),

        // Calculate fees
        // A is the taker side (taker protocol fee), B the maker side.
        feeCalculatorA(pb, constants, fillS_B.value(), protocolTakerFeeBips, orderA.feeBips.packed, orderA.rebateBips.packed, FMT(prefix, ".feeCalculatorA")),
        feeCalculatorB(pb, constants, fillS_A.value(), protocolMakerFeeBips, orderB.feeBips.packed, orderB.rebateBips.packed, FMT(prefix, ".feeCalculatorB")),

        /* Token Transfers */
        // Actual trade
        fillBB_from_balanceSA_to_balanceBB(pb, balanceS_A, balanceB_B, fillS_A.value(), FMT(prefix, ".fillBB_from_balanceSA_to_balanceBB")),
        fillSB_from_balanceSB_to_balanceBA(pb, balanceS_B, balanceB_A, fillS_B.value(), FMT(prefix, ".fillSB_from_balanceSB_to_balanceBA")),
        // Fees
        feeA_from_balanceBA_to_balanceAO(pb, balanceB_A, balanceA_O, feeCalculatorA.getFee(), FMT(prefix, ".feeA_from_balanceBA_to_balanceAO")),
        feeB_from_balanceBB_to_balanceBO(pb, balanceB_B, balanceB_O, feeCalculatorB.getFee(), FMT(prefix, ".feeB_from_balanceBB_to_balanceBO")),
        // Rebates
        rebateA_from_balanceAO_to_balanceBA(pb, balanceA_O, balanceB_A, feeCalculatorA.getRebate(), FMT(prefix, ".rebateA_from_balanceAO_to_balanceBA")),
        rebateB_from_balanceBO_to_balanceBB(pb, balanceB_O, balanceB_B, feeCalculatorB.getRebate(), FMT(prefix, ".rebateB_from_balanceBO_to_balanceBB")),
        // Protocol fees
        protocolFeeA_from_balanceAO_to_balanceAP(pb, balanceA_O, balanceA_P, feeCalculatorA.getProtocolFee(), FMT(prefix, ".protocolFeeA_from_balanceAO_to_balanceAP")),
        protocolFeeB_from_balanceBO_to_balanceBP(pb, balanceB_O, balanceB_P, feeCalculatorB.getProtocolFee(), FMT(prefix, ".protocolFeeB_from_balanceBO_to_balanceBP")),

        // Update UserA
        updateTradeHistory_A(pb, orderA.balanceSBefore.tradingHistory, subArray(orderA.orderID.bits, 0, NUM_BITS_TRADING_HISTORY),
                             {orderA.tradeHistoryBefore.filled, orderA.tradeHistoryBefore.orderID},
                             {orderMatching.getFilledAfter_A(), orderA.orderID.packed},
                             FMT(prefix, ".updateTradeHistory_A")),
        updateBalanceS_A(pb, orderA.accountBefore.balancesRoot, orderA.tokenS.bits,
                         {balanceS_A.front(), orderA.balanceSBefore.tradingHistory},
                         {balanceS_A.back(), updateTradeHistory_A.result()},
                         FMT(prefix, ".updateBalanceS_A")),
        updateBalanceB_A(pb, updateBalanceS_A.result(), orderA.tokenB.bits,
                         {balanceB_A.front(), orderA.balanceBBefore.tradingHistory},
                         {balanceB_A.back(), orderA.balanceBBefore.tradingHistory},
                         FMT(prefix, ".updateBalanceB_A")),
        updateAccount_A(pb, accountsRoot, orderA.accountID.bits,
                        {orderA.accountBefore.publicKey.x, orderA.accountBefore.publicKey.y, orderA.accountBefore.nonce, orderA.accountBefore.balancesRoot},
                        {orderA.accountBefore.publicKey.x, orderA.accountBefore.publicKey.y, orderA.accountBefore.nonce, updateBalanceB_A.result()},
                        FMT(prefix, ".updateAccount_A")),

        // Update UserB
        updateTradeHistory_B(pb, orderB.balanceSBefore.tradingHistory, subArray(orderB.orderID.bits, 0, NUM_BITS_TRADING_HISTORY),
                             {orderB.tradeHistoryBefore.filled, orderB.tradeHistoryBefore.orderID},
                             {orderMatching.getFilledAfter_B(), orderB.orderID.packed},
                             FMT(prefix, ".updateTradeHistory_B")),
        updateBalanceS_B(pb, orderB.accountBefore.balancesRoot, orderB.tokenS.bits,
                         {balanceS_B.front(), orderB.balanceSBefore.tradingHistory},
                         {balanceS_B.back(), updateTradeHistory_B.result()},
                         FMT(prefix, ".updateBalanceS_B")),
        updateBalanceB_B(pb, updateBalanceS_B.result(), orderB.tokenB.bits,
                         {balanceB_B.front(), orderB.balanceBBefore.tradingHistory},
                         {balanceB_B.back(), orderB.balanceBBefore.tradingHistory},
                         FMT(prefix, ".updateBalanceB_B")),
        // UserB's account update starts from UserA's new accounts root.
        updateAccount_B(pb, updateAccount_A.result(), orderB.accountID.bits,
                        {orderB.accountBefore.publicKey.x, orderB.accountBefore.publicKey.y, orderB.accountBefore.nonce, orderB.accountBefore.balancesRoot},
                        {orderB.accountBefore.publicKey.x, orderB.accountBefore.publicKey.y, orderB.accountBefore.nonce, updateBalanceB_B.result()},
                        FMT(prefix, ".updateAccount_B")),

        // Update Protocol pool
        updateBalanceA_P(pb, protocolBalancesRoot, orderA.tokenB.bits,
                         {balanceA_P.front(), constants.emptyTradeHistory},
                         {balanceA_P.back(), constants.emptyTradeHistory},
                         FMT(prefix, ".updateBalanceA_P")),
        updateBalanceB_P(pb, updateBalanceA_P.result(), orderB.tokenB.bits,
                         {balanceB_P.front(), constants.emptyTradeHistory},
                         {balanceB_P.back(), constants.emptyTradeHistory},
                         FMT(prefix, ".updateBalanceB_P")),

        // Update Operator
        updateBalanceA_O(pb, operatorBalancesRoot, orderA.tokenB.bits,
                         {balanceA_O.front(), tradingHistoryRootA_O},
                         {balanceA_O.back(), tradingHistoryRootA_O},
                         FMT(prefix, ".updateBalanceA_O")),
        updateBalanceB_O(pb, updateBalanceA_O.result(), orderB.tokenB.bits,
                         {balanceB_O.front(), tradingHistoryRootB_O},
                         {balanceB_O.back(), tradingHistoryRootB_O},
                         FMT(prefix, ".updateBalanceB_O"))
    {

    }

    // Fills in the witness for this ring from the settlement data.
    // The call order mirrors the constraint construction order above.
    void generate_r1cs_witness(const RingSettlement& ringSettlement)
    {
        // Orders
        orderA.generate_r1cs_witness(ringSettlement.ring.orderA,
                                     ringSettlement.accountUpdate_A.before,
                                     ringSettlement.balanceUpdateS_A.before,
                                     ringSettlement.balanceUpdateB_A.before,
                                     ringSettlement.tradeHistoryUpdate_A.before);
        orderB.generate_r1cs_witness(ringSettlement.ring.orderB,
                                     ringSettlement.accountUpdate_B.before,
                                     ringSettlement.balanceUpdateS_B.before,
                                     ringSettlement.balanceUpdateB_B.before,
                                     ringSettlement.tradeHistoryUpdate_B.before);

        // Balances before
        balanceA_P.generate_r1cs_witness(ringSettlement.balanceUpdateA_P.before.balance);
        balanceB_P.generate_r1cs_witness(ringSettlement.balanceUpdateB_P.before.balance);
        balanceA_O.generate_r1cs_witness(ringSettlement.balanceUpdateA_O.before.balance);
        balanceB_O.generate_r1cs_witness(ringSettlement.balanceUpdateB_O.before.balance);

        // Trading history roots before
        pb.val(tradingHistoryRootA_O) = ringSettlement.balanceUpdateA_O.before.tradingHistoryRoot;
        pb.val(tradingHistoryRootB_O) = ringSettlement.balanceUpdateB_O.before.tradingHistoryRoot;

        // Order fills
        fillS_A.generate_r1cs_witness(ringSettlement.ring.fillS_A);
        fillS_B.generate_r1cs_witness(ringSettlement.ring.fillS_B);

        // Match orders
        orderMatching.generate_r1cs_witness();

        // Calculate fees
        feeCalculatorA.generate_r1cs_witness();
        feeCalculatorB.generate_r1cs_witness();

        /* Token Transfers */
        // Actual trade
        fillBB_from_balanceSA_to_balanceBB.generate_r1cs_witness();
        fillSB_from_balanceSB_to_balanceBA.generate_r1cs_witness();
        // Fees
        feeA_from_balanceBA_to_balanceAO.generate_r1cs_witness();
        feeB_from_balanceBB_to_balanceBO.generate_r1cs_witness();
        // Rebates
        rebateA_from_balanceAO_to_balanceBA.generate_r1cs_witness();
        rebateB_from_balanceBO_to_balanceBB.generate_r1cs_witness();
        // Protocol fees
        protocolFeeA_from_balanceAO_to_balanceAP.generate_r1cs_witness();
        protocolFeeB_from_balanceBO_to_balanceBP.generate_r1cs_witness();

        // Update UserA
        updateTradeHistory_A.generate_r1cs_witness(ringSettlement.tradeHistoryUpdate_A.proof);
        updateBalanceS_A.generate_r1cs_witness(ringSettlement.balanceUpdateS_A.proof);
        updateBalanceB_A.generate_r1cs_witness(ringSettlement.balanceUpdateB_A.proof);
        updateAccount_A.generate_r1cs_witness(ringSettlement.accountUpdate_A.proof);

        // Update UserB
        updateTradeHistory_B.generate_r1cs_witness(ringSettlement.tradeHistoryUpdate_B.proof);
        updateBalanceS_B.generate_r1cs_witness(ringSettlement.balanceUpdateS_B.proof);
        updateBalanceB_B.generate_r1cs_witness(ringSettlement.balanceUpdateB_B.proof);
        updateAccount_B.generate_r1cs_witness(ringSettlement.accountUpdate_B.proof);

        // Update Protocol pool
        updateBalanceA_P.generate_r1cs_witness(ringSettlement.balanceUpdateA_P.proof);
        updateBalanceB_P.generate_r1cs_witness(ringSettlement.balanceUpdateB_P.proof);

        // Update Operator
        updateBalanceA_O.generate_r1cs_witness(ringSettlement.balanceUpdateA_O.proof);
        updateBalanceB_O.generate_r1cs_witness(ringSettlement.balanceUpdateB_O.proof);
    }

    // Adds the R1CS constraints of every sub-gadget of this ring.
    void generate_r1cs_constraints()
    {
        // Orders
        orderA.generate_r1cs_constraints();
        orderB.generate_r1cs_constraints();

        // Order fills
        fillS_A.generate_r1cs_constraints();
        fillS_B.generate_r1cs_constraints();

        // Match orders
        orderMatching.generate_r1cs_constraints();

        // Calculate fees
        feeCalculatorA.generate_r1cs_constraints();
        feeCalculatorB.generate_r1cs_constraints();

        /* Token Transfers */
        // Actual trade
        fillBB_from_balanceSA_to_balanceBB.generate_r1cs_constraints();
        fillSB_from_balanceSB_to_balanceBA.generate_r1cs_constraints();
        // Fees
        feeA_from_balanceBA_to_balanceAO.generate_r1cs_constraints();
        feeB_from_balanceBB_to_balanceBO.generate_r1cs_constraints();
        // Rebates
        rebateA_from_balanceAO_to_balanceBA.generate_r1cs_constraints();
        rebateB_from_balanceBO_to_balanceBB.generate_r1cs_constraints();
        // Protocol fees
        protocolFeeA_from_balanceAO_to_balanceAP.generate_r1cs_constraints();
        protocolFeeB_from_balanceBO_to_balanceBP.generate_r1cs_constraints();

        // Update UserA
        updateTradeHistory_A.generate_r1cs_constraints();
        updateBalanceS_A.generate_r1cs_constraints();
        updateBalanceB_A.generate_r1cs_constraints();
        updateAccount_A.generate_r1cs_constraints();

        // Update UserB
        updateTradeHistory_B.generate_r1cs_constraints();
        updateBalanceS_B.generate_r1cs_constraints();
        updateBalanceB_B.generate_r1cs_constraints();
        updateAccount_B.generate_r1cs_constraints();

        // Update Protocol fee pool
        updateBalanceA_P.generate_r1cs_constraints();
        updateBalanceB_P.generate_r1cs_constraints();

        // Update Operator
        updateBalanceA_O.generate_r1cs_constraints();
        updateBalanceB_O.generate_r1cs_constraints();
    }

    // Returns the DA fields of this ring in the fixed on-chain layout
    // (padding bits, trade-history overwrite flags, IDs, tokens, fills,
    // buy/rebate flags and fee bips). The layout must match the offsets used
    // by TransformRingSettlementDataGadget.
    const std::vector<VariableArrayT> getPublicData() const
    {
        return {
            VariableArrayT(1, constants.zero), VariableArrayT(1, orderA.tradeHistory.getOverwrite()), subArray(orderA.orderID.bits, 0, NUM_BITS_TRADING_HISTORY),
            VariableArrayT(1, constants.zero), VariableArrayT(1, orderB.tradeHistory.getOverwrite()), subArray(orderB.orderID.bits, 0, NUM_BITS_TRADING_HISTORY),

            orderA.accountID.bits,
            orderB.accountID.bits,

            VariableArrayT(2, constants.zero), orderA.tokenS.bits,
            VariableArrayT(2, constants.zero), orderB.tokenS.bits,

            fillS_A.bits(),
            fillS_B.bits(),

            orderA.buy.bits, VariableArrayT(1, orderA.hasRebate()), orderA.feeOrRebateBips.bits,
            orderB.buy.bits, VariableArrayT(1, orderB.hasRebate()), orderB.feeOrRebateBips.bits,
        };
    }

    // Accounts root after both users' updates.
    const VariableT& getNewAccountsRoot() const
    {
        return updateAccount_B.result();
    }

    // Protocol pool balances root after both fee deposits.
    const VariableT& getNewProtocolBalancesRoot() const
    {
        return updateBalanceB_P.result();
    }

    // Operator balances root after both fee credits.
    const VariableT& getNewOperatorBalancesRoot() const
    {
        return updateBalanceB_O.result();
    }
};

// Top-level circuit for a block of ring settlements.
// Chains the per-ring gadgets through the shared Merkle roots, updates the
// protocol-pool and operator accounts, exposes the block's public data, and
// verifies the operator's signature over it.
class RingSettlementCircuit : public Circuit
{
public:

    PublicDataGadget publicData;
    Constants constants;
    jubjub::Params params;

    // State
    AccountGadget accountBefore_O;
    AccountGadget accountBefore_P;

    // Inputs
    DualVariableGadget exchangeID;
    DualVariableGadget merkleRootBefore;
    DualVariableGadget merkleRootAfter;
    DualVariableGadget timestamp;
    DualVariableGadget protocolTakerFeeBips;
    DualVariableGadget protocolMakerFeeBips;
    DualVariableGadget operatorAccountID;

    // Increment the nonce of the Operator
    AddGadget nonce_after;

    // Transform the ring data
    TransformRingSettlementDataGadget transformData;

    // Signature over (publicInput, operator nonce)
    Poseidon_gadget_T<3, 1, 6, 51, 2, 1> hash;
    SignatureVerifier signatureVerifier;

    // Ring settlements
    bool onchainDataAvailability;
    unsigned int numRings;
    std::vector<RingSettlementGadget> ringSettlements;
    Bitstream dataAvailabityData;

    // Update Protocol pool
    std::unique_ptr<UpdateAccountGadget> updateAccount_P;

    // Update Operator
    std::unique_ptr<UpdateAccountGadget> updateAccount_O;

    RingSettlementCircuit(ProtoboardT& pb, const std::string& prefix) :
        Circuit(pb, prefix),

        publicData(pb, FMT(prefix, ".publicData")),
        constants(pb, FMT(prefix, ".constants")),

        // State
        accountBefore_O(pb, FMT(prefix, ".accountBefore_O")),
        accountBefore_P(pb, FMT(prefix, ".accountBefore_P")),

        // Inputs
        exchangeID(pb, NUM_BITS_EXCHANGE_ID, FMT(prefix, ".exchangeID")),
        merkleRootBefore(pb, 256, FMT(prefix, ".merkleRootBefore")),
        merkleRootAfter(pb, 256, FMT(prefix, ".merkleRootAfter")),
        timestamp(pb, NUM_BITS_TIMESTAMP, FMT(prefix, ".timestamp")),
        protocolTakerFeeBips(pb, NUM_BITS_PROTOCOL_FEE_BIPS, FMT(prefix, ".protocolTakerFeeBips")),
        protocolMakerFeeBips(pb, NUM_BITS_PROTOCOL_FEE_BIPS, FMT(prefix, ".protocolMakerFeeBips")),
        operatorAccountID(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".operatorAccountID")),

        // Increment the nonce of the Operator
        nonce_after(pb, accountBefore_O.nonce, constants.one, NUM_BITS_NONCE, FMT(prefix, ".nonce_after")),

        // Transform the ring data
        transformData(pb, FMT(prefix, ".transformData")),

        // Signature
        hash(pb, var_array({ publicData.publicInput, accountBefore_O.nonce }), FMT(this->annotation_prefix, ".hash")),
        signatureVerifier(pb, params, constants, accountBefore_O.publicKey, hash.result(), FMT(prefix, ".signatureVerifier"))
    {

    }

    // Builds the constraint system for a block of `blockSize` rings.
    // Must be called exactly once before generateWitness.
    void generateConstraints(bool onchainDataAvailability, unsigned int blockSize) override
    {
        this->onchainDataAvailability = onchainDataAvailability;
        this->numRings = blockSize;

        constants.generate_r1cs_constraints();

        // Inputs
        exchangeID.generate_r1cs_constraints(true);
        merkleRootBefore.generate_r1cs_constraints(true);
        merkleRootAfter.generate_r1cs_constraints(true);
        timestamp.generate_r1cs_constraints(true);
        protocolTakerFeeBips.generate_r1cs_constraints(true);
        protocolMakerFeeBips.generate_r1cs_constraints(true);
        operatorAccountID.generate_r1cs_constraints(true);

        // Increment the nonce of the Operator
        nonce_after.generate_r1cs_constraints();

        // Ring settlements
        // reserve() keeps the gadgets at stable addresses while the vector
        // grows, since each ring references the previous ring's outputs.
        ringSettlements.reserve(numRings);
        for (size_t j = 0; j < numRings; j++)
        {
            // NOTE(review): copied by value (the other two roots are held by
            // reference) — presumably intentional; confirm before changing.
            const VariableT ringAccountsRoot = (j == 0) ? merkleRootBefore.packed : ringSettlements.back().getNewAccountsRoot();
            const VariableT& ringProtocolBalancesRoot = (j == 0) ? accountBefore_P.balancesRoot : ringSettlements.back().getNewProtocolBalancesRoot();
            const VariableT& ringOperatorBalancesRoot = (j == 0) ? accountBefore_O.balancesRoot : ringSettlements.back().getNewOperatorBalancesRoot();
            ringSettlements.emplace_back(
                pb,
                params,
                constants,
                exchangeID.packed,
                ringAccountsRoot,
                timestamp.packed,
                protocolTakerFeeBips.packed,
                protocolMakerFeeBips.packed,
                ringProtocolBalancesRoot,
                ringOperatorBalancesRoot,
                std::string("trade_") + std::to_string(j)
            );
            ringSettlements.back().generate_r1cs_constraints();

            if (onchainDataAvailability)
            {
                // Store data from ring settlement
                dataAvailabityData.add(ringSettlements.back().getPublicData());
            }
        }

        // Update Protocol pool
        updateAccount_P.reset(new UpdateAccountGadget(pb, ringSettlements.back().getNewAccountsRoot(), constants.zeroAccount,
                      {accountBefore_P.publicKey.x, accountBefore_P.publicKey.y, accountBefore_P.nonce, accountBefore_P.balancesRoot},
                      {accountBefore_P.publicKey.x, accountBefore_P.publicKey.y, accountBefore_P.nonce, ringSettlements.back().getNewProtocolBalancesRoot()},
                      FMT(annotation_prefix, ".updateAccount_P")));
        updateAccount_P->generate_r1cs_constraints();

        // Update Operator
        updateAccount_O.reset(new UpdateAccountGadget(pb, updateAccount_P->result(), operatorAccountID.bits,
                      {accountBefore_O.publicKey.x, accountBefore_O.publicKey.y, accountBefore_O.nonce, accountBefore_O.balancesRoot},
                      {accountBefore_O.publicKey.x, accountBefore_O.publicKey.y, nonce_after.result(), ringSettlements.back().getNewOperatorBalancesRoot()},
                      FMT(annotation_prefix, ".updateAccount_O")));
        updateAccount_O->generate_r1cs_constraints();

        // Public data
        publicData.add(exchangeID.bits);
        publicData.add(merkleRootBefore.bits);
        publicData.add(merkleRootAfter.bits);
        publicData.add(timestamp.bits);
        publicData.add(protocolTakerFeeBips.bits);
        publicData.add(protocolMakerFeeBips.bits);
        if (onchainDataAvailability)
        {
            publicData.add(operatorAccountID.bits);
            // Transform the ring data
            // Compress the ring-settlement data: the hash is computed
            // off-chain and the public-data hash submitted on-chain is
            // checked against it, ensuring the ring-settlement data used to
            // generate the proof off-chain is consistent with the on-chain
            // data.
            transformData.generate_r1cs_constraints(numRings, flattenReverse(dataAvailabityData.data));
            publicData.add(reverse(transformData.result()));
        }
        publicData.generate_r1cs_constraints();

        // Signature
        hash.generate_r1cs_constraints();
        signatureVerifier.generate_r1cs_constraints();

        // Check the new merkle root
        requireEqual(pb, updateAccount_O->result(), merkleRootAfter.packed, "newMerkleRoot");
    }

    // Fills in the witness for a whole block.
    // Returns false if the block's ring count does not match the circuit.
    bool generateWitness(const RingSettlementBlock& block)
    {
        if (block.ringSettlements.size() != numRings)
        {
            std::cout << "Invalid number of rings: " << block.ringSettlements.size() << std::endl;
            return false;
        }

        constants.generate_r1cs_witness();

        // State
        accountBefore_O.generate_r1cs_witness(block.accountUpdate_O.before);
        accountBefore_P.generate_r1cs_witness(block.accountUpdate_P.before);

        // Inputs
        exchangeID.generate_r1cs_witness(pb, block.exchangeID);
        merkleRootBefore.generate_r1cs_witness(pb, block.merkleRootBefore);
        merkleRootAfter.generate_r1cs_witness(pb, block.merkleRootAfter);
        timestamp.generate_r1cs_witness(pb, block.timestamp);
        protocolTakerFeeBips.generate_r1cs_witness(pb, block.protocolTakerFeeBips);
        protocolMakerFeeBips.generate_r1cs_witness(pb, block.protocolMakerFeeBips);
        operatorAccountID.generate_r1cs_witness(pb, block.operatorAccountID);

        // Increment the nonce of the Operator
        nonce_after.generate_r1cs_witness();

        // Ring settlements
        // NOTE(review): each iteration writes witness values for a disjoint
        // set of variables, but all through the shared protoboard — assumes
        // those writes are safe to do concurrently; verify before enabling
        // MULTICORE on a new protoboard implementation.
#ifdef MULTICORE
        #pragma omp parallel for
#endif
        for(unsigned int i = 0; i < block.ringSettlements.size(); i++)
        {
            ringSettlements[i].generate_r1cs_witness(block.ringSettlements[i]);
        }

        // Update Protocol pool
        updateAccount_P->generate_r1cs_witness(block.accountUpdate_P.proof);

        // Update Operator
        updateAccount_O->generate_r1cs_witness(block.accountUpdate_O.proof);

        // Transform the ring data
        // On-chain data availability only
        if (onchainDataAvailability)
        {
            transformData.generate_r1cs_witness();
        }

        // Public data
        publicData.generate_r1cs_witness();

        // Signature
        hash.generate_r1cs_witness();
        signatureVerifier.generate_r1cs_witness(block.signature);

        return true;
    }

    bool generateWitness(const json& input) override
    {
        return generateWitness(input.get<Loopring::RingSettlementBlock>());
    }

    BlockType getBlockType() override
    {
        return BlockType::RingSettlement;
    }

    unsigned int getBlockSize() override
    {
        return numRings;
    }

    void printInfo() override
    {
        std::cout << pb.num_constraints() << " constraints (" << (pb.num_constraints() / numRings) << "/ring)" << std::endl;
    }
};

}

#endif
GB_unaryop__ainv_uint8_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint8_uint32
// op(A') function:  GB_tran__ainv_uint8_uint32

// C type:   uint8_t
// A type:   uint32_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// Note: unary minus on uint8_t is well-defined modular negation
// (wraps mod 256); this is the intended AINV semantics for unsigned types.
#define GB_OP(z, x)   \
    z = -x ;

// casting
#define GB_CASTING(z, aij)   \
    uint8_t z = (uint8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint8_uint32
(
    uint8_t *Cx,            // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // elementwise: entry p reads only Ax [p] and writes only Cx [p], so the
    // loop is safe both in parallel and when Cx aliases Ax (in-place apply)
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is generated by this template, which uses
    // the GB_CAST_OP macro defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Q5.3-d.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <string.h>

#define MAXIMO 100

/* Returns a pseudo-random integer in [0, MAXIMO). */
int num_aleatorio() {
   int numero = random() % MAXIMO;
   return numero;
}

/* Fills a[0..n-1] with pseudo-random values. */
void geraMatriz(int * a, int n) {
   int i;
   for (i = 0; i < n; ++i) {
      a[i] = num_aleatorio();
   }
}

/* Prints the n elements of a, space-separated, on one line. */
void imprimeMatriz(int * a, int n) {
   int i;
   for (i = 0; i < n; ++i) {
      printf("%d ", a[i]);
   }
   printf("\n");
}

/*
 * Parallel rank sort (enumeration sort): for each element, count how many
 * elements must precede it (smaller values, plus equal values at a lower
 * index to keep the sort stable), then place it at that rank in temp.
 * Usage: program <num_threads> <n>.
 *
 * Fixes vs. the previous version:
 *  - validate argc before reading argv[1]/argv[2] (was UB on missing args)
 *  - check malloc results
 *  - free(a) (was leaked)
 *  - removed unused variables number_tosses / number_in_circle
 */
int main(int argc, char* argv[]) {
   int thread_count, i, j, n, count;

   if (argc < 3) {
      fprintf(stderr, "usage: %s <num_threads> <n>\n", argv[0]);
      return EXIT_FAILURE;
   }

   srandom(0);
   thread_count = strtol(argv[1], NULL, 10);
   n = strtol(argv[2], NULL, 10);
   if (thread_count <= 0 || n <= 0) {
      fprintf(stderr, "num_threads and n must be positive integers\n");
      return EXIT_FAILURE;
   }

   int * a = malloc(n * sizeof(int));
   int * temp = malloc(n * sizeof(int));
   if (a == NULL || temp == NULL) {
      fprintf(stderr, "out of memory\n");
      free(a);      /* free(NULL) is a no-op, so this is safe either way */
      free(temp);
      return EXIT_FAILURE;
   }

   geraMatriz(a, n);
   //imprimeMatriz(a, n);

   double start = omp_get_wtime();
   /* Ranks are distinct (ties broken by index), so each iteration writes a
    * unique temp slot — no synchronization needed between iterations. */
#pragma omp parallel for num_threads(thread_count) \
   default(none) private(i, j, count) shared(a, n, temp, thread_count)
   for (i = 0; i < n; i++) {
      count = 0;
      for (j = 0; j < n; j++)
         if (a[j] < a[i])
            count++;
         else if (a[j] == a[i] && j < i)
            count++;
      temp[count] = a[i];
   }
   memcpy(a, temp, n * sizeof(int));
   double finish = omp_get_wtime();

   free(temp);
   printf("Tempo estimado %e segundos\n", finish - start);
   //imprimeMatriz(a, n);
   free(a);
   return 0;
}  /* main */
ligra.h
// This code is part of the project "Ligra: A Lightweight Graph Processing // Framework for Shared Memory", presented at Principles and Practice of // Parallel Programming, 2013. // Copyright (c) 2013 Julian Shun and Guy Blelloch // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights (to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#ifndef LIGRA_H
#define LIGRA_H
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <cstring>
#include <string>
#include <algorithm>
#include <cassert>
#include "parallel.h"
#include "gettime.h"
#include "timer.h" //timer from GAP
#include "utils.h"
#include "vertex.h"
#include "compressedVertex.h"
#include "vertexSubset.h"
#include "graph.h"
#include "IO.h"
#include "parseCommandLine.h"
#include "gettime.h"
#include "index_map.h"
#include "edgeMap_utils.h"
using namespace std;

//*****START FRAMEWORK*****

// Bit-flags controlling edgeMap/vertexMap behavior; combine with |.
typedef uint32_t flags;
const flags no_output = 1;          // do not materialize the output frontier
const flags pack_edges = 2;         // edgeMapFilter: physically pack adjacency lists
const flags sparse_no_filter = 4;   // use the no-filter sparse traversal variant
const flags dense_forward = 8;      // dense traversal over OUT-edges instead of in-edges
const flags dense_parallel = 16;    // parallelize within each vertex's edge list (dense)
const flags remove_duplicates = 32; // deduplicate target vertices in sparse output

// True when the caller wants the output frontier materialized.
inline bool should_output(const flags& fl) { return !(fl & no_output); }

const int dynChunkSz = 64; //chunk size for openmp's dynamic scheduling

// Dense (pull-style) edge map: every vertex v with f.cond(v) true scans its
// in-neighbours and applies f.  The helper decodeInNghBreakEarly presumably
// stops early once f.cond(v) becomes false -- confirm in vertex.h.
// Returns the new frontier as a dense boolean array when output is requested.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapDense(graph<vertex> GA, VS& vertexSubset, F &f, const flags fl) {
  using D = tuple<bool, data>;
  long n = GA.n;
  vertex *G = GA.V;
  if (should_output(fl)) {
    D* next = newA(D, n);
    auto g = get_emdense_gen<data>(next);
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (long v=0; v<n; v++) {
      std::get<0>(next[v]) = 0;  // clear the membership bit before decoding
      if (f.cond(v)) {
        G[v].decodeInNghBreakEarly(v, vertexSubset, f, g, fl & dense_parallel);
      }
    }
    return vertexSubsetData<data>(n, next);
  } else {
    auto g = get_emdense_nooutput_gen<data>();
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (long v=0; v<n; v++) {
      if (f.cond(v)) {
        G[v].decodeInNghBreakEarly(v, vertexSubset, f, g, fl & dense_parallel);
      }
    }
    return vertexSubsetData<data>(n);
  }
}

// Dense forward (push-style) edge map: every vertex IN the frontier pushes f
// along its out-edges.  Unlike edgeMapDense, the next-frontier bits are
// cleared in a separate pass before decoding.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapDenseForward(graph<vertex> GA, VS& vertexSubset, F &f, const flags fl) {
  using D = tuple<bool, data>;
  long n = GA.n;
  vertex *G = GA.V;
  if (should_output(fl)) {
    D* next = newA(D, n);
    auto g = get_emdense_forward_gen<data>(next);
    parallel_for(long i=0;i<n;i++) { std::get<0>(next[i]) = 0; }
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (long i=0; i<n; i++) {
      if (vertexSubset.isIn(i)) {
        G[i].decodeOutNgh(i, f, g);
      }
    }
    return vertexSubsetData<data>(n, next);
  } else {
    auto g = get_emdense_forward_nooutput_gen<data>();
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (long i=0; i<n; i++) {
      if (vertexSubset.isIn(i)) {
        G[i].decodeOutNgh(i, f, g);
      }
    }
    return vertexSubsetData<data>(n);
  }
}

// Sparse (push-style) edge map over an explicit frontier of m vertices.
// 'degrees' is scanned in place to produce per-vertex output offsets;
// surviving edges are compacted with pbbs::filterf, optionally after
// removing duplicate targets via GA.flags.
// NOTE: 'degrees' is clobbered by the in-place plusScan when output is on.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapSparse(graph<vertex>& GA, vertex* frontierVertices, VS& indices,
                                     uintT* degrees, uintT m, F &f, const flags fl) {
  using S = tuple<uintE, data>;
  long n = indices.n;
  S* outEdges;
  long outEdgeCount = 0;
  if (should_output(fl)) {
    uintT* offsets = degrees;
    outEdgeCount = sequence::plusScan(offsets, offsets, m);
    outEdges = newA(S, outEdgeCount);
    auto g = get_emsparse_gen<data>(outEdges);
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (size_t i = 0; i < m; i++) {
      uintT v = indices.vtx(i), o = offsets[i];
      vertex vert = frontierVertices[i];
      vert.decodeOutNghSparse(v, o, f, g);
    }
  } else {
    auto g = get_emsparse_nooutput_gen<data>();
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (size_t i = 0; i < m; i++) {
      uintT v = indices.vtx(i);
      vertex vert = frontierVertices[i];
      vert.decodeOutNghSparse(v, 0, f, g);
    }
  }
  if (should_output(fl)) {
    S* nextIndices = newA(S, outEdgeCount);
    if (fl & remove_duplicates) {
      if (GA.flags == NULL) {
        // lazily allocate the per-vertex dedup scratch array
        GA.flags = newA(uintE, n);
        parallel_for(long i=0;i<n;i++) { GA.flags[i]=UINT_E_MAX; }
      }
      auto get_key = [&] (size_t i) -> uintE& { return std::get<0>(outEdges[i]); };
      remDuplicates(get_key, GA.flags, outEdgeCount, n);
    }
    // keep only entries whose key was not blanked to UINT_E_MAX
    auto p = [] (tuple<uintE, data>& v) { return std::get<0>(v) != UINT_E_MAX; };
    size_t nextM = pbbs::filterf(outEdges, nextIndices, outEdgeCount, p);
    free(outEdges);
    return vertexSubsetData<data>(n, nextM, nextIndices);
  }
  else {
    return vertexSubsetData<data>(n);
  }
}

// Sparse edge map without the filtering pass: output slots are compacted by
// chunking the scanned offsets into ~b_size blocks (binary search maps each
// block to its frontier range), decoding each block sequentially, and then
// copying the per-block compacted runs into a single output array.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapSparse_no_filter(graph<vertex>& GA, vertex* frontierVertices, VS& indices,
                                               uintT* offsets, uintT m, F& f, const flags fl) {
  using S = tuple<uintE, data>;
  long n = indices.n;
  long outEdgeCount = sequence::plusScan(offsets, offsets, m);
  S* outEdges = newA(S, outEdgeCount);
  auto g = get_emsparse_no_filter_gen<data>(outEdges);
  // binary-search into scan to map workers->chunks
  size_t b_size = 10000;
  size_t n_blocks = nblocks(outEdgeCount, b_size);
  uintE* cts = newA(uintE, n_blocks+1);
  size_t* block_offs = newA(size_t, n_blocks+1);
  auto offsets_m = make_in_imap<uintT>(m, [&] (size_t i) { return offsets[i]; });
  auto lt = [] (const uintT& l, const uintT& r) { return l < r; };
  parallel_for(size_t i=0; i<n_blocks; i++) {
    size_t s_val = i*b_size;
    block_offs[i] = pbbs::binary_search(offsets_m, s_val, lt);
  }
  block_offs[n_blocks] = m;
#pragma omp parallel for schedule (dynamic, dynChunkSz / 8)
  for (size_t i=0; i<n_blocks; i++) {
    if ((i == n_blocks-1) || block_offs[i] != block_offs[i+1]) {
      // start and end are offsets in [m]
      size_t start = block_offs[i];
      size_t end = block_offs[i+1];
      uintT start_o = offsets[start];
      uintT k = start_o;
      for (size_t j=start; j<end; j++) {
        uintE v = indices.vtx(j);
        size_t num_in = frontierVertices[j].decodeOutNghSparseSeq(v, k, f, g);
        k += num_in;
      }
      cts[i] = (k - start_o);  // number of edges this block actually emitted
    } else {
      cts[i] = 0;
    }
  }
  long outSize = sequence::plusScan(cts, cts, n_blocks);
  cts[n_blocks] = outSize;
  S* out = newA(S, outSize);
  // gather the per-block compacted runs into one contiguous array
  parallel_for (size_t i=0; i<n_blocks; i++) {
    if ((i == n_blocks-1) || block_offs[i] != block_offs[i+1]) {
      size_t start = block_offs[i];
      size_t start_o = offsets[start];
      size_t out_off = cts[i];
      size_t block_size = cts[i+1] - out_off;
      for (size_t j=0; j<block_size; j++) {
        out[out_off + j] = outEdges[start_o + j];
      }
    }
  }
  free(outEdges);
  free(cts);
  free(block_offs);
  if (fl & remove_duplicates) {
    if (GA.flags == NULL) {
      GA.flags = newA(uintE, n);
      parallel_for(size_t i=0;i<n;i++) { GA.flags[i]=UINT_E_MAX; }
    }
    auto get_key = [&] (size_t i) -> uintE& { return std::get<0>(out[i]); };
    remDuplicates(get_key, GA.flags, outSize, n);
    S* nextIndices = newA(S, outSize);
    auto p = [] (tuple<uintE, data>& v) { return std::get<0>(v) != UINT_E_MAX; };
    size_t nextM = pbbs::filterf(out, nextIndices, outSize, p);
    free(out);
    return vertexSubsetData<data>(n, nextM, nextIndices);
  }
  return vertexSubsetData<data>(n, outSize, out);
}

// Decides on sparse or dense base on number of nonzeros in the active vertices.
// Direction-optimizing dispatcher: goes dense when frontier size + its total
// out-degree exceeds 'threshold' (default m/20), sparse otherwise.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapData(graph<vertex>& GA, VS &vs, F f, intT threshold = -1, const flags& fl=0) {
  long numVertices = GA.n, numEdges = GA.m, m = vs.numNonzeros();
  if(threshold == -1) threshold = numEdges/20; //default threshold
  vertex *G = GA.V;
  if (numVertices != vs.numRows()) {
    cout << "edgeMap: Sizes Don't match" << endl;
    abort();
  }
  if (vs.size() == 0) return vertexSubsetData<data>(numVertices);
  vs.toSparse();
  uintT* degrees = newA(uintT, m);
  vertex* frontierVertices = newA(vertex,m);
  {parallel_for (size_t i=0; i < m; i++) {
    uintE v_id = vs.vtx(i);
    vertex v = G[v_id];
    degrees[i] = v.getOutDegree();
    frontierVertices[i] = v;
  }}
  uintT outDegrees = sequence::plusReduce(degrees, m);
  if (outDegrees == 0) return vertexSubsetData<data>(numVertices);
  if (m + outDegrees > threshold) {
    vs.toDense();
    free(degrees);
    free(frontierVertices);
    return (fl & dense_forward) ?
      edgeMapDenseForward<data, vertex, VS, F>(GA, vs, f, fl) :
      edgeMapDense<data, vertex, VS, F>(GA, vs, f, fl);
  } else {
    auto vs_out = (should_output(fl) && fl & sparse_no_filter) ?
      // only call snof when we output
      edgeMapSparse_no_filter<data, vertex, VS, F>(GA, frontierVertices, vs, degrees, vs.numNonzeros(), f, fl) :
      edgeMapSparse<data, vertex, VS, F>(GA, frontierVertices, vs, degrees, vs.numNonzeros(), f, fl);
    free(degrees);
    free(frontierVertices);
    return vs_out;
  }
}

// Regular edgeMap, where no extra data is stored per vertex.
template <class vertex, class VS, class F>
vertexSubset edgeMap(graph<vertex> GA, VS& vs, F f, intT threshold = -1, const flags& fl=0) {
  return edgeMapData<pbbs::empty>(GA, vs, f, threshold, fl);
}

/* General function to print stats about frontier size.
 * Only prints when KCore is true; reports frontier size as a percentage
 * of numVertices, tagged PULL (dense frontier) or PUSH (sparse). */
template <class VS>
void frontierStats(VS& vs, long numVertices, bool KCore = false) {
  if (KCore) {
    double percent = (static_cast<double>(vs.size()) / static_cast<double>(numVertices)) * 100;
    if (vs.dense()) {
      std::cout << "PULL iteration. Frontier size = " << percent << std::endl;
    } else {
      std::cout << "PUSH iteration. Frontier size = " << percent << std::endl;
    }
  }
  return;
}

// Packs out the adjacency lists of all vertex in vs. A neighbor, ngh, is kept
// in the new adjacency list if p(ngh) is true.
// Weighted graphs are not yet supported, but this should be easy to do.
template <class vertex, class P>
vertexSubsetData<uintE> packEdges(graph<vertex>& GA, vertexSubset& vs, P& p, const flags& fl=0) {
  // Physically removes out-neighbours failing p(ngh) from the adjacency
  // lists of every vertex in vs.  Returns (vertex, new-degree) pairs when
  // output is requested.  Scratch arrays are sized by the total out-degree.
  using S = tuple<uintE, uintE>;
  vs.toSparse();
  vertex* G = GA.V;
  long m = vs.numNonzeros();
  long n = vs.numRows();
  if (vs.size() == 0) {
    return vertexSubsetData<uintE>(n);
  }
  auto degrees = array_imap<uintT>(m);
  granular_for(i, 0, m, (m > 2000), {
    uintE v = vs.vtx(i);
    degrees[i] = G[v].getOutDegree();
  });
  long outEdgeCount = pbbs::scan_add(degrees, degrees);  // exclusive scan -> offsets
  S* outV;
  if (should_output(fl)) {
    outV = newA(S, vs.size());
  }
  bool* bits = newA(bool, outEdgeCount);
  uintE* tmp1 = newA(uintE, outEdgeCount);
  uintE* tmp2 = newA(uintE, outEdgeCount);
  if (should_output(fl)) {
    parallel_for (size_t i=0; i<m; i++) {
      uintE v = vs.vtx(i);
      size_t offset = degrees[i];
      auto bitsOff = &(bits[offset]);
      auto tmp1Off = &(tmp1[offset]);
      auto tmp2Off = &(tmp2[offset]);
      size_t ct = G[v].packOutNgh(v, p, bitsOff, tmp1Off, tmp2Off);
      outV[i] = make_tuple(v, ct);
    }
  } else {
    parallel_for (size_t i=0; i<m; i++) {
      uintE v = vs.vtx(i);
      size_t offset = degrees[i];
      auto bitsOff = &(bits[offset]);
      auto tmp1Off = &(tmp1[offset]);
      auto tmp2Off = &(tmp2[offset]);
      size_t ct = G[v].packOutNgh(v, p, bitsOff, tmp1Off, tmp2Off);
    }
  }
  free(bits);
  free(tmp1);
  free(tmp2);
  if (should_output(fl)) {
    return vertexSubsetData<uintE>(n, m, outV);
  } else {
    return vertexSubsetData<uintE>(n);
  }
}

// Counts (or, with pack_edges, packs) the out-neighbours of each frontier
// vertex that satisfy p.  Returns (vertex, count) pairs when output is on.
template <class vertex, class P>
vertexSubsetData<uintE> edgeMapFilter(graph<vertex>& GA, vertexSubset& vs, P& p, const flags& fl=0) {
  vs.toSparse();
  if (fl & pack_edges) {
    return packEdges<vertex, P>(GA, vs, p, fl);
  }
  vertex* G = GA.V;
  long m = vs.numNonzeros();
  long n = vs.numRows();
  using S = tuple<uintE, uintE>;
  if (vs.size() == 0) {
    return vertexSubsetData<uintE>(n);
  }
  S* outV;
  if (should_output(fl)) {
    outV = newA(S, vs.size());
  }
  if (should_output(fl)) {
    parallel_for (size_t i=0; i<m; i++) {
      uintE v = vs.vtx(i);
      size_t ct = G[v].countOutNgh(v, p);
      outV[i] = make_tuple(v, ct);
    }
  } else {
    parallel_for (size_t i=0; i<m; i++) {
      uintE v = vs.vtx(i);
      size_t ct = G[v].countOutNgh(v, p);
    }
  }
  if (should_output(fl)) {
    return vertexSubsetData<uintE>(n, m, outV);
  } else {
    return vertexSubsetData<uintE>(n);
  }
}

//*****VERTEX FUNCTIONS*****

// vertexMap overload for subsets carrying per-vertex data: f receives
// (vertex, data).  Selected via enable_if when VS is NOT plain vertexSubset.
template <class F, class VS, typename std::enable_if<
  !std::is_same<VS, vertexSubset>::value, int>::type=0 >
void vertexMap(VS& V, F f) {
  size_t n = V.numRows(), m = V.numNonzeros();
  if(V.dense()) {
    parallel_for(long i=0;i<n;i++) {
      if(V.isIn(i)) {
        f(i, V.ithData(i));
      }
    }
  } else {
    parallel_for(long i=0;i<m;i++) {
      f(V.vtx(i), V.vtxData(i));
    }
  }
}

// vertexMap overload for plain vertexSubset: f receives only the vertex id.
template <class VS, class F, typename std::enable_if<
  std::is_same<VS, vertexSubset>::value, int>::type=0 >
void vertexMap(VS& V, F f) {
  size_t n = V.numRows(), m = V.numNonzeros();
  if(V.dense()) {
    parallel_for(long i=0;i<n;i++) {
      if(V.isIn(i)) {
        f(i);
      }
    }
  } else {
    parallel_for(long i=0;i<m;i++) {
      f(V.vtx(i));
    }
  }
}

//Note: this is the version of vertexMap in which only a subset of the
//input vertexSubset is returned
// Dense filter: converts V to dense form and keeps vertices for which
// filter(i) is true.
template <class F>
vertexSubset vertexFilter(vertexSubset V, F filter) {
  long n = V.numRows(), m = V.numNonzeros();
  V.toDense();
  bool* d_out = newA(bool,n);
  {parallel_for(long i=0;i<n;i++) d_out[i] = 0;}
  {parallel_for(long i=0;i<n;i++)
    if(V.d[i]) d_out[i] = filter(i);}
  return vertexSubset(n,d_out);
}

// Sparse filter: evaluates filter on the sparse representation and packs
// the survivors, avoiding the O(n) densification of vertexFilter.
template <class F>
vertexSubset vertexFilter2(vertexSubset V, F filter) {
  long n = V.numRows(), m = V.numNonzeros();
  if (m == 0) {
    return vertexSubset(n);
  }
  bool* bits = newA(bool, m);
  V.toSparse();
  {parallel_for(size_t i=0; i<m; i++) {
    uintE v = V.vtx(i);
    bits[i] = filter(v);
  }}
  auto v_imap = make_in_imap<uintE>(m, [&] (size_t i) { return V.vtx(i); });
  auto bits_m = make_in_imap<bool>(m, [&] (size_t i) { return bits[i]; });
  auto out = pbbs::pack(v_imap, bits_m);
  out.alloc = false;  // ownership of out.s transfers to the returned subset
  free(bits);
  return vertexSubset(n, out.size(), out.s);
}

// As above, but for subsets with per-vertex data: filter receives (vertex, data).
template <class data, class F>
vertexSubset vertexFilter2(vertexSubsetData<data> V, F filter) {
  long n = V.numRows(), m = V.numNonzeros();
  if (m == 0) {
    return vertexSubset(n);
  }
  bool* bits = newA(bool, m);
  V.toSparse();
  parallel_for(size_t i=0; i<m; i++) {
    auto t = V.vtxAndData(i);
    bits[i] = filter(std::get<0>(t), std::get<1>(t));
  }
  auto v_imap = make_in_imap<uintE>(m, [&] (size_t i) { return V.vtx(i); });
  auto bits_m = make_in_imap<bool>(m, [&] (size_t i) { return bits[i]; });
  auto out = pbbs::pack(v_imap, bits_m);
  out.alloc = false;
  free(bits);
  return vertexSubset(n, out.size(), out.s);
}

//cond function that always returns true
inline bool cond_true (intT d) { return 1; }

// Implemented by each application; receives the graph, parsed command line,
// and the old->new vertex id mapping produced by preprocessing.
template<class vertex>
void Compute(graph<vertex>&, commandLine, pvector<uintE> &new_ids);

// Driver: parses options, loads the (a)symmetric graph, optionally reorders
// it by in/out-degree (-preprocess 0/1), then runs Compute once for warm-up
// plus '-rounds' timed repetitions.
// NOTE(review): declared int but has no return statement on the normal
// path -- callers must not rely on its return value; confirm and add
// 'return 0;' when this file can be rebuilt.
int parallel_main(int argc, char* argv[]) {
  commandLine P(argc,argv," [-s] <inFile>");
  char* iFile = P.getArgument(0);
  bool symmetric = P.getOptionValue("-s");
  bool compressed = P.getOptionValue("-c");
  bool binary = P.getOptionValue("-b");
  bool mmap = P.getOptionValue("-m");
  bool isPageRank = (P.getOptionIntValue("-pagerank", -1) == 1);
  bool isDenseWrite = (P.getOptionIntValue("-densewrite", -1) == 1);
  /* preprocessing options : 0 - outdegsort, 1 - indegsort, else - no-preprocessing */
  int preprocess = P.getOptionIntValue("-preprocess", -1);
  //cout << "mmap = " << mmap << endl;
  long rounds = P.getOptionLongValue("-rounds",3);
  if (compressed) {
    // compressed-graph path is disabled in this build
    assert(false);
#if 0
    if (symmetric) {
      graph<compressedSymmetricVertex> G =
        readCompressedGraph<compressedSymmetricVertex>(iFile,symmetric,mmap); //symmetric graph
      Compute(G,P);
      for(int r=0;r<rounds;r++) {
        startTime();
        Compute(G,P);
        nextTime("Running time");
      }
      G.del();
    } else {
      graph<compressedAsymmetricVertex> G =
        readCompressedGraph<compressedAsymmetricVertex>(iFile,symmetric,mmap); //asymmetric graph
      Compute(G,P);
      if(G.transposed) G.transpose();
      for(int r=0;r<rounds;r++) {
        startTime();
        Compute(G,P);
        nextTime("Running time");
        if(G.transposed) G.transpose();
      }
      G.del();
    }
#endif
  } else {
    if (symmetric) {
      graph<symmetricVertex> G =
        readGraph<symmetricVertex>(iFile,compressed,symmetric,binary,mmap); //symmetric graph
      pvector<uintE> new_ids(G.n, UINT_E_MAX);
      if (preprocess == 0 || preprocess == 1) {
        graph<symmetricVertex> newG =
          preprocessGraph<symmetricVertex>(G, symmetric, (preprocess == 0), new_ids);
        G.del();
        Compute(newG,P,new_ids);  // warm-up run
        for(int r=0;r<rounds;r++) {
          //startTime();
          Compute(newG,P,new_ids);
          //nextTime("Running time");
        }
        newG.del();
      } else {
        Compute(G,P,new_ids);
        for(int r=0;r<rounds;r++) {
          //startTime();
          Compute(G,P,new_ids);
          //nextTime("Running time");
        }
        G.del();
      }
    } else {
      graph<asymmetricVertex> G =
        readGraph<asymmetricVertex>(iFile,compressed,symmetric,binary,mmap); //asymmetric graph
      pvector<uintE> new_ids(G.n, UINT_E_MAX);
      if (preprocess == 0 || preprocess == 1) {
        graph<asymmetricVertex> newG =
          preprocessGraph<asymmetricVertex>(G, symmetric, (preprocess == 0), new_ids,
                                            isPageRank, isDenseWrite);
        G.del();
        Compute(newG,P,new_ids);
        if(newG.transposed) newG.transpose();  // restore orientation after each run
        for(int r=0;r<rounds;r++) {
          //startTime();
          Compute(newG,P,new_ids);
          if(newG.transposed) newG.transpose();
          //nextTime("Running time");
        }
        newG.del();
      } else {
        Compute(G,P,new_ids);
        if(G.transposed) G.transpose();
        for(int r=0;r<rounds;r++) {
          //startTime();
          Compute(G,P,new_ids);
          if(G.transposed) G.transpose();
          //nextTime("Running time");
        }
        G.del();
      }
    }
  }
}
#endif
gmx_hbond.c
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 1991-2000, University of Groningen, The Netherlands. * Copyright (c) 2001-2008, The GROMACS development team, * check out http://www.gromacs.org for more information. * Copyright (c) 2012,2013, by the GROMACS development team, led by * David van der Spoel, Berk Hess, Erik Lindahl, and including many * others, as listed in the AUTHORS file in the top-level source * directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. 
*/ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <math.h> /*#define HAVE_NN_LOOPS*/ #include "gmx_omp.h" #include "statutil.h" #include "copyrite.h" #include "sysstuff.h" #include "txtdump.h" #include "futil.h" #include "tpxio.h" #include "physics.h" #include "macros.h" #include "gmx_fatal.h" #include "index.h" #include "smalloc.h" #include "vec.h" #include "xvgr.h" #include "gstat.h" #include "matio.h" #include "string2.h" #include "pbc.h" #include "correl.h" #include "gmx_ana.h" #include "geminate.h" typedef short int t_E; typedef int t_EEst; #define max_hx 7 typedef int t_hx[max_hx]; #define NRHXTYPES max_hx const char *hxtypenames[NRHXTYPES] = {"n-n", "n-n+1", "n-n+2", "n-n+3", "n-n+4", "n-n+5", "n-n>6"}; #define MAXHH 4 #ifdef GMX_OPENMP #define MASTER_THREAD_ONLY(threadNr) ((threadNr) == 0) #else #define MASTER_THREAD_ONLY(threadNr) ((threadNr) == (threadNr)) #endif /* -----------------------------------------*/ enum { gr0, gr1, grI, grNR }; enum { hbNo, hbDist, hbHB, hbNR, hbR2 }; enum { noDA, ACC, DON, DA, INGROUP }; enum { NN_NULL, NN_NONE, NN_BINARY, NN_1_over_r3, NN_dipole, NN_NR }; static const char *grpnames[grNR] = {"0", "1", "I" }; static gmx_bool bDebug = FALSE; #define HB_NO 0 #define HB_YES 1<<0 #define HB_INS 1<<1 #define HB_YESINS HB_YES|HB_INS #define HB_NR (1<<2) #define MAXHYDRO 4 #define ISHB(h) (((h) & 2) == 2) #define ISDIST(h) (((h) & 1) == 1) #define ISDIST2(h) (((h) & 4) == 4) #define ISACC(h) (((h) & 1) == 1) #define ISDON(h) (((h) & 2) == 2) #define ISINGRP(h) (((h) & 4) == 4) typedef struct { int nr; int maxnr; atom_id *atoms; } t_ncell; typedef struct { t_ncell d[grNR]; t_ncell a[grNR]; } t_gridcell; typedef int t_icell[grNR]; typedef atom_id h_id[MAXHYDRO]; typedef struct { int history[MAXHYDRO]; /* Has this hbond existed ever? If so as hbDist or hbHB or both. * Result is stored as a bitmap (1 = hbDist) || (2 = hbHB) */ /* Bitmask array which tells whether a hbond is present * at a given time. 
Either of these may be NULL */ int n0; /* First frame a HB was found */ int nframes, maxframes; /* Amount of frames in this hbond */ unsigned int **h; unsigned int **g; /* See Xu and Berne, JPCB 105 (2001), p. 11929. We define the * function g(t) = [1-h(t)] H(t) where H(t) is one when the donor- * acceptor distance is less than the user-specified distance (typically * 0.35 nm). */ } t_hbond; typedef struct { int nra, max_nra; atom_id *acc; /* Atom numbers of the acceptors */ int *grp; /* Group index */ int *aptr; /* Map atom number to acceptor index */ } t_acceptors; typedef struct { int nrd, max_nrd; int *don; /* Atom numbers of the donors */ int *grp; /* Group index */ int *dptr; /* Map atom number to donor index */ int *nhydro; /* Number of hydrogens for each donor */ h_id *hydro; /* The atom numbers of the hydrogens */ h_id *nhbonds; /* The number of HBs per H at current */ } t_donors; /* Tune this to match memory requirements. It should be a signed integer type, e.g. signed char.*/ #define PSTYPE int typedef struct { int len; /* The length of frame and p. */ int *frame; /* The frames at which transitio*/ PSTYPE *p; } t_pShift; typedef struct { /* Periodicity history. Used for the reversible geminate recombination. */ t_pShift **pHist; /* The periodicity of every hbond in t_hbdata->hbmap: * pHist[d][a]. We can safely assume that the same * periodic shift holds for all hydrogens of a da-pair. * * Nowadays it only stores TRANSITIONS, and not the shift at every frame. * That saves a LOT of memory, an hopefully kills a mysterious bug where * pHist gets contaminated. */ PSTYPE nper; /* The length of p2i */ ivec *p2i; /* Maps integer to periodic shift for a pair.*/ matrix P; /* Projection matrix to find the box shifts. 
*/ int gemtype; /* enumerated type */ } t_gemPeriod; typedef struct { int nframes; int *Etot; /* Total energy for each frame */ t_E ****E; /* Energy estimate for [d][a][h][frame-n0] */ } t_hbEmap; typedef struct { gmx_bool bHBmap, bDAnr, bGem; int wordlen; /* The following arrays are nframes long */ int nframes, max_frames, maxhydro; int *nhb, *ndist; h_id *n_bound; real *time; t_icell *danr; t_hx *nhx; /* These structures are initialized from the topology at start up */ t_donors d; t_acceptors a; /* This holds a matrix with all possible hydrogen bonds */ int nrhb, nrdist; t_hbond ***hbmap; #ifdef HAVE_NN_LOOPS t_hbEmap hbE; #endif /* For parallelization reasons this will have to be a pointer. * Otherwise discrepancies may arise between the periodicity data * seen by different threads. */ t_gemPeriod *per; } t_hbdata; static void clearPshift(t_pShift *pShift) { if (pShift->len > 0) { sfree(pShift->p); sfree(pShift->frame); pShift->len = 0; } } static void calcBoxProjection(matrix B, matrix P) { const int vp[] = {XX, YY, ZZ}; int i, j; int m, n; matrix M, N, U; for (i = 0; i < 3; i++) { m = vp[i]; for (j = 0; j < 3; j++) { n = vp[j]; U[m][n] = i == j ? 1 : 0; } } m_inv(B, M); for (i = 0; i < 3; i++) { m = vp[i]; mvmul(M, U[m], P[m]); } transpose(P, N); } static void calcBoxDistance(matrix P, rvec d, ivec ibd) { /* returns integer distance in box coordinates. * P is the projection matrix from cartesian coordinates * obtained with calcBoxProjection(). */ int i; rvec bd; mvmul(P, d, bd); /* extend it by 0.5 in all directions since (int) rounds toward 0.*/ for (i = 0; i < 3; i++) { bd[i] = bd[i] + (bd[i] < 0 ? -0.5 : 0.5); } ibd[XX] = (int)bd[XX]; ibd[YY] = (int)bd[YY]; ibd[ZZ] = (int)bd[ZZ]; } /* Changed argument 'bMerge' into 'oneHB' below, * since -contact should cause maxhydro to be 1, * not just -merge. * - Erik Marklund May 29, 2006 */ static PSTYPE periodicIndex(ivec r, t_gemPeriod *per, gmx_bool daSwap) { /* Try to merge hbonds on the fly. 
That means that if the * acceptor and donor are mergable, then: * 1) store the hb-info so that acceptor id > donor id, * 2) add the periodic shift in pairs, so that [-x,-y,-z] is * stored in per.p2i[] whenever acceptor id < donor id. * Note that [0,0,0] should already be the first element of per.p2i * by the time this function is called. */ /* daSwap is TRUE if the donor and acceptor were swapped. * If so, then the negative vector should be used. */ PSTYPE i; if (per->p2i == NULL || per->nper == 0) { gmx_fatal(FARGS, "'per' not initialized properly."); } for (i = 0; i < per->nper; i++) { if (r[XX] == per->p2i[i][XX] && r[YY] == per->p2i[i][YY] && r[ZZ] == per->p2i[i][ZZ]) { return i; } } /* Not found apparently. Add it to the list! */ /* printf("New shift found: %i,%i,%i\n",r[XX],r[YY],r[ZZ]); */ #pragma omp critical { if (!per->p2i) { fprintf(stderr, "p2i not initialized. This shouldn't happen!\n"); snew(per->p2i, 1); } else { srenew(per->p2i, per->nper+2); } copy_ivec(r, per->p2i[per->nper]); (per->nper)++; /* Add the mirror too. It's rather likely that it'll be needed. */ per->p2i[per->nper][XX] = -r[XX]; per->p2i[per->nper][YY] = -r[YY]; per->p2i[per->nper][ZZ] = -r[ZZ]; (per->nper)++; } /* omp critical */ return per->nper - 1 - (daSwap ? 0 : 1); } static t_hbdata *mk_hbdata(gmx_bool bHBmap, gmx_bool bDAnr, gmx_bool oneHB, gmx_bool bGem, int gemmode) { t_hbdata *hb; snew(hb, 1); hb->wordlen = 8*sizeof(unsigned int); hb->bHBmap = bHBmap; hb->bDAnr = bDAnr; hb->bGem = bGem; if (oneHB) { hb->maxhydro = 1; } else { hb->maxhydro = MAXHYDRO; } snew(hb->per, 1); hb->per->gemtype = bGem ? 
gemmode : 0; return hb; } static void mk_hbmap(t_hbdata *hb, gmx_bool bTwo) { int i, j; snew(hb->hbmap, hb->d.nrd); for (i = 0; (i < hb->d.nrd); i++) { snew(hb->hbmap[i], hb->a.nra); if (hb->hbmap[i] == NULL) { gmx_fatal(FARGS, "Could not allocate enough memory for hbmap"); } for (j = 0; (j > hb->a.nra); j++) { hb->hbmap[i][j] = NULL; } } } /* Consider redoing pHist so that is only stores transitions between * periodicities and not the periodicity for all frames. This eats heaps of memory. */ static void mk_per(t_hbdata *hb) { int i, j; if (hb->bGem) { snew(hb->per->pHist, hb->d.nrd); for (i = 0; i < hb->d.nrd; i++) { snew(hb->per->pHist[i], hb->a.nra); if (hb->per->pHist[i] == NULL) { gmx_fatal(FARGS, "Could not allocate enough memory for per->pHist"); } for (j = 0; j < hb->a.nra; j++) { clearPshift(&(hb->per->pHist[i][j])); } } /* add the [0,0,0] shift to element 0 of p2i. */ snew(hb->per->p2i, 1); clear_ivec(hb->per->p2i[0]); hb->per->nper = 1; } } #ifdef HAVE_NN_LOOPS static void mk_hbEmap (t_hbdata *hb, int n0) { int i, j, k; hb->hbE.E = NULL; hb->hbE.nframes = 0; snew(hb->hbE.E, hb->d.nrd); for (i = 0; i < hb->d.nrd; i++) { snew(hb->hbE.E[i], hb->a.nra); for (j = 0; j < hb->a.nra; j++) { snew(hb->hbE.E[i][j], MAXHYDRO); for (k = 0; k < MAXHYDRO; k++) { hb->hbE.E[i][j][k] = NULL; } } } hb->hbE.Etot = NULL; } static void free_hbEmap (t_hbdata *hb) { int i, j, k; for (i = 0; i < hb->d.nrd; i++) { for (j = 0; j < hb->a.nra; j++) { for (k = 0; k < MAXHYDRO; k++) { sfree(hb->hbE.E[i][j][k]); } sfree(hb->hbE.E[i][j]); } sfree(hb->hbE.E[i]); } sfree(hb->hbE.E); sfree(hb->hbE.Etot); } static void addFramesNN(t_hbdata *hb, int frame) { #define DELTAFRAMES_HBE 10 int d, a, h, nframes; if (frame >= hb->hbE.nframes) { nframes = hb->hbE.nframes + DELTAFRAMES_HBE; srenew(hb->hbE.Etot, nframes); for (d = 0; d < hb->d.nrd; d++) { for (a = 0; a < hb->a.nra; a++) { for (h = 0; h < hb->d.nhydro[d]; h++) { srenew(hb->hbE.E[d][a][h], nframes); } } } hb->hbE.nframes += 
DELTAFRAMES_HBE; } } static t_E calcHbEnergy(int d, int a, int h, rvec x[], t_EEst EEst, matrix box, rvec hbox, t_donors *donors) { /* d - donor atom * a - acceptor atom * h - hydrogen * alpha - angle between dipoles * x[] - atomic positions * EEst - the type of energy estimate (see enum in hbplugin.h) * box - the box vectors \ * hbox - half box lengths _These two are only needed for the pbc correction */ t_E E; rvec dist; rvec dipole[2], xmol[3], xmean[2]; int i; real r, realE; if (d == a) { /* Self-interaction */ return NONSENSE_E; } switch (EEst) { case NN_BINARY: /* This is a simple binary existence function that sets E=1 whenever * the distance between the oxygens is equal too or less than 0.35 nm. */ rvec_sub(x[d], x[a], dist); pbc_correct_gem(dist, box, hbox); if (norm(dist) <= 0.35) { E = 1; } else { E = 0; } break; case NN_1_over_r3: /* Negative potential energy of a dipole. * E = -cos(alpha) * 1/r^3 */ copy_rvec(x[d], xmol[0]); /* donor */ copy_rvec(x[donors->hydro[donors->dptr[d]][0]], xmol[1]); /* hydrogen */ copy_rvec(x[donors->hydro[donors->dptr[d]][1]], xmol[2]); /* hydrogen */ svmul(15.9994*(1/1.008), xmol[0], xmean[0]); rvec_inc(xmean[0], xmol[1]); rvec_inc(xmean[0], xmol[2]); for (i = 0; i < 3; i++) { xmean[0][i] /= (15.9994 + 1.008 + 1.008)/1.008; } /* Assumes that all acceptors are also donors. */ copy_rvec(x[a], xmol[0]); /* acceptor */ copy_rvec(x[donors->hydro[donors->dptr[a]][0]], xmol[1]); /* hydrogen */ copy_rvec(x[donors->hydro[donors->dptr[a]][1]], xmol[2]); /* hydrogen */ svmul(15.9994*(1/1.008), xmol[0], xmean[1]); rvec_inc(xmean[1], xmol[1]); rvec_inc(xmean[1], xmol[2]); for (i = 0; i < 3; i++) { xmean[1][i] /= (15.9994 + 1.008 + 1.008)/1.008; } rvec_sub(xmean[0], xmean[1], dist); pbc_correct_gem(dist, box, hbox); r = norm(dist); realE = pow(r, -3.0); E = (t_E)(SCALEFACTOR_E * realE); break; case NN_dipole: /* Negative potential energy of a (unpolarizable) dipole. 
* E = -cos(alpha) * 1/r^3 */ clear_rvec(dipole[1]); clear_rvec(dipole[0]); copy_rvec(x[d], xmol[0]); /* donor */ copy_rvec(x[donors->hydro[donors->dptr[d]][0]], xmol[1]); /* hydrogen */ copy_rvec(x[donors->hydro[donors->dptr[d]][1]], xmol[2]); /* hydrogen */ rvec_inc(dipole[0], xmol[1]); rvec_inc(dipole[0], xmol[2]); for (i = 0; i < 3; i++) { dipole[0][i] *= 0.5; } rvec_dec(dipole[0], xmol[0]); svmul(15.9994*(1/1.008), xmol[0], xmean[0]); rvec_inc(xmean[0], xmol[1]); rvec_inc(xmean[0], xmol[2]); for (i = 0; i < 3; i++) { xmean[0][i] /= (15.9994 + 1.008 + 1.008)/1.008; } /* Assumes that all acceptors are also donors. */ copy_rvec(x[a], xmol[0]); /* acceptor */ copy_rvec(x[donors->hydro[donors->dptr[a]][0]], xmol[1]); /* hydrogen */ copy_rvec(x[donors->hydro[donors->dptr[a]][2]], xmol[2]); /* hydrogen */ rvec_inc(dipole[1], xmol[1]); rvec_inc(dipole[1], xmol[2]); for (i = 0; i < 3; i++) { dipole[1][i] *= 0.5; } rvec_dec(dipole[1], xmol[0]); svmul(15.9994*(1/1.008), xmol[0], xmean[1]); rvec_inc(xmean[1], xmol[1]); rvec_inc(xmean[1], xmol[2]); for (i = 0; i < 3; i++) { xmean[1][i] /= (15.9994 + 1.008 + 1.008)/1.008; } rvec_sub(xmean[0], xmean[1], dist); pbc_correct_gem(dist, box, hbox); r = norm(dist); double cosalpha = cos_angle(dipole[0], dipole[1]); realE = cosalpha * pow(r, -3.0); E = (t_E)(SCALEFACTOR_E * realE); break; default: printf("Can't do that type of energy estimate: %i\n.", EEst); E = NONSENSE_E; } return E; } static void storeHbEnergy(t_hbdata *hb, int d, int a, int h, t_E E, int frame) { /* hb - hbond data structure d - donor a - acceptor h - hydrogen E - estimate of the energy frame - the current frame. 
*/ /* Store the estimated energy */ if (E == NONSENSE_E) { E = 0; } hb->hbE.E[d][a][h][frame] = E; #pragma omp critical { hb->hbE.Etot[frame] += E; } } #endif /* HAVE_NN_LOOPS */ /* Finds -v[] in the periodicity index */ static int findMirror(PSTYPE p, ivec v[], PSTYPE nper) { PSTYPE i; ivec u; for (i = 0; i < nper; i++) { if (v[i][XX] == -(v[p][XX]) && v[i][YY] == -(v[p][YY]) && v[i][ZZ] == -(v[p][ZZ])) { return (int)i; } } printf("Couldn't find mirror of [%i, %i, %i], index \n", v[p][XX], v[p][YY], v[p][ZZ]); return -1; } static void add_frames(t_hbdata *hb, int nframes) { int i, j, k, l; if (nframes >= hb->max_frames) { hb->max_frames += 4096; srenew(hb->time, hb->max_frames); srenew(hb->nhb, hb->max_frames); srenew(hb->ndist, hb->max_frames); srenew(hb->n_bound, hb->max_frames); srenew(hb->nhx, hb->max_frames); if (hb->bDAnr) { srenew(hb->danr, hb->max_frames); } } hb->nframes = nframes; } #define OFFSET(frame) (frame / 32) #define MASK(frame) (1 << (frame % 32)) static void _set_hb(unsigned int hbexist[], unsigned int frame, gmx_bool bValue) { if (bValue) { hbexist[OFFSET(frame)] |= MASK(frame); } else { hbexist[OFFSET(frame)] &= ~MASK(frame); } } static gmx_bool is_hb(unsigned int hbexist[], int frame) { return ((hbexist[OFFSET(frame)] & MASK(frame)) != 0) ? 
1 : 0;
}

/* Marks the hydrogen bond (ihb == hbHB) or distance contact (ihb == hbDist)
 * between donor id (hydrogen ih) and acceptor ia as present in the given
 * frame, by setting the corresponding bit in the packed existence map. */
static void set_hb(t_hbdata *hb, int id, int ih, int ia, int frame, int ihb)
{
    unsigned int *ghptr = NULL;

    if (ihb == hbHB)
    {
        ghptr = hb->hbmap[id][ia]->h[ih];
    }
    else if (ihb == hbDist)
    {
        ghptr = hb->hbmap[id][ia]->g[ih];
    }
    else
    {
        gmx_fatal(FARGS, "Incomprehensible iValue %d in set_hb", ihb);
    }

    /* Frames are stored relative to n0, the first frame of this pair */
    _set_hb(ghptr, frame-hb->hbmap[id][ia]->n0, TRUE);
}

/* Records a periodicity-shift transition: appends (frame, p) to the history,
 * but only when p differs from the most recent recorded shift. */
static void addPshift(t_pShift *pHist, PSTYPE p, int frame)
{
    if (pHist->len == 0)
    {
        /* First entry: allocate and store */
        snew(pHist->frame, 1);
        snew(pHist->p, 1);
        pHist->len      = 1;
        pHist->frame[0] = frame;
        pHist->p[0]     = p;
        return;
    }
    else
    if (pHist->p[pHist->len-1] != p)
    {
        pHist->len++;
        srenew(pHist->frame, pHist->len);
        srenew(pHist->p, pHist->len);
        pHist->frame[pHist->len-1] = frame;
        pHist->p[pHist->len-1]     = p;
    } /* Otherwise, there is no transition. */
    return;
}

/* Looks up the periodicity shift in effect at the given frame: returns the
 * shift recorded at or before frame, or -1 when frame precedes the first
 * recorded transition (or the history is empty). */
static PSTYPE getPshift(t_pShift pHist, int frame)
{
    int f, i;

    if (pHist.len == 0
        || (pHist.len > 0 && pHist.frame[0] > frame))
    {
        return -1;
    }

    for (i = 0; i < pHist.len; i++)
    {
        f = pHist.frame[i];
        if (f == frame)
        {
            return pHist.p[i];
        }
        if (f > frame)
        {
            /* frame lies between transitions i-1 and i */
            return pHist.p[i-1];
        }
    }

    /* It seems that frame is after the last periodic transition. Return the last periodicity. */
    return pHist.p[pHist.len-1];
}

/* Registers bond/contact existence (and, when bGem, the periodicity shift p)
 * for the donor/acceptor pair (id, ia) at the given frame, allocating or
 * growing the per-pair bitmaps in chunks of delta = 32*wordlen frames. */
static void add_ff(t_hbdata *hbd, int id, int h, int ia, int frame, int ihb, PSTYPE p)
{
    int      i, j, n;
    t_hbond *hb       = hbd->hbmap[id][ia];
    int      maxhydro = min(hbd->maxhydro, hbd->d.nhydro[id]);
    int      wlen     = hbd->wordlen;
    int      delta    = 32*wlen;
    gmx_bool bGem     = hbd->bGem;

    if (!hb->h[0])
    {
        /* First occurrence of this pair: start the bitmaps at this frame */
        hb->n0        = frame;
        hb->maxframes = delta;
        for (i = 0; (i < maxhydro); i++)
        {
            snew(hb->h[i], hb->maxframes/wlen);
            snew(hb->g[i], hb->maxframes/wlen);
        }
    }
    else
    {
        hb->nframes = frame-hb->n0;
        /* We need a while loop here because hbonds may be returning
         * after a long time.
*/ while (hb->nframes >= hb->maxframes) { n = hb->maxframes + delta; for (i = 0; (i < maxhydro); i++) { srenew(hb->h[i], n/wlen); srenew(hb->g[i], n/wlen); for (j = hb->maxframes/wlen; (j < n/wlen); j++) { hb->h[i][j] = 0; hb->g[i][j] = 0; } } hb->maxframes = n; } } if (frame >= 0) { set_hb(hbd, id, h, ia, frame, ihb); if (bGem) { if (p >= hbd->per->nper) { gmx_fatal(FARGS, "invalid shift: p=%u, nper=%u", p, hbd->per->nper); } else { addPshift(&(hbd->per->pHist[id][ia]), p, frame); } } } } static void inc_nhbonds(t_donors *ddd, int d, int h) { int j; int dptr = ddd->dptr[d]; for (j = 0; (j < ddd->nhydro[dptr]); j++) { if (ddd->hydro[dptr][j] == h) { ddd->nhbonds[dptr][j]++; break; } } if (j == ddd->nhydro[dptr]) { gmx_fatal(FARGS, "No such hydrogen %d on donor %d\n", h+1, d+1); } } static int _acceptor_index(t_acceptors *a, int grp, atom_id i, const char *file, int line) { int ai = a->aptr[i]; if (a->grp[ai] != grp) { if (debug && bDebug) { fprintf(debug, "Acc. group inconsist.. grp[%d] = %d, grp = %d (%s, %d)\n", ai, a->grp[ai], grp, file, line); } return NOTSET; } else { return ai; } } #define acceptor_index(a, grp, i) _acceptor_index(a, grp, i, __FILE__, __LINE__) static int _donor_index(t_donors *d, int grp, atom_id i, const char *file, int line) { int di = d->dptr[i]; if (di == NOTSET) { return NOTSET; } if (d->grp[di] != grp) { if (debug && bDebug) { fprintf(debug, "Don. group inconsist.. 
grp[%d] = %d, grp = %d (%s, %d)\n", di, d->grp[di], grp, file, line); } return NOTSET; } else { return di; } } #define donor_index(d, grp, i) _donor_index(d, grp, i, __FILE__, __LINE__) static gmx_bool isInterchangable(t_hbdata *hb, int d, int a, int grpa, int grpd) { /* g_hbond doesn't allow overlapping groups */ if (grpa != grpd) { return FALSE; } return donor_index(&hb->d, grpd, a) != NOTSET && acceptor_index(&hb->a, grpa, d) != NOTSET; } static void add_hbond(t_hbdata *hb, int d, int a, int h, int grpd, int grpa, int frame, gmx_bool bMerge, int ihb, gmx_bool bContact, PSTYPE p) { int k, id, ia, hh; gmx_bool daSwap = FALSE; if ((id = hb->d.dptr[d]) == NOTSET) { gmx_fatal(FARGS, "No donor atom %d", d+1); } else if (grpd != hb->d.grp[id]) { gmx_fatal(FARGS, "Inconsistent donor groups, %d iso %d, atom %d", grpd, hb->d.grp[id], d+1); } if ((ia = hb->a.aptr[a]) == NOTSET) { gmx_fatal(FARGS, "No acceptor atom %d", a+1); } else if (grpa != hb->a.grp[ia]) { gmx_fatal(FARGS, "Inconsistent acceptor groups, %d iso %d, atom %d", grpa, hb->a.grp[ia], a+1); } if (bMerge) { if (isInterchangable(hb, d, a, grpd, grpa) && d > a) /* Then swap identity so that the id of d is lower then that of a. * * This should really be redundant by now, as is_hbond() now ought to return * hbNo in the cases where this conditional is TRUE. */ { daSwap = TRUE; k = d; d = a; a = k; /* Now repeat donor/acc check. 
*/ if ((id = hb->d.dptr[d]) == NOTSET) { gmx_fatal(FARGS, "No donor atom %d", d+1); } else if (grpd != hb->d.grp[id]) { gmx_fatal(FARGS, "Inconsistent donor groups, %d iso %d, atom %d", grpd, hb->d.grp[id], d+1); } if ((ia = hb->a.aptr[a]) == NOTSET) { gmx_fatal(FARGS, "No acceptor atom %d", a+1); } else if (grpa != hb->a.grp[ia]) { gmx_fatal(FARGS, "Inconsistent acceptor groups, %d iso %d, atom %d", grpa, hb->a.grp[ia], a+1); } } } if (hb->hbmap) { /* Loop over hydrogens to find which hydrogen is in this particular HB */ if ((ihb == hbHB) && !bMerge && !bContact) { for (k = 0; (k < hb->d.nhydro[id]); k++) { if (hb->d.hydro[id][k] == h) { break; } } if (k == hb->d.nhydro[id]) { gmx_fatal(FARGS, "Donor %d does not have hydrogen %d (a = %d)", d+1, h+1, a+1); } } else { k = 0; } if (hb->bHBmap) { #pragma omp critical { if (hb->hbmap[id][ia] == NULL) { snew(hb->hbmap[id][ia], 1); snew(hb->hbmap[id][ia]->h, hb->maxhydro); snew(hb->hbmap[id][ia]->g, hb->maxhydro); } add_ff(hb, id, k, ia, frame, ihb, p); } } /* Strange construction with frame >=0 is a relic from old code * for selected hbond analysis. It may be necessary again if that * is made to work again. 
*/ if (frame >= 0) { hh = hb->hbmap[id][ia]->history[k]; if (ihb == hbHB) { hb->nhb[frame]++; if (!(ISHB(hh))) { hb->hbmap[id][ia]->history[k] = hh | 2; hb->nrhb++; } } else { if (ihb == hbDist) { hb->ndist[frame]++; if (!(ISDIST(hh))) { hb->hbmap[id][ia]->history[k] = hh | 1; hb->nrdist++; } } } } } else { if (frame >= 0) { if (ihb == hbHB) { hb->nhb[frame]++; } else { if (ihb == hbDist) { hb->ndist[frame]++; } } } } if (bMerge && daSwap) { h = hb->d.hydro[id][0]; } /* Increment number if HBonds per H */ if (ihb == hbHB && !bContact) { inc_nhbonds(&(hb->d), d, h); } } static char *mkatomname(t_atoms *atoms, int i) { static char buf[32]; int rnr; rnr = atoms->atom[i].resind; sprintf(buf, "%4s%d%-4s", *atoms->resinfo[rnr].name, atoms->resinfo[rnr].nr, *atoms->atomname[i]); return buf; } static void gen_datable(atom_id *index, int isize, unsigned char *datable, int natoms) { /* Generates table of all atoms and sets the ingroup bit for atoms in index[] */ int i; for (i = 0; i < isize; i++) { if (index[i] >= natoms) { gmx_fatal(FARGS, "Atom has index %d larger than number of atoms %d.", index[i], natoms); } datable[index[i]] |= INGROUP; } } static void clear_datable_grp(unsigned char *datable, int size) { /* Clears group information from the table */ int i; const char mask = !(char)INGROUP; if (size > 0) { for (i = 0; i < size; i++) { datable[i] &= mask; } } } static void add_acc(t_acceptors *a, int ia, int grp) { if (a->nra >= a->max_nra) { a->max_nra += 16; srenew(a->acc, a->max_nra); srenew(a->grp, a->max_nra); } a->grp[a->nra] = grp; a->acc[a->nra++] = ia; } static void search_acceptors(t_topology *top, int isize, atom_id *index, t_acceptors *a, int grp, gmx_bool bNitAcc, gmx_bool bContact, gmx_bool bDoIt, unsigned char *datable) { int i, n; if (bDoIt) { for (i = 0; (i < isize); i++) { n = index[i]; if ((bContact || (((*top->atoms.atomname[n])[0] == 'O') || (bNitAcc && ((*top->atoms.atomname[n])[0] == 'N')))) && ISINGRP(datable[n])) { datable[n] |= ACC; /* set the 
atom's acceptor flag in datable. */ add_acc(a, n, grp); } } } snew(a->aptr, top->atoms.nr); for (i = 0; (i < top->atoms.nr); i++) { a->aptr[i] = NOTSET; } for (i = 0; (i < a->nra); i++) { a->aptr[a->acc[i]] = i; } } static void add_h2d(int id, int ih, t_donors *ddd) { int i; for (i = 0; (i < ddd->nhydro[id]); i++) { if (ddd->hydro[id][i] == ih) { printf("Hm. This isn't the first time I found this donor (%d,%d)\n", ddd->don[id], ih); break; } } if (i == ddd->nhydro[id]) { if (ddd->nhydro[id] >= MAXHYDRO) { gmx_fatal(FARGS, "Donor %d has more than %d hydrogens!", ddd->don[id], MAXHYDRO); } ddd->hydro[id][i] = ih; ddd->nhydro[id]++; } } static void add_dh(t_donors *ddd, int id, int ih, int grp, unsigned char *datable) { int i; if (ISDON(datable[id]) || !datable) { if (ddd->dptr[id] == NOTSET) /* New donor */ { i = ddd->nrd; ddd->dptr[id] = i; } else { i = ddd->dptr[id]; } if (i == ddd->nrd) { if (ddd->nrd >= ddd->max_nrd) { ddd->max_nrd += 128; srenew(ddd->don, ddd->max_nrd); srenew(ddd->nhydro, ddd->max_nrd); srenew(ddd->hydro, ddd->max_nrd); srenew(ddd->nhbonds, ddd->max_nrd); srenew(ddd->grp, ddd->max_nrd); } ddd->don[ddd->nrd] = id; ddd->nhydro[ddd->nrd] = 0; ddd->grp[ddd->nrd] = grp; ddd->nrd++; } else { ddd->don[i] = id; } add_h2d(i, ih, ddd); } else if (datable) { printf("Warning: Atom %d is not in the d/a-table!\n", id); } } static void search_donors(t_topology *top, int isize, atom_id *index, t_donors *ddd, int grp, gmx_bool bContact, gmx_bool bDoIt, unsigned char *datable) { int i, j, nra, n; t_functype func_type; t_ilist *interaction; atom_id nr1, nr2, nr3; gmx_bool stop; if (!ddd->dptr) { snew(ddd->dptr, top->atoms.nr); for (i = 0; (i < top->atoms.nr); i++) { ddd->dptr[i] = NOTSET; } } if (bContact) { if (bDoIt) { for (i = 0; (i < isize); i++) { datable[index[i]] |= DON; add_dh(ddd, index[i], -1, grp, datable); } } } else { for (func_type = 0; (func_type < F_NRE); func_type++) { interaction = &(top->idef.il[func_type]); if (func_type == F_POSRES) { /* The 
ilist looks strange for posre. Bug in grompp? * We don't need posre interactions for hbonds anyway.*/ continue; } for (i = 0; i < interaction->nr; i += interaction_function[top->idef.functype[interaction->iatoms[i]]].nratoms+1) { /* next function */ if (func_type != top->idef.functype[interaction->iatoms[i]]) { fprintf(stderr, "Error in func_type %s", interaction_function[func_type].longname); continue; } /* check out this functype */ if (func_type == F_SETTLE) { nr1 = interaction->iatoms[i+1]; nr2 = interaction->iatoms[i+2]; nr3 = interaction->iatoms[i+3]; if (ISINGRP(datable[nr1])) { if (ISINGRP(datable[nr2])) { datable[nr1] |= DON; add_dh(ddd, nr1, nr1+1, grp, datable); } if (ISINGRP(datable[nr3])) { datable[nr1] |= DON; add_dh(ddd, nr1, nr1+2, grp, datable); } } } else if (IS_CHEMBOND(func_type)) { for (j = 0; j < 2; j++) { nr1 = interaction->iatoms[i+1+j]; nr2 = interaction->iatoms[i+2-j]; if ((*top->atoms.atomname[nr1][0] == 'H') && ((*top->atoms.atomname[nr2][0] == 'O') || (*top->atoms.atomname[nr2][0] == 'N')) && ISINGRP(datable[nr1]) && ISINGRP(datable[nr2])) { datable[nr2] |= DON; add_dh(ddd, nr2, nr1, grp, datable); } } } } } #ifdef SAFEVSITES for (func_type = 0; func_type < F_NRE; func_type++) { interaction = &top->idef.il[func_type]; for (i = 0; i < interaction->nr; i += interaction_function[top->idef.functype[interaction->iatoms[i]]].nratoms+1) { /* next function */ if (func_type != top->idef.functype[interaction->iatoms[i]]) { gmx_incons("function type in search_donors"); } if (interaction_function[func_type].flags & IF_VSITE) { nr1 = interaction->iatoms[i+1]; if (*top->atoms.atomname[nr1][0] == 'H') { nr2 = nr1-1; stop = FALSE; while (!stop && ( *top->atoms.atomname[nr2][0] == 'H')) { if (nr2) { nr2--; } else { stop = TRUE; } } if (!stop && ( ( *top->atoms.atomname[nr2][0] == 'O') || ( *top->atoms.atomname[nr2][0] == 'N') ) && ISINGRP(datable[nr1]) && ISINGRP(datable[nr2])) { datable[nr2] |= DON; add_dh(ddd, nr2, nr1, grp, datable); } } } } } #endif 
} } static t_gridcell ***init_grid(gmx_bool bBox, rvec box[], real rcut, ivec ngrid) { t_gridcell ***grid; int i, y, z; if (bBox) { for (i = 0; i < DIM; i++) { ngrid[i] = (box[i][i]/(1.2*rcut)); } } if (!bBox || (ngrid[XX] < 3) || (ngrid[YY] < 3) || (ngrid[ZZ] < 3) ) { for (i = 0; i < DIM; i++) { ngrid[i] = 1; } } else { printf("\nWill do grid-seach on %dx%dx%d grid, rcut=%g\n", ngrid[XX], ngrid[YY], ngrid[ZZ], rcut); } snew(grid, ngrid[ZZ]); for (z = 0; z < ngrid[ZZ]; z++) { snew((grid)[z], ngrid[YY]); for (y = 0; y < ngrid[YY]; y++) { snew((grid)[z][y], ngrid[XX]); } } return grid; } static void reset_nhbonds(t_donors *ddd) { int i, j; for (i = 0; (i < ddd->nrd); i++) { for (j = 0; (j < MAXHH); j++) { ddd->nhbonds[i][j] = 0; } } } void pbc_correct_gem(rvec dx, matrix box, rvec hbox); static void build_grid(t_hbdata *hb, rvec x[], rvec xshell, gmx_bool bBox, matrix box, rvec hbox, real rcut, real rshell, ivec ngrid, t_gridcell ***grid) { int i, m, gr, xi, yi, zi, nr; atom_id *ad; ivec grididx; rvec invdelta, dshell, xtemp = {0, 0, 0}; t_ncell *newgrid; gmx_bool bDoRshell, bInShell, bAcc; real rshell2 = 0; int gx, gy, gz; int dum = -1; bDoRshell = (rshell > 0); rshell2 = sqr(rshell); bInShell = TRUE; #define DBB(x) if (debug && bDebug) fprintf(debug, "build_grid, line %d, %s = %d\n", __LINE__,#x, x) DBB(dum); for (m = 0; m < DIM; m++) { hbox[m] = box[m][m]*0.5; if (bBox) { invdelta[m] = ngrid[m]/box[m][m]; if (1/invdelta[m] < rcut) { gmx_fatal(FARGS, "Your computational box has shrunk too much.\n" "%s can not handle this situation, sorry.\n", ShortProgram()); } } else { invdelta[m] = 0; } } grididx[XX] = 0; grididx[YY] = 0; grididx[ZZ] = 0; DBB(dum); /* resetting atom counts */ for (gr = 0; (gr < grNR); gr++) { for (zi = 0; zi < ngrid[ZZ]; zi++) { for (yi = 0; yi < ngrid[YY]; yi++) { for (xi = 0; xi < ngrid[XX]; xi++) { grid[zi][yi][xi].d[gr].nr = 0; grid[zi][yi][xi].a[gr].nr = 0; } } } DBB(dum); /* put atoms in grid cells */ for (bAcc = FALSE; (bAcc <= TRUE); 
bAcc++) { if (bAcc) { nr = hb->a.nra; ad = hb->a.acc; } else { nr = hb->d.nrd; ad = hb->d.don; } DBB(bAcc); for (i = 0; (i < nr); i++) { /* check if we are inside the shell */ /* if bDoRshell=FALSE then bInShell=TRUE always */ DBB(i); if (bDoRshell) { bInShell = TRUE; rvec_sub(x[ad[i]], xshell, dshell); if (bBox) { if (FALSE && !hb->bGem) { for (m = DIM-1; m >= 0 && bInShell; m--) { if (dshell[m] < -hbox[m]) { rvec_inc(dshell, box[m]); } else if (dshell[m] >= hbox[m]) { dshell[m] -= 2*hbox[m]; } /* if we're outside the cube, we're outside the sphere also! */ if ( (dshell[m] > rshell) || (-dshell[m] > rshell) ) { bInShell = FALSE; } } } else { gmx_bool bDone = FALSE; while (!bDone) { bDone = TRUE; for (m = DIM-1; m >= 0 && bInShell; m--) { if (dshell[m] < -hbox[m]) { bDone = FALSE; rvec_inc(dshell, box[m]); } if (dshell[m] >= hbox[m]) { bDone = FALSE; dshell[m] -= 2*hbox[m]; } } } for (m = DIM-1; m >= 0 && bInShell; m--) { /* if we're outside the cube, we're outside the sphere also! */ if ( (dshell[m] > rshell) || (-dshell[m] > rshell) ) { bInShell = FALSE; } } } } /* if we're inside the cube, check if we're inside the sphere */ if (bInShell) { bInShell = norm2(dshell) < rshell2; } } DBB(i); if (bInShell) { if (bBox) { if (hb->bGem) { copy_rvec(x[ad[i]], xtemp); } pbc_correct_gem(x[ad[i]], box, hbox); } for (m = DIM-1; m >= 0; m--) { if (TRUE || !hb->bGem) { /* put atom in the box */ while (x[ad[i]][m] < 0) { rvec_inc(x[ad[i]], box[m]); } while (x[ad[i]][m] >= box[m][m]) { rvec_dec(x[ad[i]], box[m]); } } /* determine grid index of atom */ grididx[m] = x[ad[i]][m]*invdelta[m]; grididx[m] = (grididx[m]+ngrid[m]) % ngrid[m]; } if (hb->bGem) { copy_rvec(xtemp, x[ad[i]]); /* copy back */ } gx = grididx[XX]; gy = grididx[YY]; gz = grididx[ZZ]; range_check(gx, 0, ngrid[XX]); range_check(gy, 0, ngrid[YY]); range_check(gz, 0, ngrid[ZZ]); DBB(gx); DBB(gy); DBB(gz); /* add atom to grid cell */ if (bAcc) { newgrid = &(grid[gz][gy][gx].a[gr]); } else { newgrid = 
&(grid[gz][gy][gx].d[gr]); } if (newgrid->nr >= newgrid->maxnr) { newgrid->maxnr += 10; DBB(newgrid->maxnr); srenew(newgrid->atoms, newgrid->maxnr); } DBB(newgrid->nr); newgrid->atoms[newgrid->nr] = ad[i]; newgrid->nr++; } } } } } static void count_da_grid(ivec ngrid, t_gridcell ***grid, t_icell danr) { int gr, xi, yi, zi; for (gr = 0; (gr < grNR); gr++) { danr[gr] = 0; for (zi = 0; zi < ngrid[ZZ]; zi++) { for (yi = 0; yi < ngrid[YY]; yi++) { for (xi = 0; xi < ngrid[XX]; xi++) { danr[gr] += grid[zi][yi][xi].d[gr].nr; } } } } } /* The grid loop. * Without a box, the grid is 1x1x1, so all loops are 1 long. * With a rectangular box (bTric==FALSE) all loops are 3 long. * With a triclinic box all loops are 3 long, except when a cell is * located next to one of the box edges which is not parallel to the * x/y-plane, in that case all cells in a line or layer are searched. * This could be implemented slightly more efficient, but the code * would get much more complicated. */ static inline gmx_bool grid_loop_begin(int n, int x, gmx_bool bTric, gmx_bool bEdge) { return ((n == 1) ? x : bTric && bEdge ? 0 : (x-1)); } static inline gmx_bool grid_loop_end(int n, int x, gmx_bool bTric, gmx_bool bEdge) { return ((n == 1) ? x : bTric && bEdge ? 
(n-1) : (x+1)); } static inline int grid_mod(int j, int n) { return (j+n) % (n); } static void dump_grid(FILE *fp, ivec ngrid, t_gridcell ***grid) { int gr, x, y, z, sum[grNR]; fprintf(fp, "grid %dx%dx%d\n", ngrid[XX], ngrid[YY], ngrid[ZZ]); for (gr = 0; gr < grNR; gr++) { sum[gr] = 0; fprintf(fp, "GROUP %d (%s)\n", gr, grpnames[gr]); for (z = 0; z < ngrid[ZZ]; z += 2) { fprintf(fp, "Z=%d,%d\n", z, z+1); for (y = 0; y < ngrid[YY]; y++) { for (x = 0; x < ngrid[XX]; x++) { fprintf(fp, "%3d", grid[x][y][z].d[gr].nr); sum[gr] += grid[z][y][x].d[gr].nr; fprintf(fp, "%3d", grid[x][y][z].a[gr].nr); sum[gr] += grid[z][y][x].a[gr].nr; } fprintf(fp, " | "); if ( (z+1) < ngrid[ZZ]) { for (x = 0; x < ngrid[XX]; x++) { fprintf(fp, "%3d", grid[z+1][y][x].d[gr].nr); sum[gr] += grid[z+1][y][x].d[gr].nr; fprintf(fp, "%3d", grid[z+1][y][x].a[gr].nr); sum[gr] += grid[z+1][y][x].a[gr].nr; } } fprintf(fp, "\n"); } } } fprintf(fp, "TOTALS:"); for (gr = 0; gr < grNR; gr++) { fprintf(fp, " %d=%d", gr, sum[gr]); } fprintf(fp, "\n"); } /* New GMX record! 5 * in a row. Congratulations! * Sorry, only four left. */ static void free_grid(ivec ngrid, t_gridcell ****grid) { int y, z; t_gridcell ***g = *grid; for (z = 0; z < ngrid[ZZ]; z++) { for (y = 0; y < ngrid[YY]; y++) { sfree(g[z][y]); } sfree(g[z]); } sfree(g); g = NULL; } void pbc_correct_gem(rvec dx, matrix box, rvec hbox) { int m; gmx_bool bDone = FALSE; while (!bDone) { bDone = TRUE; for (m = DIM-1; m >= 0; m--) { if (dx[m] < -hbox[m]) { bDone = FALSE; rvec_inc(dx, box[m]); } if (dx[m] >= hbox[m]) { bDone = FALSE; rvec_dec(dx, box[m]); } } } } /* Added argument r2cut, changed contact and implemented * use of second cut-off. 
* - Erik Marklund, June 29, 2006 */ static int is_hbond(t_hbdata *hb, int grpd, int grpa, int d, int a, real rcut, real r2cut, real ccut, rvec x[], gmx_bool bBox, matrix box, rvec hbox, real *d_ha, real *ang, gmx_bool bDA, int *hhh, gmx_bool bContact, gmx_bool bMerge, PSTYPE *p) { int h, hh, id, ja, ihb; rvec r_da, r_ha, r_dh, r = {0, 0, 0}; ivec ri; real rc2, r2c2, rda2, rha2, ca; gmx_bool HAinrange = FALSE; /* If !bDA. Needed for returning hbDist in a correct way. */ gmx_bool daSwap = FALSE; if (d == a) { return hbNo; } if (((id = donor_index(&hb->d, grpd, d)) == NOTSET) || ((ja = acceptor_index(&hb->a, grpa, a)) == NOTSET)) { return hbNo; } rc2 = rcut*rcut; r2c2 = r2cut*r2cut; rvec_sub(x[d], x[a], r_da); /* Insert projection code here */ if (bMerge && d > a && isInterchangable(hb, d, a, grpd, grpa)) { /* Then this hbond/contact will be found again, or it has already been found. */ /*return hbNo;*/ } if (bBox) { if (d > a && bMerge && isInterchangable(hb, d, a, grpd, grpa)) /* acceptor is also a donor and vice versa? */ { /* return hbNo; */ daSwap = TRUE; /* If so, then their history should be filed with donor and acceptor swapped. */ } if (hb->bGem) { copy_rvec(r_da, r); /* Save this for later */ pbc_correct_gem(r_da, box, hbox); } else { pbc_correct_gem(r_da, box, hbox); } } rda2 = iprod(r_da, r_da); if (bContact) { if (daSwap && grpa == grpd) { return hbNo; } if (rda2 <= rc2) { if (hb->bGem) { calcBoxDistance(hb->per->P, r, ri); *p = periodicIndex(ri, hb->per, daSwap); /* find (or add) periodicity index. */ } return hbHB; } else if (rda2 < r2c2) { return hbDist; } else { return hbNo; } } *hhh = NOTSET; if (bDA && (rda2 > rc2)) { return hbNo; } for (h = 0; (h < hb->d.nhydro[id]); h++) { hh = hb->d.hydro[id][h]; rha2 = rc2+1; if (!bDA) { rvec_sub(x[hh], x[a], r_ha); if (bBox) { pbc_correct_gem(r_ha, box, hbox); } rha2 = iprod(r_ha, r_ha); } if (hb->bGem) { calcBoxDistance(hb->per->P, r, ri); *p = periodicIndex(ri, hb->per, daSwap); /* find periodicity index. 
*/ } if (bDA || (!bDA && (rha2 <= rc2))) { rvec_sub(x[d], x[hh], r_dh); if (bBox) { pbc_correct_gem(r_dh, box, hbox); } if (!bDA) { HAinrange = TRUE; } ca = cos_angle(r_dh, r_da); /* if angle is smaller, cos is larger */ if (ca >= ccut) { *hhh = hh; *d_ha = sqrt(bDA ? rda2 : rha2); *ang = acos(ca); return hbHB; } } } if (bDA || (!bDA && HAinrange)) { return hbDist; } else { return hbNo; } } /* Fixed previously undiscovered bug in the merge code, where the last frame of each hbond disappears. - Erik Marklund, June 1, 2006 */ /* Added the following arguments: * ptmp[] - temporary periodicity hisory * a1 - identity of first acceptor/donor * a2 - identity of second acceptor/donor * - Erik Marklund, FEB 20 2010 */ /* Merging is now done on the fly, so do_merge is most likely obsolete now. * Will do some more testing before removing the function entirely. * - Erik Marklund, MAY 10 2010 */ static void do_merge(t_hbdata *hb, int ntmp, unsigned int htmp[], unsigned int gtmp[], PSTYPE ptmp[], t_hbond *hb0, t_hbond *hb1, int a1, int a2) { /* Here we need to make sure we're treating periodicity in * the right way for the geminate recombination kinetics. */ int m, mm, n00, n01, nn0, nnframes; PSTYPE pm; t_pShift *pShift; /* Decide where to start from when merging */ n00 = hb0->n0; n01 = hb1->n0; nn0 = min(n00, n01); nnframes = max(n00 + hb0->nframes, n01 + hb1->nframes) - nn0; /* Initiate tmp arrays */ for (m = 0; (m < ntmp); m++) { htmp[m] = 0; gtmp[m] = 0; ptmp[m] = 0; } /* Fill tmp arrays with values due to first HB */ /* Once again '<' had to be replaced with '<=' to catch the last frame in which the hbond appears. 
- Erik Marklund, June 1, 2006 */ for (m = 0; (m <= hb0->nframes); m++) { mm = m+n00-nn0; htmp[mm] = is_hb(hb0->h[0], m); if (hb->bGem) { pm = getPshift(hb->per->pHist[a1][a2], m+hb0->n0); if (pm > hb->per->nper) { gmx_fatal(FARGS, "Illegal shift!"); } else { ptmp[mm] = pm; /*hb->per->pHist[a1][a2][m];*/ } } } /* If we're doing geminate recompbination we usually don't need the distances. * Let's save some memory and time. */ if (TRUE || !hb->bGem || hb->per->gemtype == gemAD) { for (m = 0; (m <= hb0->nframes); m++) { mm = m+n00-nn0; gtmp[mm] = is_hb(hb0->g[0], m); } } /* Next HB */ for (m = 0; (m <= hb1->nframes); m++) { mm = m+n01-nn0; htmp[mm] = htmp[mm] || is_hb(hb1->h[0], m); gtmp[mm] = gtmp[mm] || is_hb(hb1->g[0], m); if (hb->bGem /* && ptmp[mm] != 0 */) { /* If this hbond has been seen before with donor and acceptor swapped, * then we need to find the mirrored (*-1) periodicity vector to truely * merge the hbond history. */ pm = findMirror(getPshift(hb->per->pHist[a2][a1], m+hb1->n0), hb->per->p2i, hb->per->nper); /* Store index of mirror */ if (pm > hb->per->nper) { gmx_fatal(FARGS, "Illegal shift!"); } ptmp[mm] = pm; } } /* Reallocate target array */ if (nnframes > hb0->maxframes) { srenew(hb0->h[0], 4+nnframes/hb->wordlen); srenew(hb0->g[0], 4+nnframes/hb->wordlen); } if (NULL != hb->per->pHist) { clearPshift(&(hb->per->pHist[a1][a2])); } /* Copy temp array to target array */ for (m = 0; (m <= nnframes); m++) { _set_hb(hb0->h[0], m, htmp[m]); _set_hb(hb0->g[0], m, gtmp[m]); if (hb->bGem) { addPshift(&(hb->per->pHist[a1][a2]), ptmp[m], m+nn0); } } /* Set scalar variables */ hb0->n0 = nn0; hb0->maxframes = nnframes; } /* Added argument bContact for nicer output. 
* Erik Marklund, June 29, 2006 */ static void merge_hb(t_hbdata *hb, gmx_bool bTwo, gmx_bool bContact) { int i, inrnew, indnew, j, ii, jj, m, id, ia, grp, ogrp, ntmp; unsigned int *htmp, *gtmp; PSTYPE *ptmp; t_hbond *hb0, *hb1; inrnew = hb->nrhb; indnew = hb->nrdist; /* Check whether donors are also acceptors */ printf("Merging hbonds with Acceptor and Donor swapped\n"); ntmp = 2*hb->max_frames; snew(gtmp, ntmp); snew(htmp, ntmp); snew(ptmp, ntmp); for (i = 0; (i < hb->d.nrd); i++) { fprintf(stderr, "\r%d/%d", i+1, hb->d.nrd); id = hb->d.don[i]; ii = hb->a.aptr[id]; for (j = 0; (j < hb->a.nra); j++) { ia = hb->a.acc[j]; jj = hb->d.dptr[ia]; if ((id != ia) && (ii != NOTSET) && (jj != NOTSET) && (!bTwo || (bTwo && (hb->d.grp[i] != hb->a.grp[j])))) { hb0 = hb->hbmap[i][j]; hb1 = hb->hbmap[jj][ii]; if (hb0 && hb1 && ISHB(hb0->history[0]) && ISHB(hb1->history[0])) { do_merge(hb, ntmp, htmp, gtmp, ptmp, hb0, hb1, i, j); if (ISHB(hb1->history[0])) { inrnew--; } else if (ISDIST(hb1->history[0])) { indnew--; } else if (bContact) { gmx_incons("No contact history"); } else { gmx_incons("Neither hydrogen bond nor distance"); } sfree(hb1->h[0]); sfree(hb1->g[0]); if (hb->bGem) { clearPshift(&(hb->per->pHist[jj][ii])); } hb1->h[0] = NULL; hb1->g[0] = NULL; hb1->history[0] = hbNo; } } } } fprintf(stderr, "\n"); printf("- Reduced number of hbonds from %d to %d\n", hb->nrhb, inrnew); printf("- Reduced number of distances from %d to %d\n", hb->nrdist, indnew); hb->nrhb = inrnew; hb->nrdist = indnew; sfree(gtmp); sfree(htmp); sfree(ptmp); } static void do_nhb_dist(FILE *fp, t_hbdata *hb, real t) { int i, j, k, n_bound[MAXHH], nbtot; h_id nhb; /* Set array to 0 */ for (k = 0; (k < MAXHH); k++) { n_bound[k] = 0; } /* Loop over possible donors */ for (i = 0; (i < hb->d.nrd); i++) { for (j = 0; (j < hb->d.nhydro[i]); j++) { n_bound[hb->d.nhbonds[i][j]]++; } } fprintf(fp, "%12.5e", t); nbtot = 0; for (k = 0; (k < MAXHH); k++) { fprintf(fp, " %8d", n_bound[k]); nbtot += n_bound[k]*k; } 
fprintf(fp, " %8d\n", nbtot); } /* Added argument bContact in do_hblife(...). Also * added support for -contact in function body. * - Erik Marklund, May 31, 2006 */ /* Changed the contact code slightly. * - Erik Marklund, June 29, 2006 */ static void do_hblife(const char *fn, t_hbdata *hb, gmx_bool bMerge, gmx_bool bContact, const output_env_t oenv) { FILE *fp; const char *leg[] = { "p(t)", "t p(t)" }; int *histo; int i, j, j0, k, m, nh, ihb, ohb, nhydro, ndump = 0; int nframes = hb->nframes; unsigned int **h; real t, x1, dt; double sum, integral; t_hbond *hbh; snew(h, hb->maxhydro); snew(histo, nframes+1); /* Total number of hbonds analyzed here */ for (i = 0; (i < hb->d.nrd); i++) { for (k = 0; (k < hb->a.nra); k++) { hbh = hb->hbmap[i][k]; if (hbh) { if (bMerge) { if (hbh->h[0]) { h[0] = hbh->h[0]; nhydro = 1; } else { nhydro = 0; } } else { nhydro = 0; for (m = 0; (m < hb->maxhydro); m++) { if (hbh->h[m]) { h[nhydro++] = bContact ? hbh->g[m] : hbh->h[m]; } } } for (nh = 0; (nh < nhydro); nh++) { ohb = 0; j0 = 0; /* Changed '<' into '<=' below, just like I did in the hbm-output-loop in the main code. 
- Erik Marklund, May 31, 2006 */ for (j = 0; (j <= hbh->nframes); j++) { ihb = is_hb(h[nh], j); if (debug && (ndump < 10)) { fprintf(debug, "%5d %5d\n", j, ihb); } if (ihb != ohb) { if (ihb) { j0 = j; } else { histo[j-j0]++; } ohb = ihb; } } ndump++; } } } } fprintf(stderr, "\n"); if (bContact) { fp = xvgropen(fn, "Uninterrupted contact lifetime", output_env_get_xvgr_tlabel(oenv), "()", oenv); } else { fp = xvgropen(fn, "Uninterrupted hydrogen bond lifetime", output_env_get_xvgr_tlabel(oenv), "()", oenv); } xvgr_legend(fp, asize(leg), leg, oenv); j0 = nframes-1; while ((j0 > 0) && (histo[j0] == 0)) { j0--; } sum = 0; for (i = 0; (i <= j0); i++) { sum += histo[i]; } dt = hb->time[1]-hb->time[0]; sum = dt*sum; integral = 0; for (i = 1; (i <= j0); i++) { t = hb->time[i] - hb->time[0] - 0.5*dt; x1 = t*histo[i]/sum; fprintf(fp, "%8.3f %10.3e %10.3e\n", t, histo[i]/sum, x1); integral += x1; } integral *= dt; ffclose(fp); printf("%s lifetime = %.2f ps\n", bContact ? "Contact" : "HB", integral); printf("Note that the lifetime obtained in this manner is close to useless\n"); printf("Use the -ac option instead and check the Forward lifetime\n"); please_cite(stdout, "Spoel2006b"); sfree(h); sfree(histo); } /* Changed argument bMerge into oneHB to handle contacts properly. 
* - Erik Marklund, June 29, 2006 */ static void dump_ac(t_hbdata *hb, gmx_bool oneHB, int nDump) { FILE *fp; int i, j, k, m, nd, ihb, idist; int nframes = hb->nframes; gmx_bool bPrint; t_hbond *hbh; if (nDump <= 0) { return; } fp = ffopen("debug-ac.xvg", "w"); for (j = 0; (j < nframes); j++) { fprintf(fp, "%10.3f", hb->time[j]); for (i = nd = 0; (i < hb->d.nrd) && (nd < nDump); i++) { for (k = 0; (k < hb->a.nra) && (nd < nDump); k++) { bPrint = FALSE; ihb = idist = 0; hbh = hb->hbmap[i][k]; if (oneHB) { if (hbh->h[0]) { ihb = is_hb(hbh->h[0], j); idist = is_hb(hbh->g[0], j); bPrint = TRUE; } } else { for (m = 0; (m < hb->maxhydro) && !ihb; m++) { ihb = ihb || ((hbh->h[m]) && is_hb(hbh->h[m], j)); idist = idist || ((hbh->g[m]) && is_hb(hbh->g[m], j)); } /* This is not correct! */ /* What isn't correct? -Erik M */ bPrint = TRUE; } if (bPrint) { fprintf(fp, " %1d-%1d", ihb, idist); nd++; } } } fprintf(fp, "\n"); } ffclose(fp); } static real calc_dg(real tau, real temp) { real kbt; kbt = BOLTZ*temp; if (tau <= 0) { return -666; } else { return kbt*log(kbt*tau/PLANCK); } } typedef struct { int n0, n1, nparams, ndelta; real kkk[2]; real *t, *ct, *nt, *kt, *sigma_ct, *sigma_nt, *sigma_kt; } t_luzar; #ifdef HAVE_LIBGSL #include <gsl/gsl_multimin.h> #include <gsl/gsl_sf.h> #include <gsl/gsl_version.h> static double my_f(const gsl_vector *v, void *params) { t_luzar *tl = (t_luzar *)params; int i; double tol = 1e-16, chi2 = 0; double di; real k, kp; for (i = 0; (i < tl->nparams); i++) { tl->kkk[i] = gsl_vector_get(v, i); } k = tl->kkk[0]; kp = tl->kkk[1]; for (i = tl->n0; (i < tl->n1); i += tl->ndelta) { di = sqr(k*tl->sigma_ct[i]) + sqr(kp*tl->sigma_nt[i]) + sqr(tl->sigma_kt[i]); /*di = 1;*/ if (di > tol) { chi2 += sqr(k*tl->ct[i]-kp*tl->nt[i]-tl->kt[i])/di; } else { fprintf(stderr, "WARNING: sigma_ct = %g, sigma_nt = %g, sigma_kt = %g\n" "di = %g k = %g kp = %g\n", tl->sigma_ct[i], tl->sigma_nt[i], tl->sigma_kt[i], di, k, kp); } } #ifdef DEBUG chi2 = 
0.3*sqr(k-0.6)+0.7*sqr(kp-1.3); #endif return chi2; } static real optimize_luzar_parameters(FILE *fp, t_luzar *tl, int maxiter, real tol) { real size, d2; int iter = 0; int status = 0; int i; const gsl_multimin_fminimizer_type *T; gsl_multimin_fminimizer *s; gsl_vector *x, *dx; gsl_multimin_function my_func; my_func.f = &my_f; my_func.n = tl->nparams; my_func.params = (void *) tl; /* Starting point */ x = gsl_vector_alloc (my_func.n); for (i = 0; (i < my_func.n); i++) { gsl_vector_set (x, i, tl->kkk[i]); } /* Step size, different for each of the parameters */ dx = gsl_vector_alloc (my_func.n); for (i = 0; (i < my_func.n); i++) { gsl_vector_set (dx, i, 0.01*tl->kkk[i]); } T = gsl_multimin_fminimizer_nmsimplex; s = gsl_multimin_fminimizer_alloc (T, my_func.n); gsl_multimin_fminimizer_set (s, &my_func, x, dx); gsl_vector_free (x); gsl_vector_free (dx); if (fp) { fprintf(fp, "%5s %12s %12s %12s %12s\n", "Iter", "k", "kp", "NM Size", "Chi2"); } do { iter++; status = gsl_multimin_fminimizer_iterate (s); if (status != 0) { gmx_fatal(FARGS, "Something went wrong in the iteration in minimizer %s", gsl_multimin_fminimizer_name(s)); } d2 = gsl_multimin_fminimizer_minimum(s); size = gsl_multimin_fminimizer_size(s); status = gsl_multimin_test_size(size, tol); if (status == GSL_SUCCESS) { if (fp) { fprintf(fp, "Minimum found using %s at:\n", gsl_multimin_fminimizer_name(s)); } } if (fp) { fprintf(fp, "%5d", iter); for (i = 0; (i < my_func.n); i++) { fprintf(fp, " %12.4e", gsl_vector_get (s->x, i)); } fprintf (fp, " %12.4e %12.4e\n", size, d2); } } while ((status == GSL_CONTINUE) && (iter < maxiter)); gsl_multimin_fminimizer_free (s); return d2; } static real quality_of_fit(real chi2, int N) { return gsl_sf_gamma_inc_Q((N-2)/2.0, chi2/2.0); } #else static real optimize_luzar_parameters(FILE *fp, t_luzar *tl, int maxiter, real tol) { fprintf(stderr, "This program needs the GNU scientific library to work.\n"); return -1; } static real quality_of_fit(real chi2, int N) { 
fprintf(stderr, "This program needs the GNU scientific library to work.\n"); return -1; } #endif static real compute_weighted_rates(int n, real t[], real ct[], real nt[], real kt[], real sigma_ct[], real sigma_nt[], real sigma_kt[], real *k, real *kp, real *sigma_k, real *sigma_kp, real fit_start) { #define NK 10 int i, j; t_luzar tl; real kkk = 0, kkp = 0, kk2 = 0, kp2 = 0, chi2; *sigma_k = 0; *sigma_kp = 0; for (i = 0; (i < n); i++) { if (t[i] >= fit_start) { break; } } tl.n0 = i; tl.n1 = n; tl.nparams = 2; tl.ndelta = 1; tl.t = t; tl.ct = ct; tl.nt = nt; tl.kt = kt; tl.sigma_ct = sigma_ct; tl.sigma_nt = sigma_nt; tl.sigma_kt = sigma_kt; tl.kkk[0] = *k; tl.kkk[1] = *kp; chi2 = optimize_luzar_parameters(debug, &tl, 1000, 1e-3); *k = tl.kkk[0]; *kp = tl.kkk[1] = *kp; tl.ndelta = NK; for (j = 0; (j < NK); j++) { (void) optimize_luzar_parameters(debug, &tl, 1000, 1e-3); kkk += tl.kkk[0]; kkp += tl.kkk[1]; kk2 += sqr(tl.kkk[0]); kp2 += sqr(tl.kkk[1]); tl.n0++; } *sigma_k = sqrt(kk2/NK - sqr(kkk/NK)); *sigma_kp = sqrt(kp2/NK - sqr(kkp/NK)); return chi2; } static void smooth_tail(int n, real t[], real c[], real sigma_c[], real start, const output_env_t oenv) { FILE *fp; real e_1, fitparm[4]; int i; e_1 = exp(-1); for (i = 0; (i < n); i++) { if (c[i] < e_1) { break; } } if (i < n) { fitparm[0] = t[i]; } else { fitparm[0] = 10; } fitparm[1] = 0.95; do_lmfit(n, c, sigma_c, 0, t, start, t[n-1], oenv, bDebugMode(), effnEXP2, fitparm, 0); } void analyse_corr(int n, real t[], real ct[], real nt[], real kt[], real sigma_ct[], real sigma_nt[], real sigma_kt[], real fit_start, real temp, real smooth_tail_start, const output_env_t oenv) { int i0, i; real k = 1, kp = 1, kow = 1; real Q = 0, chi22, chi2, dg, dgp, tau_hb, dtau, tau_rlx, e_1, dt, sigma_k, sigma_kp, ddg; double tmp, sn2 = 0, sc2 = 0, sk2 = 0, scn = 0, sck = 0, snk = 0; gmx_bool bError = (sigma_ct != NULL) && (sigma_nt != NULL) && (sigma_kt != NULL); if (smooth_tail_start >= 0) { smooth_tail(n, t, ct, sigma_ct, 
smooth_tail_start, oenv); smooth_tail(n, t, nt, sigma_nt, smooth_tail_start, oenv); smooth_tail(n, t, kt, sigma_kt, smooth_tail_start, oenv); } for (i0 = 0; (i0 < n-2) && ((t[i0]-t[0]) < fit_start); i0++) { ; } if (i0 < n-2) { for (i = i0; (i < n); i++) { sc2 += sqr(ct[i]); sn2 += sqr(nt[i]); sk2 += sqr(kt[i]); sck += ct[i]*kt[i]; snk += nt[i]*kt[i]; scn += ct[i]*nt[i]; } printf("Hydrogen bond thermodynamics at T = %g K\n", temp); tmp = (sn2*sc2-sqr(scn)); if ((tmp > 0) && (sn2 > 0)) { k = (sn2*sck-scn*snk)/tmp; kp = (k*scn-snk)/sn2; if (bError) { chi2 = 0; for (i = i0; (i < n); i++) { chi2 += sqr(k*ct[i]-kp*nt[i]-kt[i]); } chi22 = compute_weighted_rates(n, t, ct, nt, kt, sigma_ct, sigma_nt, sigma_kt, &k, &kp, &sigma_k, &sigma_kp, fit_start); Q = quality_of_fit(chi2, 2); ddg = BOLTZ*temp*sigma_k/k; printf("Fitting paramaters chi^2 = %10g, Quality of fit = %10g\n", chi2, Q); printf("The Rate and Delta G are followed by an error estimate\n"); printf("----------------------------------------------------------\n" "Type Rate (1/ps) Sigma Time (ps) DG (kJ/mol) Sigma\n"); printf("Forward %10.3f %6.2f %8.3f %10.3f %6.2f\n", k, sigma_k, 1/k, calc_dg(1/k, temp), ddg); ddg = BOLTZ*temp*sigma_kp/kp; printf("Backward %10.3f %6.2f %8.3f %10.3f %6.2f\n", kp, sigma_kp, 1/kp, calc_dg(1/kp, temp), ddg); } else { chi2 = 0; for (i = i0; (i < n); i++) { chi2 += sqr(k*ct[i]-kp*nt[i]-kt[i]); } printf("Fitting parameters chi^2 = %10g\nQ = %10g\n", chi2, Q); printf("--------------------------------------------------\n" "Type Rate (1/ps) Time (ps) DG (kJ/mol) Chi^2\n"); printf("Forward %10.3f %8.3f %10.3f %10g\n", k, 1/k, calc_dg(1/k, temp), chi2); printf("Backward %10.3f %8.3f %10.3f\n", kp, 1/kp, calc_dg(1/kp, temp)); } } if (sc2 > 0) { kow = 2*sck/sc2; printf("One-way %10.3f %s%8.3f %10.3f\n", kow, bError ? 
" " : "", 1/kow, calc_dg(1/kow, temp)); } else { printf(" - Numerical problems computing HB thermodynamics:\n" "sc2 = %g sn2 = %g sk2 = %g sck = %g snk = %g scn = %g\n", sc2, sn2, sk2, sck, snk, scn); } /* Determine integral of the correlation function */ tau_hb = evaluate_integral(n, t, ct, NULL, (t[n-1]-t[0])/2, &dtau); printf("Integral %10.3f %s%8.3f %10.3f\n", 1/tau_hb, bError ? " " : "", tau_hb, calc_dg(tau_hb, temp)); e_1 = exp(-1); for (i = 0; (i < n-2); i++) { if ((ct[i] > e_1) && (ct[i+1] <= e_1)) { break; } } if (i < n-2) { /* Determine tau_relax from linear interpolation */ tau_rlx = t[i]-t[0] + (e_1-ct[i])*(t[i+1]-t[i])/(ct[i+1]-ct[i]); printf("Relaxation %10.3f %8.3f %s%10.3f\n", 1/tau_rlx, tau_rlx, bError ? " " : "", calc_dg(tau_rlx, temp)); } } else { printf("Correlation functions too short to compute thermodynamics\n"); } } void compute_derivative(int nn, real x[], real y[], real dydx[]) { int j; /* Compute k(t) = dc(t)/dt */ for (j = 1; (j < nn-1); j++) { dydx[j] = (y[j+1]-y[j-1])/(x[j+1]-x[j-1]); } /* Extrapolate endpoints */ dydx[0] = 2*dydx[1] - dydx[2]; dydx[nn-1] = 2*dydx[nn-2] - dydx[nn-3]; } static void parallel_print(int *data, int nThreads) { /* This prints the donors on which each tread is currently working. */ int i; fprintf(stderr, "\r"); for (i = 0; i < nThreads; i++) { fprintf(stderr, "%-7i", data[i]); } } static void normalizeACF(real *ct, real *gt, int nhb, int len) { real ct_fac, gt_fac; int i; /* Xu and Berne use the same normalization constant */ ct_fac = 1.0/ct[0]; gt_fac = (nhb == 0) ? 0 : 1.0/(real)nhb; printf("Normalization for c(t) = %g for gh(t) = %g\n", ct_fac, gt_fac); for (i = 0; i < len; i++) { ct[i] *= ct_fac; if (gt != NULL) { gt[i] *= gt_fac; } } } /* Added argument bContact in do_hbac(...). Also * added support for -contact in the actual code. 
* - Erik Marklund, May 31, 2006 */ /* Changed contact code and added argument R2 * - Erik Marklund, June 29, 2006 */ static void do_hbac(const char *fn, t_hbdata *hb, int nDump, gmx_bool bMerge, gmx_bool bContact, real fit_start, real temp, gmx_bool R2, real smooth_tail_start, const output_env_t oenv, t_gemParams *params, const char *gemType, int nThreads, const int NN, const gmx_bool bBallistic, const gmx_bool bGemFit) { FILE *fp; int i, j, k, m, n, o, nd, ihb, idist, n2, nn, iter, nSets; const char *legNN[] = { "Ac(t)", "Ac'(t)" }; static char **legGem; const char *legLuzar[] = { "Ac\\sfin sys\\v{}\\z{}(t)", "Ac(t)", "Cc\\scontact,hb\\v{}\\z{}(t)", "-dAc\\sfs\\v{}\\z{}/dt" }; gmx_bool bNorm = FALSE, bOMP = FALSE; double nhb = 0; int nhbi = 0; real *rhbex = NULL, *ht, *gt, *ght, *dght, *kt; real *ct, *p_ct, tail, tail2, dtail, ct_fac, ght_fac, *cct; const real tol = 1e-3; int nframes = hb->nframes, nf; unsigned int **h = NULL, **g = NULL; int nh, nhbonds, nhydro, ngh; t_hbond *hbh; PSTYPE p, *pfound = NULL, np; t_pShift *pHist; int *ptimes = NULL, *poff = NULL, anhb, n0, mMax = INT_MIN; real **rHbExGem = NULL; gmx_bool c; int acType; t_E *E; double *ctdouble, *timedouble, *fittedct; double fittolerance = 0.1; int *dondata = NULL, thisThread; enum { AC_NONE, AC_NN, AC_GEM, AC_LUZAR }; #ifdef GMX_OPENMP bOMP = TRUE; #else bOMP = FALSE; #endif printf("Doing autocorrelation "); /* Decide what kind of ACF calculations to do. */ if (NN > NN_NONE && NN < NN_NR) { #ifdef HAVE_NN_LOOPS acType = AC_NN; printf("using the energy estimate.\n"); #else acType = AC_NONE; printf("Can't do the NN-loop. Yet.\n"); #endif } else if (hb->bGem) { acType = AC_GEM; printf("according to the reversible geminate recombination model by Omer Markowitch.\n"); nSets = 1 + (bBallistic ? 1 : 0) + (bGemFit ? 
1 : 0); snew(legGem, nSets); for (i = 0; i < nSets; i++) { snew(legGem[i], 128); } sprintf(legGem[0], "Ac\\s%s\\v{}\\z{}(t)", gemType); if (bBallistic) { sprintf(legGem[1], "Ac'(t)"); } if (bGemFit) { sprintf(legGem[(bBallistic ? 3 : 2)], "Ac\\s%s,fit\\v{}\\z{}(t)", gemType); } } else { acType = AC_LUZAR; printf("according to the theory of Luzar and Chandler.\n"); } fflush(stdout); /* build hbexist matrix in reals for autocorr */ /* Allocate memory for computing ACF (rhbex) and aggregating the ACF (ct) */ n2 = 1; while (n2 < nframes) { n2 *= 2; } nn = nframes/2; if (acType != AC_NN || bOMP) { snew(h, hb->maxhydro); snew(g, hb->maxhydro); } /* Dump hbonds for debugging */ dump_ac(hb, bMerge || bContact, nDump); /* Total number of hbonds analyzed here */ nhbonds = 0; ngh = 0; anhb = 0; if (acType != AC_LUZAR && bOMP) { nThreads = min((nThreads <= 0) ? INT_MAX : nThreads, gmx_omp_get_max_threads()); gmx_omp_set_num_threads(nThreads); snew(dondata, nThreads); for (i = 0; i < nThreads; i++) { dondata[i] = -1; } printf("ACF calculations parallelized with OpenMP using %i threads.\n" "Expect close to linear scaling over this donor-loop.\n", nThreads); fflush(stdout); fprintf(stderr, "Donors: [thread no]\n"); { char tmpstr[7]; for (i = 0; i < nThreads; i++) { snprintf(tmpstr, 7, "[%i]", i); fprintf(stderr, "%-7s", tmpstr); } } fprintf(stderr, "\n"); } /* Build the ACF according to acType */ switch (acType) { case AC_NN: #ifdef HAVE_NN_LOOPS /* Here we're using the estimated energy for the hydrogen bonds. */ snew(ct, nn); #pragma omp parallel \ private(i, j, k, nh, E, rhbex, thisThread) \ default(shared) { #pragma omp barrier thisThread = gmx_omp_get_thread_num(); rhbex = NULL; snew(rhbex, n2); memset(rhbex, 0, n2*sizeof(real)); /* Trust no-one, not even malloc()! 
*/ #pragma omp barrier #pragma omp for schedule (dynamic) for (i = 0; i < hb->d.nrd; i++) /* loop over donors */ { if (bOMP) { #pragma omp critical { dondata[thisThread] = i; parallel_print(dondata, nThreads); } } else { fprintf(stderr, "\r %i", i); } for (j = 0; j < hb->a.nra; j++) /* loop over acceptors */ { for (nh = 0; nh < hb->d.nhydro[i]; nh++) /* loop over donors' hydrogens */ { E = hb->hbE.E[i][j][nh]; if (E != NULL) { for (k = 0; k < nframes; k++) { if (E[k] != NONSENSE_E) { rhbex[k] = (real)E[k]; } } low_do_autocorr(NULL, oenv, NULL, nframes, 1, -1, &(rhbex), hb->time[1]-hb->time[0], eacNormal, 1, FALSE, bNorm, FALSE, 0, -1, 0, 1); #pragma omp critical { for (k = 0; (k < nn); k++) { ct[k] += rhbex[k]; } } } } /* k loop */ } /* j loop */ } /* i loop */ sfree(rhbex); #pragma omp barrier } if (bOMP) { sfree(dondata); } normalizeACF(ct, NULL, 0, nn); snew(ctdouble, nn); snew(timedouble, nn); for (j = 0; j < nn; j++) { timedouble[j] = (double)(hb->time[j]); ctdouble[j] = (double)(ct[j]); } /* Remove ballistic term */ /* Ballistic component removal and fitting to the reversible geminate recombination model * will be taken out for the time being. First of all, one can remove the ballistic * component with g_analyze afterwards. Secondly, and more importantly, there are still * problems with the robustness of the fitting to the model. More work is needed. * A third reason is that we're currently using gsl for this and wish to reduce dependence * on external libraries. There are Levenberg-Marquardt and nsimplex solvers that come with * a BSD-licence that can do the job. * * - Erik Marklund, June 18 2010. */ /* if (params->ballistic/params->tDelta >= params->nExpFit*2+1) */ /* takeAwayBallistic(ctdouble, timedouble, nn, params->ballistic, params->nExpFit, params->bDt); */ /* else */ /* printf("\nNumber of data points is less than the number of parameters to fit\n." 
*/ /* "The system is underdetermined, hence no ballistic term can be found.\n\n"); */ fp = xvgropen(fn, "Hydrogen Bond Autocorrelation", output_env_get_xvgr_tlabel(oenv), "C(t)"); xvgr_legend(fp, asize(legNN), legNN); for (j = 0; (j < nn); j++) { fprintf(fp, "%10g %10g %10g\n", hb->time[j]-hb->time[0], ct[j], ctdouble[j]); } xvgrclose(fp); sfree(ct); sfree(ctdouble); sfree(timedouble); #endif /* HAVE_NN_LOOPS */ break; /* case AC_NN */ case AC_GEM: snew(ct, 2*n2); memset(ct, 0, 2*n2*sizeof(real)); #ifndef GMX_OPENMP fprintf(stderr, "Donor:\n"); #define __ACDATA ct #else #define __ACDATA p_ct #endif #pragma omp parallel \ private(i, k, nh, hbh, pHist, h, g, n0, nf, np, j, m, \ pfound, poff, rHbExGem, p, ihb, mMax, \ thisThread, p_ct) \ default(shared) { /* ########## THE START OF THE ENORMOUS PARALLELIZED BLOCK! ########## */ h = NULL; g = NULL; thisThread = gmx_omp_get_thread_num(); snew(h, hb->maxhydro); snew(g, hb->maxhydro); mMax = INT_MIN; rHbExGem = NULL; poff = NULL; pfound = NULL; p_ct = NULL; snew(p_ct, 2*n2); memset(p_ct, 0, 2*n2*sizeof(real)); /* I'm using a chunk size of 1, since I expect \ * the overhead to be really small compared \ * to the actual calculations \ */ #pragma omp for schedule(dynamic,1) nowait for (i = 0; i < hb->d.nrd; i++) { if (bOMP) { #pragma omp critical { dondata[thisThread] = i; parallel_print(dondata, nThreads); } } else { fprintf(stderr, "\r %i", i); } for (k = 0; k < hb->a.nra; k++) { for (nh = 0; nh < ((bMerge || bContact) ? 1 : hb->d.nhydro[i]); nh++) { hbh = hb->hbmap[i][k]; if (hbh) { /* Note that if hb->per->gemtype==gemDD, then distances will be stored in * hb->hbmap[d][a].h array anyway, because the contact flag will be set. * hence, it's only with the gemAD mode that hb->hbmap[d][a].g will be used. */ pHist = &(hb->per->pHist[i][k]); if (ISHB(hbh->history[nh]) && pHist->len != 0) { { h[nh] = hbh->h[nh]; g[nh] = hb->per->gemtype == gemAD ? 
hbh->g[nh] : NULL; } n0 = hbh->n0; nf = hbh->nframes; /* count the number of periodic shifts encountered and store * them in separate arrays. */ np = 0; for (j = 0; j < pHist->len; j++) { p = pHist->p[j]; for (m = 0; m <= np; m++) { if (m == np) /* p not recognized in list. Add it and set up new array. */ { np++; if (np > hb->per->nper) { gmx_fatal(FARGS, "Too many pshifts. Something's utterly wrong here."); } if (m >= mMax) /* Extend the arrays. * Doing it like this, using mMax to keep track of the sizes, * eleviates the need for freeing and re-allocating the arrays * when taking on the next donor-acceptor pair */ { mMax = m; srenew(pfound, np); /* The list of found periodic shifts. */ srenew(rHbExGem, np); /* The hb existence functions (-aver_hb). */ snew(rHbExGem[m], 2*n2); srenew(poff, np); } { if (rHbExGem != NULL && rHbExGem[m] != NULL) { /* This must be done, as this array was most likey * used to store stuff in some previous iteration. */ memset(rHbExGem[m], 0, (sizeof(real)) * (2*n2)); } else { fprintf(stderr, "rHbExGem not initialized! m = %i\n", m); } } pfound[m] = p; poff[m] = -1; break; } /* m==np */ if (p == pfound[m]) { break; } } /* m: Loop over found shifts */ } /* j: Loop over shifts */ /* Now unpack and disentangle the existence funtions. */ for (j = 0; j < nf; j++) { /* i: donor, * k: acceptor * nh: hydrogen * j: time * p: periodic shift * pfound: list of periodic shifts found for this pair. * poff: list of frame offsets; that is, the first * frame a hbond has a particular periodic shift. */ p = getPshift(*pHist, j+n0); if (p != -1) { for (m = 0; m < np; m++) { if (pfound[m] == p) { break; } if (m == (np-1)) { gmx_fatal(FARGS, "Shift not found, but must be there."); } } ihb = is_hb(h[nh], j) || ((hb->per->gemtype != gemAD || j == 0) ? 
FALSE : is_hb(g[nh], j)); if (ihb) { if (poff[m] == -1) { poff[m] = j; /* Here's where the first hbond with shift p is, * relative to the start of h[0].*/ } if (j < poff[m]) { gmx_fatal(FARGS, "j<poff[m]"); } rHbExGem[m][j-poff[m]] += 1; } } } /* Now, build ac. */ for (m = 0; m < np; m++) { if (rHbExGem[m][0] > 0 && n0+poff[m] < nn /* && m==0 */) { low_do_autocorr(NULL, oenv, NULL, nframes, 1, -1, &(rHbExGem[m]), hb->time[1]-hb->time[0], eacNormal, 1, FALSE, bNorm, FALSE, 0, -1, 0, 1); for (j = 0; (j < nn); j++) { __ACDATA[j] += rHbExGem[m][j]; } } } /* Building of ac. */ } /* if (ISHB(...*/ } /* if (hbh) */ } /* hydrogen loop */ } /* acceptor loop */ } /* donor loop */ for (m = 0; m <= mMax; m++) { sfree(rHbExGem[m]); } sfree(pfound); sfree(poff); sfree(rHbExGem); sfree(h); sfree(g); if (bOMP) { #pragma omp critical { for (i = 0; i < nn; i++) { ct[i] += p_ct[i]; } } sfree(p_ct); } } /* ########## THE END OF THE ENORMOUS PARALLELIZED BLOCK ########## */ if (bOMP) { sfree(dondata); } normalizeACF(ct, NULL, 0, nn); fprintf(stderr, "\n\nACF successfully calculated.\n"); /* Use this part to fit to geminate recombination - JCP 129, 84505 (2008) */ snew(ctdouble, nn); snew(timedouble, nn); snew(fittedct, nn); for (j = 0; j < nn; j++) { timedouble[j] = (double)(hb->time[j]); ctdouble[j] = (double)(ct[j]); } /* Remove ballistic term */ /* Ballistic component removal and fitting to the reversible geminate recombination model * will be taken out for the time being. First of all, one can remove the ballistic * component with g_analyze afterwards. Secondly, and more importantly, there are still * problems with the robustness of the fitting to the model. More work is needed. * A third reason is that we're currently using gsl for this and wish to reduce dependence * on external libraries. There are Levenberg-Marquardt and nsimplex solvers that come with * a BSD-licence that can do the job. * * - Erik Marklund, June 18 2010. 
*/ /* if (bBallistic) { */ /* if (params->ballistic/params->tDelta >= params->nExpFit*2+1) */ /* takeAwayBallistic(ctdouble, timedouble, nn, params->ballistic, params->nExpFit, params->bDt); */ /* else */ /* printf("\nNumber of data points is less than the number of parameters to fit\n." */ /* "The system is underdetermined, hence no ballistic term can be found.\n\n"); */ /* } */ /* if (bGemFit) */ /* fitGemRecomb(ctdouble, timedouble, &fittedct, nn, params); */ if (bContact) { fp = xvgropen(fn, "Contact Autocorrelation", output_env_get_xvgr_tlabel(oenv), "C(t)", oenv); } else { fp = xvgropen(fn, "Hydrogen Bond Autocorrelation", output_env_get_xvgr_tlabel(oenv), "C(t)", oenv); } xvgr_legend(fp, asize(legGem), (const char**)legGem, oenv); for (j = 0; (j < nn); j++) { fprintf(fp, "%10g %10g", hb->time[j]-hb->time[0], ct[j]); if (bBallistic) { fprintf(fp, " %10g", ctdouble[j]); } if (bGemFit) { fprintf(fp, " %10g", fittedct[j]); } fprintf(fp, "\n"); } xvgrclose(fp); sfree(ctdouble); sfree(timedouble); sfree(fittedct); sfree(ct); break; /* case AC_GEM */ case AC_LUZAR: snew(rhbex, 2*n2); snew(ct, 2*n2); snew(gt, 2*n2); snew(ht, 2*n2); snew(ght, 2*n2); snew(dght, 2*n2); snew(kt, nn); snew(cct, nn); for (i = 0; (i < hb->d.nrd); i++) { for (k = 0; (k < hb->a.nra); k++) { nhydro = 0; hbh = hb->hbmap[i][k]; if (hbh) { if (bMerge || bContact) { if (ISHB(hbh->history[0])) { h[0] = hbh->h[0]; g[0] = hbh->g[0]; nhydro = 1; } } else { for (m = 0; (m < hb->maxhydro); m++) { if (bContact ? ISDIST(hbh->history[m]) : ISHB(hbh->history[m])) { g[nhydro] = hbh->g[m]; h[nhydro] = hbh->h[m]; nhydro++; } } } nf = hbh->nframes; for (nh = 0; (nh < nhydro); nh++) { int nrint = bContact ? hb->nrdist : hb->nrhb; if ((((nhbonds+1) % 10) == 0) || (nhbonds+1 == nrint)) { fprintf(stderr, "\rACF %d/%d", nhbonds+1, nrint); } nhbonds++; for (j = 0; (j < nframes); j++) { /* Changed '<' into '<=' below, just like I did in the hbm-output-loop in the gmx_hbond() block. 
- Erik Marklund, May 31, 2006 */ if (j <= nf) { ihb = is_hb(h[nh], j); idist = is_hb(g[nh], j); } else { ihb = idist = 0; } rhbex[j] = ihb; /* For contacts: if a second cut-off is provided, use it, * otherwise use g(t) = 1-h(t) */ if (!R2 && bContact) { gt[j] = 1-ihb; } else { gt[j] = idist*(1-ihb); } ht[j] = rhbex[j]; nhb += ihb; } /* The autocorrelation function is normalized after summation only */ low_do_autocorr(NULL, oenv, NULL, nframes, 1, -1, &rhbex, hb->time[1]-hb->time[0], eacNormal, 1, FALSE, bNorm, FALSE, 0, -1, 0, 1); /* Cross correlation analysis for thermodynamics */ for (j = nframes; (j < n2); j++) { ht[j] = 0; gt[j] = 0; } cross_corr(n2, ht, gt, dght); for (j = 0; (j < nn); j++) { ct[j] += rhbex[j]; ght[j] += dght[j]; } } } } } fprintf(stderr, "\n"); sfree(h); sfree(g); normalizeACF(ct, ght, nhb, nn); /* Determine tail value for statistics */ tail = 0; tail2 = 0; for (j = nn/2; (j < nn); j++) { tail += ct[j]; tail2 += ct[j]*ct[j]; } tail /= (nn - nn/2); tail2 /= (nn - nn/2); dtail = sqrt(tail2-tail*tail); /* Check whether the ACF is long enough */ if (dtail > tol) { printf("\nWARNING: Correlation function is probably not long enough\n" "because the standard deviation in the tail of C(t) > %g\n" "Tail value (average C(t) over second half of acf): %g +/- %g\n", tol, tail, dtail); } for (j = 0; (j < nn); j++) { cct[j] = ct[j]; ct[j] = (cct[j]-tail)/(1-tail); } /* Compute negative derivative k(t) = -dc(t)/dt */ compute_derivative(nn, hb->time, ct, kt); for (j = 0; (j < nn); j++) { kt[j] = -kt[j]; } if (bContact) { fp = xvgropen(fn, "Contact Autocorrelation", output_env_get_xvgr_tlabel(oenv), "C(t)", oenv); } else { fp = xvgropen(fn, "Hydrogen Bond Autocorrelation", output_env_get_xvgr_tlabel(oenv), "C(t)", oenv); } xvgr_legend(fp, asize(legLuzar), legLuzar, oenv); for (j = 0; (j < nn); j++) { fprintf(fp, "%10g %10g %10g %10g %10g\n", hb->time[j]-hb->time[0], ct[j], cct[j], ght[j], kt[j]); } ffclose(fp); analyse_corr(nn, hb->time, ct, ght, kt, NULL, 
NULL, NULL, fit_start, temp, smooth_tail_start, oenv); do_view(oenv, fn, NULL); sfree(rhbex); sfree(ct); sfree(gt); sfree(ht); sfree(ght); sfree(dght); sfree(cct); sfree(kt); /* sfree(h); */ /* sfree(g); */ break; /* case AC_LUZAR */ default: gmx_fatal(FARGS, "Unrecognized type of ACF-calulation. acType = %i.", acType); } /* switch (acType) */ } static void init_hbframe(t_hbdata *hb, int nframes, real t) { int i, j, m; hb->time[nframes] = t; hb->nhb[nframes] = 0; hb->ndist[nframes] = 0; for (i = 0; (i < max_hx); i++) { hb->nhx[nframes][i] = 0; } /* Loop invalidated */ if (hb->bHBmap && 0) { for (i = 0; (i < hb->d.nrd); i++) { for (j = 0; (j < hb->a.nra); j++) { for (m = 0; (m < hb->maxhydro); m++) { if (hb->hbmap[i][j] && hb->hbmap[i][j]->h[m]) { set_hb(hb, i, m, j, nframes, HB_NO); } } } } } /*set_hb(hb->hbmap[i][j]->h[m],nframes-hb->hbmap[i][j]->n0,HB_NO);*/ } static void analyse_donor_props(const char *fn, t_hbdata *hb, int nframes, real t, const output_env_t oenv) { static FILE *fp = NULL; const char *leg[] = { "Nbound", "Nfree" }; int i, j, k, nbound, nb, nhtot; if (!fn) { return; } if (!fp) { fp = xvgropen(fn, "Donor properties", output_env_get_xvgr_tlabel(oenv), "Number", oenv); xvgr_legend(fp, asize(leg), leg, oenv); } nbound = 0; nhtot = 0; for (i = 0; (i < hb->d.nrd); i++) { for (k = 0; (k < hb->d.nhydro[i]); k++) { nb = 0; nhtot++; for (j = 0; (j < hb->a.nra) && (nb == 0); j++) { if (hb->hbmap[i][j] && hb->hbmap[i][j]->h[k] && is_hb(hb->hbmap[i][j]->h[k], nframes)) { nb = 1; } } nbound += nb; } } fprintf(fp, "%10.3e %6d %6d\n", t, nbound, nhtot-nbound); } static void dump_hbmap(t_hbdata *hb, int nfile, t_filenm fnm[], gmx_bool bTwo, gmx_bool bContact, int isize[], int *index[], char *grpnames[], t_atoms *atoms) { FILE *fp, *fplog; int ddd, hhh, aaa, i, j, k, m, grp; char ds[32], hs[32], as[32]; gmx_bool first; fp = opt2FILE("-hbn", nfile, fnm, "w"); if (opt2bSet("-g", nfile, fnm)) { fplog = ffopen(opt2fn("-g", nfile, fnm), "w"); fprintf(fplog, "# %10s 
%12s %12s\n", "Donor", "Hydrogen", "Acceptor"); } else { fplog = NULL; } for (grp = gr0; grp <= (bTwo ? gr1 : gr0); grp++) { fprintf(fp, "[ %s ]", grpnames[grp]); for (i = 0; i < isize[grp]; i++) { fprintf(fp, (i%15) ? " " : "\n"); fprintf(fp, " %4u", index[grp][i]+1); } fprintf(fp, "\n"); /* Added -contact support below. - Erik Marklund, May 29, 2006 */ if (!bContact) { fprintf(fp, "[ donors_hydrogens_%s ]\n", grpnames[grp]); for (i = 0; (i < hb->d.nrd); i++) { if (hb->d.grp[i] == grp) { for (j = 0; (j < hb->d.nhydro[i]); j++) { fprintf(fp, " %4u %4u", hb->d.don[i]+1, hb->d.hydro[i][j]+1); } fprintf(fp, "\n"); } } first = TRUE; fprintf(fp, "[ acceptors_%s ]", grpnames[grp]); for (i = 0; (i < hb->a.nra); i++) { if (hb->a.grp[i] == grp) { fprintf(fp, (i%15 && !first) ? " " : "\n"); fprintf(fp, " %4u", hb->a.acc[i]+1); first = FALSE; } } fprintf(fp, "\n"); } } if (bTwo) { fprintf(fp, bContact ? "[ contacts_%s-%s ]\n" : "[ hbonds_%s-%s ]\n", grpnames[0], grpnames[1]); } else { fprintf(fp, bContact ? "[ contacts_%s ]" : "[ hbonds_%s ]\n", grpnames[0]); } for (i = 0; (i < hb->d.nrd); i++) { ddd = hb->d.don[i]; for (k = 0; (k < hb->a.nra); k++) { aaa = hb->a.acc[k]; for (m = 0; (m < hb->d.nhydro[i]); m++) { if (hb->hbmap[i][k] && ISHB(hb->hbmap[i][k]->history[m])) { sprintf(ds, "%s", mkatomname(atoms, ddd)); sprintf(as, "%s", mkatomname(atoms, aaa)); if (bContact) { fprintf(fp, " %6u %6u\n", ddd+1, aaa+1); if (fplog) { fprintf(fplog, "%12s %12s\n", ds, as); } } else { hhh = hb->d.hydro[i][m]; sprintf(hs, "%s", mkatomname(atoms, hhh)); fprintf(fp, " %6u %6u %6u\n", ddd+1, hhh+1, aaa+1); if (fplog) { fprintf(fplog, "%12s %12s %12s\n", ds, hs, as); } } } } } } ffclose(fp); if (fplog) { ffclose(fplog); } } /* sync_hbdata() updates the parallel t_hbdata p_hb using hb as template. * It mimics add_frames() and init_frame() to some extent. 
*/ static void sync_hbdata(t_hbdata *hb, t_hbdata *p_hb, int nframes, real t) { int i; if (nframes >= p_hb->max_frames) { p_hb->max_frames += 4096; srenew(p_hb->nhb, p_hb->max_frames); srenew(p_hb->ndist, p_hb->max_frames); srenew(p_hb->n_bound, p_hb->max_frames); srenew(p_hb->nhx, p_hb->max_frames); if (p_hb->bDAnr) { srenew(p_hb->danr, p_hb->max_frames); } memset(&(p_hb->nhb[nframes]), 0, sizeof(int) * (p_hb->max_frames-nframes)); memset(&(p_hb->ndist[nframes]), 0, sizeof(int) * (p_hb->max_frames-nframes)); p_hb->nhb[nframes] = 0; p_hb->ndist[nframes] = 0; } p_hb->nframes = nframes; /* for (i=0;) */ /* { */ /* p_hb->nhx[nframes][i] */ /* } */ memset(&(p_hb->nhx[nframes]), 0, sizeof(int)*max_hx); /* zero the helix count for this frame */ /* hb->per will remain constant througout the frame loop, * even though the data its members point to will change, * hence no need for re-syncing. */ } int gmx_hbond(int argc, char *argv[]) { const char *desc[] = { "[TT]g_hbond[tt] computes and analyzes hydrogen bonds. Hydrogen bonds are", "determined based on cutoffs for the angle Hydrogen - Donor - Acceptor", "(zero is extended) and the distance Donor - Acceptor", "(or Hydrogen - Acceptor using [TT]-noda[tt]).", "OH and NH groups are regarded as donors, O is an acceptor always,", "N is an acceptor by default, but this can be switched using", "[TT]-nitacc[tt]. Dummy hydrogen atoms are assumed to be connected", "to the first preceding non-hydrogen atom.[PAR]", "You need to specify two groups for analysis, which must be either", "identical or non-overlapping. All hydrogen bonds between the two", "groups are analyzed.[PAR]", "If you set [TT]-shell[tt], you will be asked for an additional index group", "which should contain exactly one atom. 
In this case, only hydrogen", "bonds between atoms within the shell distance from the one atom are", "considered.[PAR]", "With option -ac, rate constants for hydrogen bonding can be derived with the model of Luzar and Chandler", "(Nature 394, 1996; J. Chem. Phys. 113:23, 2000) or that of Markovitz and Agmon (J. Chem. Phys 129, 2008).", "If contact kinetics are analyzed by using the -contact option, then", "n(t) can be defined as either all pairs that are not within contact distance r at time t", "(corresponding to leaving the -r2 option at the default value 0) or all pairs that", "are within distance r2 (corresponding to setting a second cut-off value with option -r2).", "See mentioned literature for more details and definitions." "[PAR]", /* "It is also possible to analyse specific hydrogen bonds with", "[TT]-sel[tt]. This index file must contain a group of atom triplets", "Donor Hydrogen Acceptor, in the following way:[PAR]", */ "[TT]", "[ selected ][BR]", " 20 21 24[BR]", " 25 26 29[BR]", " 1 3 6[BR]", "[tt][BR]", "Note that the triplets need not be on separate lines.", "Each atom triplet specifies a hydrogen bond to be analyzed,", "note also that no check is made for the types of atoms.[PAR]", "[BB]Output:[bb][BR]", "[TT]-num[tt]: number of hydrogen bonds as a function of time.[BR]", "[TT]-ac[tt]: average over all autocorrelations of the existence", "functions (either 0 or 1) of all hydrogen bonds.[BR]", "[TT]-dist[tt]: distance distribution of all hydrogen bonds.[BR]", "[TT]-ang[tt]: angle distribution of all hydrogen bonds.[BR]", "[TT]-hx[tt]: the number of n-n+i hydrogen bonds as a function of time", "where n and n+i stand for residue numbers and i ranges from 0 to 6.", "This includes the n-n+3, n-n+4 and n-n+5 hydrogen bonds associated", "with helices in proteins.[BR]", "[TT]-hbn[tt]: all selected groups, donors, hydrogens and acceptors", "for selected groups, all hydrogen bonded atoms from all groups and", "all solvent atoms involved in insertion.[BR]", 
"[TT]-hbm[tt]: existence matrix for all hydrogen bonds over all", "frames, this also contains information on solvent insertion", "into hydrogen bonds. Ordering is identical to that in [TT]-hbn[tt]", "index file.[BR]", "[TT]-dan[tt]: write out the number of donors and acceptors analyzed for", "each timeframe. This is especially useful when using [TT]-shell[tt].[BR]", "[TT]-nhbdist[tt]: compute the number of HBonds per hydrogen in order to", "compare results to Raman Spectroscopy.", "[PAR]", "Note: options [TT]-ac[tt], [TT]-life[tt], [TT]-hbn[tt] and [TT]-hbm[tt]", "require an amount of memory proportional to the total numbers of donors", "times the total number of acceptors in the selected group(s)." }; static real acut = 30, abin = 1, rcut = 0.35, r2cut = 0, rbin = 0.005, rshell = -1; static real maxnhb = 0, fit_start = 1, fit_end = 60, temp = 298.15, smooth_tail_start = -1, D = -1; static gmx_bool bNitAcc = TRUE, bDA = TRUE, bMerge = TRUE; static int nDump = 0, nFitPoints = 100; static int nThreads = 0, nBalExp = 4; static gmx_bool bContact = FALSE, bBallistic = FALSE, bBallisticDt = FALSE, bGemFit = FALSE; static real logAfterTime = 10, gemBallistic = 0.2; /* ps */ static const char *NNtype[] = {NULL, "none", "binary", "oneOverR3", "dipole", NULL}; /* options */ t_pargs pa [] = { { "-a", FALSE, etREAL, {&acut}, "Cutoff angle (degrees, Hydrogen - Donor - Acceptor)" }, { "-r", FALSE, etREAL, {&rcut}, "Cutoff radius (nm, X - Acceptor, see next option)" }, { "-da", FALSE, etBOOL, {&bDA}, "Use distance Donor-Acceptor (if TRUE) or Hydrogen-Acceptor (FALSE)" }, { "-r2", FALSE, etREAL, {&r2cut}, "Second cutoff radius. 
Mainly useful with [TT]-contact[tt] and [TT]-ac[tt]"}, { "-abin", FALSE, etREAL, {&abin}, "Binwidth angle distribution (degrees)" }, { "-rbin", FALSE, etREAL, {&rbin}, "Binwidth distance distribution (nm)" }, { "-nitacc", FALSE, etBOOL, {&bNitAcc}, "Regard nitrogen atoms as acceptors" }, { "-contact", FALSE, etBOOL, {&bContact}, "Do not look for hydrogen bonds, but merely for contacts within the cut-off distance" }, { "-shell", FALSE, etREAL, {&rshell}, "when > 0, only calculate hydrogen bonds within # nm shell around " "one particle" }, { "-fitstart", FALSE, etREAL, {&fit_start}, "Time (ps) from which to start fitting the correlation functions in order to obtain the forward and backward rate constants for HB breaking and formation. With [TT]-gemfit[tt] we suggest [TT]-fitstart 0[tt]" }, { "-fitstart", FALSE, etREAL, {&fit_start}, "Time (ps) to which to stop fitting the correlation functions in order to obtain the forward and backward rate constants for HB breaking and formation (only with [TT]-gemfit[tt])" }, { "-temp", FALSE, etREAL, {&temp}, "Temperature (K) for computing the Gibbs energy corresponding to HB breaking and reforming" }, { "-smooth", FALSE, etREAL, {&smooth_tail_start}, "If >= 0, the tail of the ACF will be smoothed by fitting it to an exponential function: y = A exp(-x/[GRK]tau[grk])" }, { "-dump", FALSE, etINT, {&nDump}, "Dump the first N hydrogen bond ACFs in a single [TT].xvg[tt] file for debugging" }, { "-max_hb", FALSE, etREAL, {&maxnhb}, "Theoretical maximum number of hydrogen bonds used for normalizing HB autocorrelation function. Can be useful in case the program estimates it wrongly" }, { "-merge", FALSE, etBOOL, {&bMerge}, "H-bonds between the same donor and acceptor, but with different hydrogen are treated as a single H-bond. Mainly important for the ACF." }, { "-geminate", FALSE, etENUM, {gemType}, "Use reversible geminate recombination for the kinetics/thermodynamics calclations. See Markovitch et al., J. Chem. 
Phys 129, 084505 (2008) for details."}, { "-diff", FALSE, etREAL, {&D}, "Dffusion coefficient to use in the reversible geminate recombination kinetic model. If negative, then it will be fitted to the ACF along with ka and kd."}, #ifdef GMX_OPENMP { "-nthreads", FALSE, etINT, {&nThreads}, "Number of threads used for the parallel loop over autocorrelations. nThreads <= 0 means maximum number of threads. Requires linking with OpenMP. The number of threads is limited by the number of processors (before OpenMP v.3 ) or environment variable OMP_THREAD_LIMIT (OpenMP v.3)"}, #endif }; const char *bugs[] = { "The option [TT]-sel[tt] that used to work on selected hbonds is out of order, and therefore not available for the time being." }; t_filenm fnm[] = { { efTRX, "-f", NULL, ffREAD }, { efTPX, NULL, NULL, ffREAD }, { efNDX, NULL, NULL, ffOPTRD }, /* { efNDX, "-sel", "select", ffOPTRD },*/ { efXVG, "-num", "hbnum", ffWRITE }, { efLOG, "-g", "hbond", ffOPTWR }, { efXVG, "-ac", "hbac", ffOPTWR }, { efXVG, "-dist", "hbdist", ffOPTWR }, { efXVG, "-ang", "hbang", ffOPTWR }, { efXVG, "-hx", "hbhelix", ffOPTWR }, { efNDX, "-hbn", "hbond", ffOPTWR }, { efXPM, "-hbm", "hbmap", ffOPTWR }, { efXVG, "-don", "donor", ffOPTWR }, { efXVG, "-dan", "danum", ffOPTWR }, { efXVG, "-life", "hblife", ffOPTWR }, { efXVG, "-nhbdist", "nhbdist", ffOPTWR } }; #define NFILE asize(fnm) char hbmap [HB_NR] = { ' ', 'o', '-', '*' }; const char *hbdesc[HB_NR] = { "None", "Present", "Inserted", "Present & Inserted" }; t_rgb hbrgb [HB_NR] = { {1, 1, 1}, {1, 0, 0}, {0, 0, 1}, {1, 0, 1} }; t_trxstatus *status; int trrStatus = 1; t_topology top; t_inputrec ir; t_pargs *ppa; int npargs, natoms, nframes = 0, shatom; int *isize; char **grpnames; atom_id **index; rvec *x, hbox; matrix box; real t, ccut, dist = 0.0, ang = 0.0; double max_nhb, aver_nhb, aver_dist; int h = 0, i = 0, j, k = 0, l, start, end, id, ja, ogrp, nsel; int xi, yi, zi, ai; int xj, yj, zj, aj, xjj, yjj, zjj; int xk, yk, zk, ak, xkk, ykk, zkk; 
gmx_bool bSelected, bHBmap, bStop, bTwo, was, bBox, bTric; int *adist, *rdist, *aptr, *rprt; int grp, nabin, nrbin, bin, resdist, ihb; char **leg; t_hbdata *hb, *hbptr; FILE *fp, *fpins = NULL, *fpnhb = NULL; t_gridcell ***grid; t_ncell *icell, *jcell, *kcell; ivec ngrid; unsigned char *datable; output_env_t oenv; int gemmode, NN; PSTYPE peri = 0; t_E E; int ii, jj, hh, actual_nThreads; int threadNr = 0; gmx_bool bGem, bNN, bParallel; t_gemParams *params = NULL; gmx_bool bEdge_yjj, bEdge_xjj, bOMP; t_hbdata **p_hb = NULL; /* one per thread, then merge after the frame loop */ int **p_adist = NULL, **p_rdist = NULL; /* a histogram for each thread. */ #ifdef GMX_OPENMP bOMP = TRUE; #else bOMP = FALSE; #endif CopyRight(stderr, argv[0]); npargs = asize(pa); ppa = add_acf_pargs(&npargs, pa); parse_common_args(&argc, argv, PCA_CAN_TIME | PCA_TIME_UNIT | PCA_BE_NICE, NFILE, fnm, npargs, ppa, asize(desc), desc, asize(bugs), bugs, &oenv); /* NN-loop? If so, what estimator to use ?*/ NN = 1; /* Outcommented for now DvdS 2010-07-13 while (NN < NN_NR && gmx_strcasecmp(NNtype[0], NNtype[NN])!=0) NN++; if (NN == NN_NR) gmx_fatal(FARGS, "Invalid NN-loop type."); */ bNN = FALSE; for (i = 2; bNN == FALSE && i < NN_NR; i++) { bNN = bNN || NN == i; } if (NN > NN_NONE && bMerge) { bMerge = FALSE; } /* geminate recombination? If so, which flavor? 
*/ gemmode = 1; while (gemmode < gemNR && gmx_strcasecmp(gemType[0], gemType[gemmode]) != 0) { gemmode++; } if (gemmode == gemNR) { gmx_fatal(FARGS, "Invalid recombination type."); } bGem = FALSE; for (i = 2; bGem == FALSE && i < gemNR; i++) { bGem = bGem || gemmode == i; } if (bGem) { printf("Geminate recombination: %s\n", gemType[gemmode]); #ifndef HAVE_LIBGSL printf("Note that some aspects of reversible geminate recombination won't work without gsl.\n"); #endif if (bContact) { if (gemmode != gemDD) { printf("Turning off -contact option...\n"); bContact = FALSE; } } else { if (gemmode == gemDD) { printf("Turning on -contact option...\n"); bContact = TRUE; } } if (bMerge) { if (gemmode == gemAA) { printf("Turning off -merge option...\n"); bMerge = FALSE; } } else { if (gemmode != gemAA) { printf("Turning on -merge option...\n"); bMerge = TRUE; } } } /* process input */ bSelected = FALSE; ccut = cos(acut*DEG2RAD); if (bContact) { if (bSelected) { gmx_fatal(FARGS, "Can not analyze selected contacts."); } if (!bDA) { gmx_fatal(FARGS, "Can not analyze contact between H and A: turn off -noda"); } } /* Initiate main data structure! 
*/ bHBmap = (opt2bSet("-ac", NFILE, fnm) || opt2bSet("-life", NFILE, fnm) || opt2bSet("-hbn", NFILE, fnm) || opt2bSet("-hbm", NFILE, fnm) || bGem); if (opt2bSet("-nhbdist", NFILE, fnm)) { const char *leg[MAXHH+1] = { "0 HBs", "1 HB", "2 HBs", "3 HBs", "Total" }; fpnhb = xvgropen(opt2fn("-nhbdist", NFILE, fnm), "Number of donor-H with N HBs", output_env_get_xvgr_tlabel(oenv), "N", oenv); xvgr_legend(fpnhb, asize(leg), leg, oenv); } hb = mk_hbdata(bHBmap, opt2bSet("-dan", NFILE, fnm), bMerge || bContact, bGem, gemmode); /* get topology */ read_tpx_top(ftp2fn(efTPX, NFILE, fnm), &ir, box, &natoms, NULL, NULL, NULL, &top); snew(grpnames, grNR); snew(index, grNR); snew(isize, grNR); /* Make Donor-Acceptor table */ snew(datable, top.atoms.nr); gen_datable(index[0], isize[0], datable, top.atoms.nr); if (bSelected) { /* analyze selected hydrogen bonds */ printf("Select group with selected atoms:\n"); get_index(&(top.atoms), opt2fn("-sel", NFILE, fnm), 1, &nsel, index, grpnames); if (nsel % 3) { gmx_fatal(FARGS, "Number of atoms in group '%s' not a multiple of 3\n" "and therefore cannot contain triplets of " "Donor-Hydrogen-Acceptor", grpnames[0]); } bTwo = FALSE; for (i = 0; (i < nsel); i += 3) { int dd = index[0][i]; int aa = index[0][i+2]; /* int */ hh = index[0][i+1]; add_dh (&hb->d, dd, hh, i, datable); add_acc(&hb->a, aa, i); /* Should this be here ? 
*/ snew(hb->d.dptr, top.atoms.nr); snew(hb->a.aptr, top.atoms.nr); add_hbond(hb, dd, aa, hh, gr0, gr0, 0, bMerge, 0, bContact, peri); } printf("Analyzing %d selected hydrogen bonds from '%s'\n", isize[0], grpnames[0]); } else { /* analyze all hydrogen bonds: get group(s) */ printf("Specify 2 groups to analyze:\n"); get_index(&(top.atoms), ftp2fn_null(efNDX, NFILE, fnm), 2, isize, index, grpnames); /* check if we have two identical or two non-overlapping groups */ bTwo = isize[0] != isize[1]; for (i = 0; (i < isize[0]) && !bTwo; i++) { bTwo = index[0][i] != index[1][i]; } if (bTwo) { printf("Checking for overlap in atoms between %s and %s\n", grpnames[0], grpnames[1]); for (i = 0; i < isize[1]; i++) { if (ISINGRP(datable[index[1][i]])) { gmx_fatal(FARGS, "Partial overlap between groups '%s' and '%s'", grpnames[0], grpnames[1]); } } /* printf("Checking for overlap in atoms between %s and %s\n", grpnames[0],grpnames[1]); for (i=0; i<isize[0]; i++) for (j=0; j<isize[1]; j++) if (index[0][i] == index[1][j]) gmx_fatal(FARGS,"Partial overlap between groups '%s' and '%s'", grpnames[0],grpnames[1]); */ } if (bTwo) { printf("Calculating %s " "between %s (%d atoms) and %s (%d atoms)\n", bContact ? "contacts" : "hydrogen bonds", grpnames[0], isize[0], grpnames[1], isize[1]); } else { fprintf(stderr, "Calculating %s in %s (%d atoms)\n", bContact ? 
"contacts" : "hydrogen bonds", grpnames[0], isize[0]); } } sfree(datable); /* search donors and acceptors in groups */ snew(datable, top.atoms.nr); for (i = 0; (i < grNR); i++) { if ( ((i == gr0) && !bSelected ) || ((i == gr1) && bTwo )) { gen_datable(index[i], isize[i], datable, top.atoms.nr); if (bContact) { search_acceptors(&top, isize[i], index[i], &hb->a, i, bNitAcc, TRUE, (bTwo && (i == gr0)) || !bTwo, datable); search_donors (&top, isize[i], index[i], &hb->d, i, TRUE, (bTwo && (i == gr1)) || !bTwo, datable); } else { search_acceptors(&top, isize[i], index[i], &hb->a, i, bNitAcc, FALSE, TRUE, datable); search_donors (&top, isize[i], index[i], &hb->d, i, FALSE, TRUE, datable); } if (bTwo) { clear_datable_grp(datable, top.atoms.nr); } } } sfree(datable); printf("Found %d donors and %d acceptors\n", hb->d.nrd, hb->a.nra); /*if (bSelected) snew(donors[gr0D], dons[gr0D].nrd);*/ if (bHBmap) { printf("Making hbmap structure..."); /* Generate hbond data structure */ mk_hbmap(hb, bTwo); printf("done.\n"); } #ifdef HAVE_NN_LOOPS if (bNN) { mk_hbEmap(hb, 0); } #endif if (bGem) { printf("Making per structure..."); /* Generate hbond data structure */ mk_per(hb); printf("done.\n"); } /* check input */ bStop = FALSE; if (hb->d.nrd + hb->a.nra == 0) { printf("No Donors or Acceptors found\n"); bStop = TRUE; } if (!bStop) { if (hb->d.nrd == 0) { printf("No Donors found\n"); bStop = TRUE; } if (hb->a.nra == 0) { printf("No Acceptors found\n"); bStop = TRUE; } } if (bStop) { gmx_fatal(FARGS, "Nothing to be done"); } shatom = 0; if (rshell > 0) { int shisz; atom_id *shidx; char *shgrpnm; /* get index group with atom for shell */ do { printf("Select atom for shell (1 atom):\n"); get_index(&(top.atoms), ftp2fn_null(efNDX, NFILE, fnm), 1, &shisz, &shidx, &shgrpnm); if (shisz != 1) { printf("group contains %d atoms, should be 1 (one)\n", shisz); } } while (shisz != 1); shatom = shidx[0]; printf("Will calculate hydrogen bonds within a shell " "of %g nm around atom %i\n", rshell, 
shatom+1); } /* Analyze trajectory */ natoms = read_first_x(oenv, &status, ftp2fn(efTRX, NFILE, fnm), &t, &x, box); if (natoms > top.atoms.nr) { gmx_fatal(FARGS, "Topology (%d atoms) does not match trajectory (%d atoms)", top.atoms.nr, natoms); } bBox = ir.ePBC != epbcNONE; grid = init_grid(bBox, box, (rcut > r2cut) ? rcut : r2cut, ngrid); nabin = acut/abin; nrbin = rcut/rbin; snew(adist, nabin+1); snew(rdist, nrbin+1); if (bGem && !bBox) { gmx_fatal(FARGS, "Can't do geminate recombination without periodic box."); } bParallel = FALSE; #ifndef GMX_OPENMP #define __ADIST adist #define __RDIST rdist #define __HBDATA hb #else /* GMX_OPENMP ================================================== \ * Set up the OpenMP stuff, | * like the number of threads and such | * Also start the parallel loop. | */ #define __ADIST p_adist[threadNr] #define __RDIST p_rdist[threadNr] #define __HBDATA p_hb[threadNr] #endif if (bOMP) { bParallel = !bSelected; if (bParallel) { actual_nThreads = min((nThreads <= 0) ? INT_MAX : nThreads, gmx_omp_get_max_threads()); gmx_omp_set_num_threads(actual_nThreads); printf("Frame loop parallelized with OpenMP using %i threads.\n", actual_nThreads); fflush(stdout); } else { actual_nThreads = 1; } snew(p_hb, actual_nThreads); snew(p_adist, actual_nThreads); snew(p_rdist, actual_nThreads); for (i = 0; i < actual_nThreads; i++) { snew(p_hb[i], 1); snew(p_adist[i], nabin+1); snew(p_rdist[i], nrbin+1); p_hb[i]->max_frames = 0; p_hb[i]->nhb = NULL; p_hb[i]->ndist = NULL; p_hb[i]->n_bound = NULL; p_hb[i]->time = NULL; p_hb[i]->nhx = NULL; p_hb[i]->bHBmap = hb->bHBmap; p_hb[i]->bDAnr = hb->bDAnr; p_hb[i]->bGem = hb->bGem; p_hb[i]->wordlen = hb->wordlen; p_hb[i]->nframes = hb->nframes; p_hb[i]->maxhydro = hb->maxhydro; p_hb[i]->danr = hb->danr; p_hb[i]->d = hb->d; p_hb[i]->a = hb->a; p_hb[i]->hbmap = hb->hbmap; p_hb[i]->time = hb->time; /* This may need re-syncing at every frame. 
*/ p_hb[i]->per = hb->per; #ifdef HAVE_NN_LOOPS p_hb[i]->hbE = hb->hbE; #endif p_hb[i]->nrhb = 0; p_hb[i]->nrdist = 0; } } /* Make a thread pool here, * instead of forking anew at every frame. */ #pragma omp parallel \ firstprivate(i) \ private(j, h, ii, jj, hh, E, \ xi, yi, zi, xj, yj, zj, threadNr, \ dist, ang, peri, icell, jcell, \ grp, ogrp, ai, aj, xjj, yjj, zjj, \ xk, yk, zk, ihb, id, resdist, \ xkk, ykk, zkk, kcell, ak, k, bTric, \ bEdge_xjj, bEdge_yjj) \ default(shared) { /* Start of parallel region */ threadNr = gmx_omp_get_thread_num(); do { bTric = bBox && TRICLINIC(box); if (bOMP) { sync_hbdata(hb, p_hb[threadNr], nframes, t); } #pragma omp single { build_grid(hb, x, x[shatom], bBox, box, hbox, (rcut > r2cut) ? rcut : r2cut, rshell, ngrid, grid); reset_nhbonds(&(hb->d)); if (debug && bDebug) { dump_grid(debug, ngrid, grid); } add_frames(hb, nframes); init_hbframe(hb, nframes, output_env_conv_time(oenv, t)); if (hb->bDAnr) { count_da_grid(ngrid, grid, hb->danr[nframes]); } } /* omp single */ if (bOMP) { p_hb[threadNr]->time = hb->time; /* This pointer may have changed. */ } if (bNN) { #ifdef HAVE_NN_LOOPS /* Unlock this feature when testing */ /* Loop over all atom pairs and estimate interaction energy */ #pragma omp single { addFramesNN(hb, nframes); } #pragma omp barrier #pragma omp for schedule(dynamic) for (i = 0; i < hb->d.nrd; i++) { for (j = 0; j < hb->a.nra; j++) { for (h = 0; h < (bContact ? 1 : hb->d.nhydro[i]); h++) { if (i == hb->d.nrd || j == hb->a.nra) { gmx_fatal(FARGS, "out of bounds"); } /* Get the real atom ids */ ii = hb->d.don[i]; jj = hb->a.acc[j]; hh = hb->d.hydro[i][h]; /* Estimate the energy from the geometry */ E = calcHbEnergy(ii, jj, hh, x, NN, box, hbox, &(hb->d)); /* Store the energy */ storeHbEnergy(hb, i, j, h, E, nframes); } } } #endif /* HAVE_NN_LOOPS */ } /* if (bNN)*/ else { if (bSelected) { #pragma omp single { /* Do not parallelize this just yet. 
*/ /* int ii; */ for (ii = 0; (ii < nsel); ii++) { int dd = index[0][i]; int aa = index[0][i+2]; /* int */ hh = index[0][i+1]; ihb = is_hbond(hb, ii, ii, dd, aa, rcut, r2cut, ccut, x, bBox, box, hbox, &dist, &ang, bDA, &h, bContact, bMerge, &peri); if (ihb) { /* add to index if not already there */ /* Add a hbond */ add_hbond(hb, dd, aa, hh, ii, ii, nframes, bMerge, ihb, bContact, peri); } } } /* omp single */ } /* if (bSelected) */ else { #pragma omp single { if (bGem) { calcBoxProjection(box, hb->per->P); } /* loop over all gridcells (xi,yi,zi) */ /* Removed confusing macro, DvdS 27/12/98 */ } /* The outer grid loop will have to do for now. */ #pragma omp for schedule(dynamic) for (xi = 0; xi < ngrid[XX]; xi++) { for (yi = 0; (yi < ngrid[YY]); yi++) { for (zi = 0; (zi < ngrid[ZZ]); zi++) { /* loop over donor groups gr0 (always) and gr1 (if necessary) */ for (grp = gr0; (grp <= (bTwo ? gr1 : gr0)); grp++) { icell = &(grid[zi][yi][xi].d[grp]); if (bTwo) { ogrp = 1-grp; } else { ogrp = grp; } /* loop over all hydrogen atoms from group (grp) * in this gridcell (icell) */ for (ai = 0; (ai < icell->nr); ai++) { i = icell->atoms[ai]; /* loop over all adjacent gridcells (xj,yj,zj) */ for (zjj = grid_loop_begin(ngrid[ZZ], zi, bTric, FALSE); zjj <= grid_loop_end(ngrid[ZZ], zi, bTric, FALSE); zjj++) { zj = grid_mod(zjj, ngrid[ZZ]); bEdge_yjj = (zj == 0) || (zj == ngrid[ZZ] - 1); for (yjj = grid_loop_begin(ngrid[YY], yi, bTric, bEdge_yjj); yjj <= grid_loop_end(ngrid[YY], yi, bTric, bEdge_yjj); yjj++) { yj = grid_mod(yjj, ngrid[YY]); bEdge_xjj = (yj == 0) || (yj == ngrid[YY] - 1) || (zj == 0) || (zj == ngrid[ZZ] - 1); for (xjj = grid_loop_begin(ngrid[XX], xi, bTric, bEdge_xjj); xjj <= grid_loop_end(ngrid[XX], xi, bTric, bEdge_xjj); xjj++) { xj = grid_mod(xjj, ngrid[XX]); jcell = &(grid[zj][yj][xj].a[ogrp]); /* loop over acceptor atoms from other group (ogrp) * in this adjacent gridcell (jcell) */ for (aj = 0; (aj < jcell->nr); aj++) { j = jcell->atoms[aj]; /* check if this 
once was a h-bond */ peri = -1; ihb = is_hbond(__HBDATA, grp, ogrp, i, j, rcut, r2cut, ccut, x, bBox, box, hbox, &dist, &ang, bDA, &h, bContact, bMerge, &peri); if (ihb) { /* add to index if not already there */ /* Add a hbond */ add_hbond(__HBDATA, i, j, h, grp, ogrp, nframes, bMerge, ihb, bContact, peri); /* make angle and distance distributions */ if (ihb == hbHB && !bContact) { if (dist > rcut) { gmx_fatal(FARGS, "distance is higher than what is allowed for an hbond: %f", dist); } ang *= RAD2DEG; __ADIST[(int)( ang/abin)]++; __RDIST[(int)(dist/rbin)]++; if (!bTwo) { int id, ia; if ((id = donor_index(&hb->d, grp, i)) == NOTSET) { gmx_fatal(FARGS, "Invalid donor %d", i); } if ((ia = acceptor_index(&hb->a, ogrp, j)) == NOTSET) { gmx_fatal(FARGS, "Invalid acceptor %d", j); } resdist = abs(top.atoms.atom[i].resind- top.atoms.atom[j].resind); if (resdist >= max_hx) { resdist = max_hx-1; } __HBDATA->nhx[nframes][resdist]++; } } } } /* for aj */ } /* for xjj */ } /* for yjj */ } /* for zjj */ } /* for ai */ } /* for grp */ } /* for xi,yi,zi */ } } } /* if (bSelected) {...} else */ /* Better wait for all threads to finnish using x[] before updating it. */ k = nframes; #pragma omp barrier #pragma omp critical { /* Sum up histograms and counts from p_hb[] into hb */ if (bOMP) { hb->nhb[k] += p_hb[threadNr]->nhb[k]; hb->ndist[k] += p_hb[threadNr]->ndist[k]; for (j = 0; j < max_hx; j++) { hb->nhx[k][j] += p_hb[threadNr]->nhx[k][j]; } } } /* Here are a handful of single constructs * to share the workload a bit. The most * important one is of course the last one, * where there's a potential bottleneck in form * of slow I/O. 
*/ #pragma omp barrier #pragma omp single { if (hb != NULL) { analyse_donor_props(opt2fn_null("-don", NFILE, fnm), hb, k, t, oenv); } } #pragma omp single { if (fpnhb) { do_nhb_dist(fpnhb, hb, t); } } } /* if (bNN) {...} else + */ #pragma omp single { trrStatus = (read_next_x(oenv, status, &t, natoms, x, box)); nframes++; } #pragma omp barrier } while (trrStatus); if (bOMP) { #pragma omp critical { hb->nrhb += p_hb[threadNr]->nrhb; hb->nrdist += p_hb[threadNr]->nrdist; } /* Free parallel datastructures */ sfree(p_hb[threadNr]->nhb); sfree(p_hb[threadNr]->ndist); sfree(p_hb[threadNr]->nhx); #pragma omp for for (i = 0; i < nabin; i++) { for (j = 0; j < actual_nThreads; j++) { adist[i] += p_adist[j][i]; } } #pragma omp for for (i = 0; i <= nrbin; i++) { for (j = 0; j < actual_nThreads; j++) { rdist[i] += p_rdist[j][i]; } } sfree(p_adist[threadNr]); sfree(p_rdist[threadNr]); } } /* End of parallel region */ if (bOMP) { sfree(p_adist); sfree(p_rdist); } if (nframes < 2 && (opt2bSet("-ac", NFILE, fnm) || opt2bSet("-life", NFILE, fnm))) { gmx_fatal(FARGS, "Cannot calculate autocorrelation of life times with less than two frames"); } free_grid(ngrid, &grid); close_trj(status); if (fpnhb) { ffclose(fpnhb); } /* Compute maximum possible number of different hbonds */ if (maxnhb > 0) { max_nhb = maxnhb; } else { max_nhb = 0.5*(hb->d.nrd*hb->a.nra); } /* Added support for -contact below. * - Erik Marklund, May 29-31, 2006 */ /* Changed contact code. * - Erik Marklund, June 29, 2006 */ if (bHBmap && !bNN) { if (hb->nrhb == 0) { printf("No %s found!!\n", bContact ? "contacts" : "hydrogen bonds"); } else { printf("Found %d different %s in trajectory\n" "Found %d different atom-pairs within %s distance\n", hb->nrhb, bContact ? "contacts" : "hydrogen bonds", hb->nrdist, (r2cut > 0) ? 
"second cut-off" : "hydrogen bonding"); /*Control the pHist.*/ if (bMerge) { merge_hb(hb, bTwo, bContact); } if (opt2bSet("-hbn", NFILE, fnm)) { dump_hbmap(hb, NFILE, fnm, bTwo, bContact, isize, index, grpnames, &top.atoms); } /* Moved the call to merge_hb() to a line BEFORE dump_hbmap * to make the -hbn and -hmb output match eachother. * - Erik Marklund, May 30, 2006 */ } } /* Print out number of hbonds and distances */ aver_nhb = 0; aver_dist = 0; fp = xvgropen(opt2fn("-num", NFILE, fnm), bContact ? "Contacts" : "Hydrogen Bonds", output_env_get_xvgr_tlabel(oenv), "Number", oenv); snew(leg, 2); snew(leg[0], STRLEN); snew(leg[1], STRLEN); sprintf(leg[0], "%s", bContact ? "Contacts" : "Hydrogen bonds"); sprintf(leg[1], "Pairs within %g nm", (r2cut > 0) ? r2cut : rcut); xvgr_legend(fp, 2, (const char**)leg, oenv); sfree(leg[1]); sfree(leg[0]); sfree(leg); for (i = 0; (i < nframes); i++) { fprintf(fp, "%10g %10d %10d\n", hb->time[i], hb->nhb[i], hb->ndist[i]); aver_nhb += hb->nhb[i]; aver_dist += hb->ndist[i]; } ffclose(fp); aver_nhb /= nframes; aver_dist /= nframes; /* Print HB distance distribution */ if (opt2bSet("-dist", NFILE, fnm)) { long sum; sum = 0; for (i = 0; i < nrbin; i++) { sum += rdist[i]; } fp = xvgropen(opt2fn("-dist", NFILE, fnm), "Hydrogen Bond Distribution", bDA ? 
"Donor - Acceptor Distance (nm)" : "Hydrogen - Acceptor Distance (nm)", "", oenv); for (i = 0; i < nrbin; i++) { fprintf(fp, "%10g %10g\n", (i+0.5)*rbin, rdist[i]/(rbin*(real)sum)); } ffclose(fp); } /* Print HB angle distribution */ if (opt2bSet("-ang", NFILE, fnm)) { long sum; sum = 0; for (i = 0; i < nabin; i++) { sum += adist[i]; } fp = xvgropen(opt2fn("-ang", NFILE, fnm), "Hydrogen Bond Distribution", "Hydrogen - Donor - Acceptor Angle (\\SO\\N)", "", oenv); for (i = 0; i < nabin; i++) { fprintf(fp, "%10g %10g\n", (i+0.5)*abin, adist[i]/(abin*(real)sum)); } ffclose(fp); } /* Print HB in alpha-helix */ if (opt2bSet("-hx", NFILE, fnm)) { fp = xvgropen(opt2fn("-hx", NFILE, fnm), "Hydrogen Bonds", output_env_get_xvgr_tlabel(oenv), "Count", oenv); xvgr_legend(fp, NRHXTYPES, hxtypenames, oenv); for (i = 0; i < nframes; i++) { fprintf(fp, "%10g", hb->time[i]); for (j = 0; j < max_hx; j++) { fprintf(fp, " %6d", hb->nhx[i][j]); } fprintf(fp, "\n"); } ffclose(fp); } if (!bNN) { printf("Average number of %s per timeframe %.3f out of %g possible\n", bContact ? "contacts" : "hbonds", bContact ? aver_dist : aver_nhb, max_nhb); } /* Do Autocorrelation etc. */ if (hb->bHBmap) { /* Added support for -contact in ac and hbm calculations below. 
- Erik Marklund, May 29, 2006 */ ivec itmp; rvec rtmp; if (opt2bSet("-ac", NFILE, fnm) || opt2bSet("-life", NFILE, fnm)) { please_cite(stdout, "Spoel2006b"); } if (opt2bSet("-ac", NFILE, fnm)) { char *gemstring = NULL; if (bGem || bNN) { params = init_gemParams(rcut, D, hb->time, hb->nframes/2, nFitPoints, fit_start, fit_end, gemBallistic, nBalExp, bBallisticDt); if (params == NULL) { gmx_fatal(FARGS, "Could not initiate t_gemParams params."); } } gemstring = strdup(gemType[hb->per->gemtype]); do_hbac(opt2fn("-ac", NFILE, fnm), hb, nDump, bMerge, bContact, fit_start, temp, r2cut > 0, smooth_tail_start, oenv, params, gemstring, nThreads, NN, bBallistic, bGemFit); } if (opt2bSet("-life", NFILE, fnm)) { do_hblife(opt2fn("-life", NFILE, fnm), hb, bMerge, bContact, oenv); } if (opt2bSet("-hbm", NFILE, fnm)) { t_matrix mat; int id, ia, hh, x, y; if ((nframes > 0) && (hb->nrhb > 0)) { mat.nx = nframes; mat.ny = hb->nrhb; snew(mat.matrix, mat.nx); for (x = 0; (x < mat.nx); x++) { snew(mat.matrix[x], mat.ny); } y = 0; for (id = 0; (id < hb->d.nrd); id++) { for (ia = 0; (ia < hb->a.nra); ia++) { for (hh = 0; (hh < hb->maxhydro); hh++) { if (hb->hbmap[id][ia]) { if (ISHB(hb->hbmap[id][ia]->history[hh])) { /* Changed '<' into '<=' in the for-statement below. * It fixed the previously undiscovered bug that caused * the last occurance of an hbond/contact to not be * set in mat.matrix. Have a look at any old -hbm-output * and you will notice that the last column is allways empty. * - Erik Marklund May 30, 2006 */ for (x = 0; (x <= hb->hbmap[id][ia]->nframes); x++) { int nn0 = hb->hbmap[id][ia]->n0; range_check(y, 0, mat.ny); mat.matrix[x+nn0][y] = is_hb(hb->hbmap[id][ia]->h[hh], x); } y++; } } } } } mat.axis_x = hb->time; snew(mat.axis_y, mat.ny); for (j = 0; j < mat.ny; j++) { mat.axis_y[j] = j; } sprintf(mat.title, bContact ? "Contact Existence Map" : "Hydrogen Bond Existence Map"); sprintf(mat.legend, bContact ? 
"Contacts" : "Hydrogen Bonds"); sprintf(mat.label_x, "%s", output_env_get_xvgr_tlabel(oenv)); sprintf(mat.label_y, bContact ? "Contact Index" : "Hydrogen Bond Index"); mat.bDiscrete = TRUE; mat.nmap = 2; snew(mat.map, mat.nmap); for (i = 0; i < mat.nmap; i++) { mat.map[i].code.c1 = hbmap[i]; mat.map[i].desc = hbdesc[i]; mat.map[i].rgb = hbrgb[i]; } fp = opt2FILE("-hbm", NFILE, fnm, "w"); write_xpm_m(fp, mat); ffclose(fp); for (x = 0; x < mat.nx; x++) { sfree(mat.matrix[x]); } sfree(mat.axis_y); sfree(mat.matrix); sfree(mat.map); } else { fprintf(stderr, "No hydrogen bonds/contacts found. No hydrogen bond map will be printed.\n"); } } } if (bGem) { fprintf(stderr, "There were %i periodic shifts\n", hb->per->nper); fprintf(stderr, "Freeing pHist for all donors...\n"); for (i = 0; i < hb->d.nrd; i++) { fprintf(stderr, "\r%i", i); if (hb->per->pHist[i] != NULL) { for (j = 0; j < hb->a.nra; j++) { clearPshift(&(hb->per->pHist[i][j])); } sfree(hb->per->pHist[i]); } } sfree(hb->per->pHist); sfree(hb->per->p2i); sfree(hb->per); fprintf(stderr, "...done.\n"); } #ifdef HAVE_NN_LOOPS if (bNN) { free_hbEmap(hb); } #endif if (hb->bDAnr) { int i, j, nleg; char **legnames; char buf[STRLEN]; #define USE_THIS_GROUP(j) ( (j == gr0) || (bTwo && (j == gr1)) ) fp = xvgropen(opt2fn("-dan", NFILE, fnm), "Donors and Acceptors", output_env_get_xvgr_tlabel(oenv), "Count", oenv); nleg = (bTwo ? 2 : 1)*2; snew(legnames, nleg); i = 0; for (j = 0; j < grNR; j++) { if (USE_THIS_GROUP(j) ) { sprintf(buf, "Donors %s", grpnames[j]); legnames[i++] = strdup(buf); sprintf(buf, "Acceptors %s", grpnames[j]); legnames[i++] = strdup(buf); } } if (i != nleg) { gmx_incons("number of legend entries"); } xvgr_legend(fp, nleg, (const char**)legnames, oenv); for (i = 0; i < nframes; i++) { fprintf(fp, "%10g", hb->time[i]); for (j = 0; (j < grNR); j++) { if (USE_THIS_GROUP(j) ) { fprintf(fp, " %6d", hb->danr[i][j]); } } fprintf(fp, "\n"); } ffclose(fp); } thanx(stdout); return 0; }
GB_unaryop__identity_fp32_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB_unop__identity_fp32_int64
// op(A') function: GB_tran__identity_fp32_int64

// C type:   float
// A type:   int64_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the casted value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (int64_t -> float)
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_fp32_int64
(
    float *Cx,          // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // every entry is independent, so a static schedule splits the work
    // into equal contiguous chunks with no synchronization needed
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = (float) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_fp32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel body is textually included here; the
    // GB_* macros above specialize it for this type/operator combination
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
FunctionalDecomposition_Newadd.c
#include<stdlib.h>
#include<stdio.h>
#include<math.h>
#include<omp.h>

/*
 * Month-by-month grain/deer simulation using OpenMP functional decomposition:
 * each of the four agents (GrainDeer, Grain, Watcher, Hunter) runs in its own
 * parallel section and they coordinate through four barrier phases per month.
 *
 * NOTE(review): OpenMP forbids a barrier region nested inside a worksharing
 * (sections) region, so the "#pragma omp barrier" calls below are
 * non-conforming — the canonical pattern uses a plain parallel region with
 * per-thread dispatch. Behavior here depends on the implementation honoring
 * these barriers; verify before relying on the phase ordering.
 */

// shared simulation state, written/read across the parallel sections
int NowYear; // 2017 - 2022
int NowMonth; // 0 - 11
float NowPrecip; // inches of rain per month
float NowTemp; // temperature this month
float NowHeight; // grain height in inches
int NowNumDeer; // number of deer in the current population
int KilledDeer; // deer selected by Hunter this month (reported only; see note in Hunter)

const float GRAIN_GROWS_PER_MONTH = 8.0;
const float ONE_DEER_EATS_PER_MONTH = 0.5;

const float AVG_PRECIP_PER_MONTH = 6.0; // average
const float AMP_PRECIP_PER_MONTH = 6.0; // plus or minus
const float RANDOM_PRECIP = 2.0; // plus or minus noise

const float AVG_TEMP = 50.0; // average
const float AMP_TEMP = 20.0; // plus or minus
const float RANDOM_TEMP = 10.0; // plus or minus noise

const float MIDTEMP = 40.0;
const float MIDPRECIP = 10.0;

// Uniform random float in [low, high], using the reentrant rand_r so each
// caller owns its seed state.
float Ranf( unsigned int *seedp, float low, float high )
{
    float r = (float) rand_r( seedp ); // 0 - RAND_MAX
    return( low + r * ( high - low ) / (float)RAND_MAX );
}

//Calculate square factor
float SQR( float x )
{
    return x*x;
}

//Calculate the new Deer growth based on grain growth
// Phase 1 (after 1st barrier): adjust the herd size based on whether the
// grain left after this month's grazing (tmp) falls below 1 inch.
// Phase 2: write back the grain height.
void GrainDeer()
{
    float tmp=NowHeight;
    tmp -= (float)NowNumDeer * ONE_DEER_EATS_PER_MONTH;
#pragma omp barrier
    if(tmp<1)
    {
        NowNumDeer--;
        if(NowNumDeer <0)
        {
            NowNumDeer=0; // population can never go negative
        }
    }
    else
    {
        NowNumDeer++;
    }
#pragma omp barrier
    // NOTE(review): NowHeight - tmp equals the amount eaten
    // (NowNumDeer * ONE_DEER_EATS_PER_MONTH computed from the OLD height),
    // and by this phase Grain() has already overwritten NowHeight with the
    // grown value — so the stored height becomes (growth + eaten), not
    // (height + growth - eaten). Confirm this is the intended update.
    NowHeight=NowHeight-tmp;
#pragma omp barrier
#pragma omp barrier
}

//calculate the new grain growth based on precipitation
// Phase 0: compute the grown height from this month's temperature and
// precipitation; Phase 1: publish it to NowHeight.
void Grain()
{
    // Gaussian-shaped growth factors peaking at MIDTEMP / MIDPRECIP
    float tempFactor = exp(-SQR((NowTemp - MIDTEMP)/10.));
    float precipFactor = exp(-SQR(( NowPrecip - MIDPRECIP)/10.));
    float tmp=NowHeight;
    tmp +=tempFactor * precipFactor * GRAIN_GROWS_PER_MONTH;
#pragma omp barrier
    NowHeight=tmp;
#pragma omp barrier
#pragma omp barrier
#pragma omp barrier
}

// Phase 3 (after 3 barriers): pick a random deer to "kill".
// NOTE(review): KilledDeer is written here while Watcher() reads and prints
// it in the same barrier interval — that is a data race. Also, KilledDeer is
// never subtracted from NowNumDeer anywhere, so the hunt has no effect on
// the population. rand() (not rand_r) is used, which is not reentrant.
void Hunter()
{
#pragma omp barrier
#pragma omp barrier
#pragma omp barrier
    double tmp= rand();
    if(NowNumDeer !=0 && NowNumDeer !=1)
    {
        KilledDeer=((int)tmp % NowNumDeer)+1;
        if(KilledDeer == NowNumDeer)
        {
            KilledDeer--; // never wipe out the whole herd
        }
    }
    else
    {
        KilledDeer=0; // too few deer to hunt
    }
#pragma omp barrier
}

// Phase 3: report the month's state and roll the weather for the next
// iteration from the current NowMonth.
void Watcher()
{
#pragma omp barrier
#pragma omp barrier
#pragma omp barrier
    float ang = ( 30.*(float)NowMonth + 15. ) * ( M_PI / 180. );
    // static local: one shared copy, but only the single thread running
    // this section ever touches it, so the rand_r state stays consistent
    static unsigned int seed = 0;
    float temp = AVG_TEMP - AMP_TEMP * cos( ang );
    NowTemp = temp + Ranf( &seed, -RANDOM_TEMP, RANDOM_TEMP );
    float precip = AVG_PRECIP_PER_MONTH + AMP_PRECIP_PER_MONTH * sin( ang );
    NowPrecip = precip + Ranf( &seed, -RANDOM_PRECIP, RANDOM_PRECIP );
    if( NowPrecip < 0. )
        NowPrecip = 0.; // precipitation cannot be negative
    // NOTE(review): races with Hunter()'s write of KilledDeer (same phase)
    printf("Grain Growth= %f ,DeerGrowth= %d ,Temp= %f ,Precipitation= %f ,KilledDeer=%d\n",NowHeight,NowNumDeer,NowTemp, NowPrecip,KilledDeer);
#pragma omp barrier
}

// Seed the initial state and weather, then run the four agents in lockstep
// for every month of 2017-2022. NowMonth/NowYear advance sequentially in
// main between parallel regions.
int main()
{
    NowYear=2017;
    NowMonth=0;
    NowNumDeer=1;
    NowHeight=1;
    KilledDeer=0;

    // initial weather for month 0, same formulas Watcher() uses
    float ang = ( 30.*(float)NowMonth + 15. ) * ( M_PI / 180. );
    float temp = AVG_TEMP - AMP_TEMP * cos( ang );
    static unsigned int seed = 0;
    NowTemp = temp + Ranf( &seed, -RANDOM_TEMP, RANDOM_TEMP );
    float precip = AVG_PRECIP_PER_MONTH + AMP_PRECIP_PER_MONTH * sin( ang );
    NowPrecip = precip + Ranf( &seed, -RANDOM_PRECIP, RANDOM_PRECIP );
    if( NowPrecip < 0. )
        NowPrecip = 0.;

    omp_set_num_threads(4); // same as # of sections
    while(NowYear <=2022)
    {
        while(NowMonth <=11)
        {
#pragma omp parallel sections
            {
#pragma omp section
                {
                    GrainDeer();
                }
#pragma omp section
                {
                    Grain();
                }
#pragma omp section
                {
                    Watcher();
                }
#pragma omp section
                {
                    Hunter(); // your own
                }
            }
            NowMonth++;
            //exit(0);
        }
        NowMonth=0;
        NowYear++;
    }
}
concurrent_unordered_map.cuh.h
/* * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef CONCURRENT_UNORDERED_MAP_CUH #define CONCURRENT_UNORDERED_MAP_CUH #include <thrust/pair.h> #include <cassert> #include <iostream> #include <iterator> #include <type_traits> #include "hash_functions.cuh" #include "managed.cuh" #include "managed_allocator.cuh" // TODO: replace this with CUDA_TRY and propagate the error #ifndef CUDA_RT_CALL #define CUDA_RT_CALL(call) \ { \ cudaError_t cudaStatus = call; \ if (cudaSuccess != cudaStatus) { \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), \ cudaStatus); \ exit(1); \ } \ } #endif // TODO: can we do this more efficiently? __inline__ __device__ int8_t atomicCAS(int8_t* address, int8_t compare, int8_t val) { int32_t* base_address = (int32_t*)((char*)address - ((size_t)address & 3)); int32_t int_val = (int32_t)val << (((size_t)address & 3) * 8); int32_t int_comp = (int32_t)compare << (((size_t)address & 3) * 8); return (int8_t)atomicCAS(base_address, int_comp, int_val); } // TODO: can we do this more efficiently? 
// 16-bit CAS emulated with the native 32-bit atomicCAS on the enclosing
// 4-byte-aligned word (the addressed half-word is selected by bit 1 of the
// address).
// NOTE(review): same caveats as the 8-bit overload above — the untouched
// half of the word must currently be zero for the compare to succeed, and
// the returned value is the old 32-bit word truncated, not the addressed
// half-word extracted. Confirm callers' usage.
__inline__ __device__ int16_t atomicCAS(int16_t* address, int16_t compare,
                                        int16_t val) {
  int32_t* base_address = (int32_t*)((char*)address - ((size_t)address & 2));
  int32_t int_val = (int32_t)val << (((size_t)address & 2) * 8);
  int32_t int_comp = (int32_t)compare << (((size_t)address & 2) * 8);
  return (int16_t)atomicCAS(base_address, int_comp, int_val);
}

// CUDA exposes the 64-bit atomicCAS only for unsigned long long; these
// overloads just reinterpret the signed/typedef'd variants onto it.
__inline__ __device__ int64_t atomicCAS(int64_t* address, int64_t compare,
                                        int64_t val) {
  return (int64_t)atomicCAS((unsigned long long*)address,
                            (unsigned long long)compare,
                            (unsigned long long)val);
}

__inline__ __device__ uint64_t atomicCAS(uint64_t* address, uint64_t compare,
                                         uint64_t val) {
  return (uint64_t)atomicCAS((unsigned long long*)address,
                             (unsigned long long)compare,
                             (unsigned long long)val);
}

__inline__ __device__ long long int atomicCAS(long long int* address,
                                              long long int compare,
                                              long long int val) {
  return (long long int)atomicCAS((unsigned long long*)address,
                                  (unsigned long long)compare,
                                  (unsigned long long)val);
}

// Floating-point CAS via bitwise reinterpretation: compares/stores the bit
// patterns, so it distinguishes +0.0/-0.0 and treats NaNs by payload.
__inline__ __device__ double atomicCAS(double* address, double compare,
                                       double val) {
  return __longlong_as_double(atomicCAS((unsigned long long int*)address,
                                        __double_as_longlong(compare),
                                        __double_as_longlong(val)));
}

__inline__ __device__ float atomicCAS(float* address, float compare,
                                      float val) {
  return __int_as_float(
      atomicCAS((int*)address, __float_as_int(compare), __float_as_int(val)));
}

// 64-bit atomicAdd overloads, again delegating to the unsigned long long
// intrinsic (two's-complement wraparound makes the signed variant valid).
__inline__ __device__ int64_t atomicAdd(int64_t* address, int64_t val) {
  return (int64_t)atomicAdd((unsigned long long*)address,
                            (unsigned long long)val);
}

__inline__ __device__ uint64_t atomicAdd(uint64_t* address, uint64_t val) {
  return (uint64_t)atomicAdd((unsigned long long*)address,
                             (unsigned long long)val);
}

// Load a key/value pair with the widest single vector load whose size
// exactly matches pair_type (16/8/4/2 bytes), falling back to a plain load.
// The union is used to reinterpret the vector bits as the pair.
template <typename pair_type>
__forceinline__ __device__ pair_type
load_pair_vectorized(const pair_type* __restrict__ const ptr) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.vec_val = *reinterpret_cast<const uint4*>(ptr);
    return converter.pair_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.vec_val = *reinterpret_cast<const uint2*>(ptr);
    return converter.pair_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const int*>(ptr);
    return converter.pair_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const short*>(ptr);
    return converter.pair_val;
  } else {
    return *ptr;
  }
}

// Store a key/value pair with the widest single vector store whose size
// exactly matches pair_type; mirror of load_pair_vectorized above.
template <typename pair_type>
__forceinline__ __device__ void store_pair_vectorized(
    pair_type* __restrict__ const ptr, const pair_type val) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint4*>(ptr) = converter.vec_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint2*>(ptr) = converter.vec_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<int*>(ptr) = converter.vec_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<short*>(ptr) = converter.vec_val;
  } else {
    *ptr = val;
  }
}

// Kernel: one thread per slot; slot idx gets the <unused_key, unused_value>
// sentinel pair. No grid-stride loop, so the launch must cover all n slots.
template <typename value_type, typename size_type, typename key_type,
          typename elem_type>
__global__ void init_hashtbl(  // Init every entry of the table with
                               // <unused_key, unused_value> pair
    value_type* __restrict__ const hashtbl_values, const size_type n,
    const key_type key_val, const elem_type elem_val) {
  const size_type idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {
    store_pair_vectorized(
        hashtbl_values + idx,
        thrust::make_pair(
            key_val, elem_val));  // Simply store every element a <K, V> pair
  }
}

// Host/device equality functor, shaped like std::equal_to (same member
// typedefs) so it can be used as the map's default key comparator.
template <typename T>
struct equal_to {
  using result_type = bool;
  using first_argument_type = T;
  using second_argument_type = T;
  __forceinline__ __host__ __device__ constexpr bool operator()(
      const first_argument_type& lhs, const second_argument_type& rhs) const {
    return lhs == rhs;
  }
};

// Wraps an iterator over [m_begin, m_end) so that incrementing past the last
// element wraps back to m_begin — used for open-addressing probe sequences.
template <typename Iterator>
class cycle_iterator_adapter {
 public:
  using value_type = typename std::iterator_traits<Iterator>::value_type;
  using difference_type =
      typename std::iterator_traits<Iterator>::difference_type;
  using pointer = typename std::iterator_traits<Iterator>::pointer;
  using reference = typename std::iterator_traits<Iterator>::reference;
  using iterator_type = Iterator;

  cycle_iterator_adapter() = delete;

  __host__ __device__ explicit cycle_iterator_adapter(
      const iterator_type& begin, const iterator_type& end,
      const iterator_type& current)
      : m_begin(begin), m_end(end), m_current(current) {}

  // pre-increment: advance, wrapping from end back to begin
  __host__ __device__ cycle_iterator_adapter& operator++() {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  // NOTE(review): this const overload assigns to the non-mutable member
  // m_current, which is ill-formed if this template member is ever
  // instantiated (templates are only checked on use) — presumably dead
  // code; confirm and consider removing.
  __host__ __device__ const cycle_iterator_adapter& operator++() const {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  // post-increment.
  // NOTE(review): returns a *reference* to the local 'old', which dangles
  // as soon as the function returns — post-increment should return by
  // value. Flagging rather than changing, since this is a declared part of
  // the class interface.
  __host__ __device__ cycle_iterator_adapter& operator++(int) {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  // NOTE(review): const post-increment — both caveats above apply (mutates
  // members in a const function, and returns a reference to a local).
  __host__ __device__ const cycle_iterator_adapter& operator++(int)const {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  // two adapters are equal only if position AND cycle bounds all match
  __host__ __device__ bool equal(
      const cycle_iterator_adapter<iterator_type>& other) const {
    return m_current == other.m_current && m_begin == other.m_begin &&
           m_end == other.m_end;
  }

  __host__ __device__ reference& operator*() { return *m_current; }

  __host__ __device__ const reference& operator*() const { return *m_current; }

  __host__ __device__ const pointer operator->() const {
    return m_current.operator->();
  }

  __host__ __device__ pointer operator->() { return m_current; }

  // expose the underlying iterator position
  __host__ __device__ iterator_type getter() const { return m_current; }

 private:
  iterator_type m_current;  // current probe position
  iterator_type m_begin;    // start of the cycled range
  iterator_type m_end;      // one past the end of the cycled range
};

template <class T>
__host__ __device__ bool operator==(const cycle_iterator_adapter<T>& lhs,
                                    const cycle_iterator_adapter<T>& rhs) {
  return lhs.equal(rhs);
}

template <class T>
__host__ __device__ bool operator!=(const cycle_iterator_adapter<T>& lhs,
                                    const cycle_iterator_adapter<T>& rhs) {
  return !lhs.equal(rhs);
}

/**
 * Does support concurrent insert, but not concurrent insert and probing.
* * TODO: * - add constructor that takes pointer to hash_table to avoid allocations * - extend interface to accept streams */ template <typename Key, typename Element, Key unused_key, typename Hasher = default_hash<Key>, typename Equality = equal_to<Key>, typename Allocator = managed_allocator<thrust::pair<Key, Element>>, bool count_collisions = false> class concurrent_unordered_map : public managed { public: using size_type = size_t; using hasher = Hasher; using key_equal = Equality; using allocator_type = Allocator; using key_type = Key; using value_type = thrust::pair<Key, Element>; using mapped_type = Element; using iterator = cycle_iterator_adapter<value_type*>; using const_iterator = const cycle_iterator_adapter<value_type*>; private: union pair2longlong { unsigned long long int longlong; value_type pair; }; public: concurrent_unordered_map(const concurrent_unordered_map&) = delete; concurrent_unordered_map& operator=(const concurrent_unordered_map&) = delete; explicit concurrent_unordered_map(size_type n, const mapped_type unused_element, const Hasher& hf = hasher(), const Equality& eql = key_equal(), const allocator_type& a = allocator_type()) : m_hf(hf), m_equal(eql), m_allocator(a), m_hashtbl_size(n), m_hashtbl_capacity(n), m_collisions(0), m_unused_element( unused_element) { // allocate the raw data of hash table: // m_hashtbl_values,pre-alloc it on current GPU if UM. 
m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity); constexpr int block_size = 128; { cudaPointerAttributes hashtbl_values_ptr_attributes; cudaError_t status = cudaPointerGetAttributes( &hashtbl_values_ptr_attributes, m_hashtbl_values); #if CUDART_VERSION >= 10000 if (cudaSuccess == status && hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged) #else if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged) #endif { int dev_id = 0; CUDA_RT_CALL(cudaGetDevice(&dev_id)); CUDA_RT_CALL(cudaMemPrefetchAsync( m_hashtbl_values, m_hashtbl_size * sizeof(value_type), dev_id, 0)); } } // Initialize kernel, set all entry to unused <K,V> init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size>>>( m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element); // CUDA_RT_CALL( cudaGetLastError() ); CUDA_RT_CALL(cudaStreamSynchronize(0)); CUDA_RT_CALL(cudaGetLastError()); } ~concurrent_unordered_map() { m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity); } __host__ __device__ iterator begin() { return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size, m_hashtbl_values); } __host__ __device__ const_iterator begin() const { return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size, m_hashtbl_values); } __host__ __device__ iterator end() { return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size, m_hashtbl_values + m_hashtbl_size); } __host__ __device__ const_iterator end() const { return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size, m_hashtbl_values + m_hashtbl_size); } __host__ __device__ size_type size() const { return m_hashtbl_size; } __host__ __device__ value_type* data() const { return m_hashtbl_values; } __forceinline__ static constexpr __host__ __device__ key_type get_unused_key() { return unused_key; } // Generic update of a hash table value for any aggregator template <typename aggregation_type> __forceinline__ __device__ void update_existing_value( mapped_type& 
existing_value, value_type const& insert_pair, aggregation_type) { // update without CAS existing_value = insert_pair.second; } __forceinline__ __device__ void accum_existing_value_atomic( mapped_type& existing_value, value_type const& accum_pair) { // update with CAS // existing_value = insert_pair.second; int num_element = sizeof(existing_value.data) / sizeof(*(existing_value.data)); const mapped_type& accumulator = accum_pair.second; for (int i = 0; i < num_element; i++) { atomicAdd(existing_value.data + i, accumulator.data[i]); } // atomicAdd(&existing_value, double val) } // TODO Overload atomicAdd for 1 byte and 2 byte types, until then, overload // specifically for the // types where atomicAdd already has an overload. Otherwise the generic // update_existing_value will // be used. Specialization for COUNT aggregator /* __forceinline__ __host__ __device__ void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op<int32_t> op) { atomicAdd(&existing_value, static_cast<mapped_type>(1)); } // Specialization for COUNT aggregator __forceinline__ __host__ __device__ void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op<int64_t> op) { atomicAdd(&existing_value, static_cast<mapped_type>(1)); } // Specialization for COUNT aggregator __forceinline__ __host__ __device__ void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op<float> op) { atomicAdd(&existing_value, static_cast<mapped_type>(1)); } // Specialization for COUNT aggregator __forceinline__ __host__ __device__ void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op<double> op) { atomicAdd(&existing_value, static_cast<mapped_type>(1)); } */ /* --------------------------------------------------------------------------*/ /** * @Synopsis Inserts a new (key, value) pair. 
If the key already exists in the map an aggregation operation is performed with the new value and existing value. E.g., if the aggregation operation is 'max', then the maximum is computed between the new value and existing value and the result is stored in the map. * * @Param[in] x The new (key, value) pair to insert * @Param[in] op The aggregation operation to perform * @Param[in] keys_equal An optional functor for comparing two keys * @Param[in] precomputed_hash Indicates if a precomputed hash value is being passed in to use * to determine the write location of the new key * @Param[in] precomputed_hash_value The precomputed hash value * @tparam aggregation_type A functor for a binary operation that performs the aggregation * @tparam comparison_type A functor for comparing two keys * * @Returns An iterator to the newly inserted key,value pair */ /* ----------------------------------------------------------------------------*/ template <typename aggregation_type, class comparison_type = key_equal, typename hash_value_type = typename Hasher::result_type> __forceinline__ __device__ iterator insert( const value_type& x, aggregation_type op, comparison_type keys_equal = key_equal(), bool precomputed_hash = false, hash_value_type precomputed_hash_value = 0) { const size_type hashtbl_size = m_hashtbl_size; value_type* hashtbl_values = m_hashtbl_values; hash_value_type hash_value{0}; // If a precomputed hash value has been passed in, then use it to determine // the write location of the new key if (true == precomputed_hash) { hash_value = precomputed_hash_value; } // Otherwise, compute the hash value from the new key else { hash_value = m_hf(x.first); } size_type current_index = hash_value % hashtbl_size; value_type* current_hash_bucket = &(hashtbl_values[current_index]); const key_type insert_key = x.first; bool insert_success = false; size_type counter = 0; while (false == insert_success) { if (counter++ >= hashtbl_size) { return end(); } key_type& existing_key = 
current_hash_bucket->first; mapped_type& existing_value = current_hash_bucket->second; // Try and set the existing_key for the current hash bucket to insert_key const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key); // If old_key == unused_key, the current hash bucket was empty // and existing_key was updated to insert_key by the atomicCAS. // If old_key == insert_key, this key has already been inserted. // In either case, perform the atomic aggregation of existing_value and // insert_value // Because the hash table is initialized with the identity value of the // aggregation // operation, it is safe to perform the operation when the existing_value // still // has its initial value // TODO: Use template specialization to make use of native atomic // functions // TODO: How to handle data types less than 32 bits? if (keys_equal(unused_key, old_key) || keys_equal(insert_key, old_key)) { update_existing_value(existing_value, x, op); insert_success = true; } current_index = (current_index + 1) % hashtbl_size; current_hash_bucket = &(hashtbl_values[current_index]); } return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size, current_hash_bucket); } /* This function is not currently implemented __forceinline__ __host__ __device__ iterator insert(const value_type& x) { const size_type hashtbl_size = m_hashtbl_size; value_type* hashtbl_values = m_hashtbl_values; const size_type key_hash = m_hf( x.first ); size_type hash_tbl_idx = key_hash%hashtbl_size; value_type* it = 0; while (0 == it) { value_type* tmp_it = hashtbl_values + hash_tbl_idx; #ifdef __CUDA_ARCH__ if ( std::numeric_limits<key_type>::is_integer && std::numeric_limits<mapped_type>::is_integer && sizeof(unsigned long long int) == sizeof(value_type) ) { pair2longlong converter = {0ull}; converter.pair = thrust::make_pair( unused_key, m_unused_element ); const unsigned long long int unused = converter.longlong; converter.pair = x; const unsigned long long int value = converter.longlong; 
const unsigned long long int old_val = atomicCAS( reinterpret_cast<unsigned long long int*>(tmp_it), unused, value ); if ( old_val == unused ) { it = tmp_it; } else if ( count_collisions ) { atomicAdd( &m_collisions, 1 ); } } else { const key_type old_key = atomicCAS( &(tmp_it->first), unused_key, x.first ); if ( m_equal( unused_key, old_key ) ) { (m_hashtbl_values+hash_tbl_idx)->second = x.second; it = tmp_it; } else if ( count_collisions ) { atomicAdd( &m_collisions, 1 ); } } #else #pragma omp critical { if ( m_equal( unused_key, tmp_it->first ) ) { hashtbl_values[hash_tbl_idx] = thrust::make_pair( x.first, x.second ); it = tmp_it; } } #endif hash_tbl_idx = (hash_tbl_idx+1)%hashtbl_size; } return iterator( m_hashtbl_values,m_hashtbl_values+hashtbl_size,it); } */ __forceinline__ __host__ __device__ const_iterator find(const key_type& k) const { size_type key_hash = m_hf(k); size_type hash_tbl_idx = key_hash % m_hashtbl_size; value_type* begin_ptr = 0; size_type counter = 0; while (0 == begin_ptr) { value_type* tmp_ptr = m_hashtbl_values + hash_tbl_idx; const key_type tmp_val = tmp_ptr->first; if (m_equal(k, tmp_val)) { begin_ptr = tmp_ptr; break; } if (m_equal(unused_key, tmp_val) || counter > m_hashtbl_size) { begin_ptr = m_hashtbl_values + m_hashtbl_size; break; } hash_tbl_idx = (hash_tbl_idx + 1) % m_hashtbl_size; ++counter; } return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size, begin_ptr); } template <typename aggregation_type, typename counter_type, class comparison_type = key_equal, typename hash_value_type = typename Hasher::result_type> __forceinline__ __device__ iterator get_insert( const key_type& k, aggregation_type op, counter_type* value_counter, comparison_type keys_equal = key_equal(), bool precomputed_hash = false, hash_value_type precomputed_hash_value = 0) { const size_type hashtbl_size = m_hashtbl_size; value_type* hashtbl_values = m_hashtbl_values; hash_value_type hash_value{0}; // If a precomputed hash value has been 
passed in, then use it to determine // the write location of the new key if (true == precomputed_hash) { hash_value = precomputed_hash_value; } // Otherwise, compute the hash value from the new key else { hash_value = m_hf(k); } size_type current_index = hash_value % hashtbl_size; value_type* current_hash_bucket = &(hashtbl_values[current_index]); const key_type insert_key = k; bool insert_success = false; size_type counter = 0; while (false == insert_success) { // Situation %5: No slot: All slot in the hashtable is occupied by other // key, both get and // insert fail. Return empty iterator if (counter++ >= hashtbl_size) { return end(); } key_type& existing_key = current_hash_bucket->first; volatile mapped_type& existing_value = current_hash_bucket->second; // Try and set the existing_key for the current hash bucket to insert_key const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key); // If old_key == unused_key, the current hash bucket was empty // and existing_key was updated to insert_key by the atomicCAS. // If old_key == insert_key, this key has already been inserted. // In either case, perform the atomic aggregation of existing_value and // insert_value // Because the hash table is initialized with the identity value of the // aggregation // operation, it is safe to perform the operation when the existing_value // still // has its initial value // TODO: Use template specialization to make use of native atomic // functions // TODO: How to handle data types less than 32 bits? // Situation #1: Empty slot: this key never exist in the table, ready to // insert. 
if (keys_equal(unused_key, old_key)) { // update_existing_value(existing_value, x, op); existing_value = (mapped_type)(atomicAdd(value_counter, 1)); break; } // Situation #2+#3: Target slot: This slot is the slot for this key else if (keys_equal(insert_key, old_key)) { while (existing_value == m_unused_element) { // Situation #2: This slot is inserting by another CUDA thread and the // value is not yet // ready, just wait } // Situation #3: This slot is already ready, get successfully and return // (iterator of) the // value break; } // Situation 4: Wrong slot: This slot is occupied by other key, get fail, // do nothing and // linear probing to next slot. current_index = (current_index + 1) % hashtbl_size; current_hash_bucket = &(hashtbl_values[current_index]); } return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size, current_hash_bucket); } int assign_async(const concurrent_unordered_map& other, cudaStream_t stream = 0) { m_collisions = other.m_collisions; if (other.m_hashtbl_size <= m_hashtbl_capacity) { m_hashtbl_size = other.m_hashtbl_size; } else { m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity); m_hashtbl_capacity = other.m_hashtbl_size; m_hashtbl_size = other.m_hashtbl_size; m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity); } CUDA_RT_CALL(cudaMemcpyAsync(m_hashtbl_values, other.m_hashtbl_values, m_hashtbl_size * sizeof(value_type), cudaMemcpyDefault, stream)); return 0; } void clear_async(cudaStream_t stream = 0) { constexpr int block_size = 128; init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size, 0, stream>>>(m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element); if (count_collisions) m_collisions = 0; } unsigned long long get_num_collisions() const { return m_collisions; } void print() { for (size_type i = 0; i < 10; ++i) { std::cout << i << ": " << m_hashtbl_values[i].first << "," << m_hashtbl_values[i].second << std::endl; } } int prefetch(const int dev_id, cudaStream_t stream = 0) { 
cudaPointerAttributes hashtbl_values_ptr_attributes; cudaError_t status = cudaPointerGetAttributes( &hashtbl_values_ptr_attributes, m_hashtbl_values); #if CUDART_VERSION >= 10000 if (cudaSuccess == status && hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged) #else if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged) #endif { CUDA_RT_CALL(cudaMemPrefetchAsync(m_hashtbl_values, m_hashtbl_size * sizeof(value_type), dev_id, stream)); } CUDA_RT_CALL(cudaMemPrefetchAsync(this, sizeof(*this), dev_id, stream)); return 0; } template <class comparison_type = key_equal, typename hash_value_type = typename Hasher::result_type> __forceinline__ __device__ const_iterator accum(const value_type& x, comparison_type keys_equal = key_equal(), bool precomputed_hash = false, hash_value_type precomputed_hash_value = 0) { const key_type& dst_key = x.first; auto it = find(dst_key); if (it == end()) { return it; } value_type* dst = it.getter(); accum_existing_value_atomic(dst->second, x); return it; } private: const hasher m_hf; const key_equal m_equal; const mapped_type m_unused_element; allocator_type m_allocator; size_type m_hashtbl_size; size_type m_hashtbl_capacity; value_type* m_hashtbl_values; unsigned long long m_collisions; }; #endif // CONCURRENT_UNORDERED_MAP_CUH
mass.h
#pragma once class MassGrowth{ public: static PS::F64 tau_mass; static PS::F64 mass_crit; static PS::F64 mass_max; std::vector<PS::S32> id_list; MassGrowth(){ id_list.clear(); } template <class Tpsys> PS::S32 makeList(Tpsys & pp, PS::F64 time){ const PS::S32 n_loc = pp.getNumberOfParticleLocal(); id_list.clear(); for ( PS::S32 i=0; i<n_loc; i++ ){ if ( mass_crit < pp[i].mass && mass_max > pp[i].mass ) { id_list.push_back(i); } } } template <class Tpsys> PS::S32 calcMassGrowth(Tpsys & pp, PS::F64 time, PS::F64 dt_tree){ const PS::S32 n_loc = pp.getNumberOfParticleLocal(); const PS::S32 n_list = id_list.size(); #pragma omp parallel for for ( PS::S32 i=0; i<n_loc; i++ ){ for ( PS::S32 j=0; j<n_list; j++ ){ if ( pp[i] == id_list.at(j) ){ if ( tau_mass != 0. ) pp[i].mass *= exp(dt_tree / tau_mass); } } } return n_list; } }; PS::F64 MassGrowth::tau_mass = 0.;
ast-dump-openmp-target-teams-distribute-parallel-for-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target teams distribute parallel for simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target teams distribute parallel for simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target teams distribute parallel for simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target teams distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target teams distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:4:1, col:54> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} 
<line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr 
{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | 
|-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr 
{{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit 
.global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} 
<col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:10:1, col:54> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} 
<col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} 
<col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | 
|-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} 
<col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 
'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral 
{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // 
CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} 
<col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:17:1, col:66> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:55, col:65> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:64> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:64> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt 
{{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | 
| | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} 
<col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | 
|-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} 
<col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 
'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral 
{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // 
CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} 
<col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:24:1, col:66> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:55, col:65> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:64> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:64> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt 
{{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | 
| | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} 
<col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | 
|-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} 
<col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 
'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral 
{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // 
CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} 
<col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:31:1, col:66> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:55, col:65> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:64> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:64> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, 
line:35:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | 
| `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, 
line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit 
.global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid 
sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | 
`-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | 
| `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | 
|-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | 
`-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | 
| `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl 
{{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' 
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 
{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
parallel-reduction.c
/* parallel-reduction.c
 *
 * Demonstrates the OpenMP reduction(+:sum) clause: on entry to the parallel
 * region each thread receives a private copy of `sum` initialized to the
 * identity of `+` (0); on exit the private copies are combined with the
 * original value of `sum` (100). */
#include<assert.h>
#include<omp.h>
#include<stdio.h>

int main(void) {
  int i =100, sum=100;           /* sum's pre-region value participates in the reduction */
  int thread_num;                /* team size, recorded by exactly one thread */
#pragma omp parallel reduction(+:sum)
  {
    /* `single`: one thread stores the team size; the implicit barrier at the
       end of the single construct makes the store visible before any thread
       leaves the parallel region. */
#pragma omp single
    {
      thread_num = omp_get_num_threads();
    }
    sum += i;                    /* each thread adds i (=100) to its private sum (starts at 0) */
  }
  /* expected: sum == original 100 + 100 contributed per thread */
  printf("thread num=%d sum =%d\n", thread_num, sum);
  assert(sum == (i*thread_num + 100));
  return 0;
}
MultiwayMerge.h
#include "../CombBLAS.h"

/***************************************************************************
 * Find indices of column splitters in a list of std::tuple in parallel.
 * Inputs:
 *   tuples: an array of SpTuples; each std::tuple is (rowid, colid, val),
 *           assumed sorted in column-major (column, then row) order
 *   nsplits: number of splits requested
 * Output:
 *   splitters: An array of size (nsplits+1) storing the starts and ends of
 *              split tuples, i.e. split k covers [splitters[k], splitters[k+1]).
 *   different type used for output since we might need int or IT
 ***************************************************************************/
template <typename RT, typename IT, typename NT>
std::vector<RT> findColSplitters(SpTuples<IT,NT> * & spTuples, int nsplits)
{
    std::vector<RT> splitters(nsplits+1);
    splitters[0] = static_cast<RT>(0);
    ColLexiCompare<IT,NT> comp;   // column-major lexicographic comparator
#pragma omp parallel for
    for(int i=1; i< nsplits; i++)
    {
        // first column that belongs to split i (columns divided evenly)
        IT cur_col = i * (spTuples->getncol()/nsplits);
        // (row 0, cur_col, value 0) compares <= every tuple in column cur_col,
        // so lower_bound yields the first tuple of that column
        std::tuple<IT,IT,NT> search_tuple(0, cur_col, 0);
        std::tuple<IT,IT,NT>* it = std::lower_bound (spTuples->tuples, spTuples->tuples + spTuples->getnnz(), search_tuple, comp);
        splitters[i] = (RT) (it - spTuples->tuples);
    }
    splitters[nsplits] = spTuples->getnnz();
    return splitters;
}

/*
"Internal function" called by MultiwayMerge inside threaded region. Never called from outside.
Assumption1: the input lists are already column sorted Assumption2: at least two lists are passed to this function Assumption3: the input and output lists are to be deleted by caller */ template<class SR, class IT, class NT> SpTuples<IT,NT>* SerialMerge( const std::vector<SpTuples<IT,NT> *> & ArrSpTups, std::tuple<IT, IT, NT> * ntuples) { int nlists = ArrSpTups.size(); ColLexiCompare<IT,int> heapcomp; std::vector<std::tuple<IT, IT, int>> heap(nlists); // if performance issue, create this outside of threaded region std::vector<IT> curptr(nlists, static_cast<IT>(0)); IT estnnz = 0; IT hsize = 0; for(int i=0; i< nlists; ++i) { if(ArrSpTups[i]->getnnz()>0) { estnnz += ArrSpTups[i]->getnnz(); heap[hsize++] = std::make_tuple(std::get<0>(ArrSpTups[i]->tuples[0]), std::get<1>(ArrSpTups[i]->tuples[0]), i); } } std::make_heap(heap.data(), heap.data()+hsize, not2(heapcomp)); IT cnz = 0; while(hsize > 0) { std::pop_heap(heap.data(), heap.data() + hsize, not2(heapcomp)); // result is stored in heap[hsize-1] int source = std::get<2>(heap[hsize-1]); if( (cnz != 0) && ((std::get<0>(ntuples[cnz-1]) == std::get<0>(heap[hsize-1])) && (std::get<1>(ntuples[cnz-1]) == std::get<1>(heap[hsize-1]))) ) { std::get<2>(ntuples[cnz-1]) = SR::add(std::get<2>(ntuples[cnz-1]), ArrSpTups[source]->numvalue(curptr[source]++)); } else { ntuples[cnz++] = ArrSpTups[source]->tuples[curptr[source]++]; } if(curptr[source] != ArrSpTups[source]->getnnz()) // That array has not been depleted { heap[hsize-1] = std::make_tuple(std::get<0>(ArrSpTups[source]->tuples[curptr[source]]), std::get<1>(ArrSpTups[source]->tuples[curptr[source]]), source); std::push_heap(heap.data(), heap.data()+hsize, not2(heapcomp)); } else { --hsize; } } return new SpTuples<IT,NT> (cnz, ArrSpTups[0]->getnrow(), ArrSpTups[0]->getncol(), ntuples, true); } // Performs a balanced merge of the array of SpTuples // Assumes the input parameters are already column sorted template<class SR, class IT, class NT> SpTuples<IT, NT>* MultiwayMerge( 
std::vector<SpTuples<IT,NT> *> & ArrSpTups, IT mdim = 0, IT ndim = 0, bool delarrs = false ) { int nlists = ArrSpTups.size(); if(nlists == 0) { return new SpTuples<IT,NT>(0, mdim, ndim); //empty mxn SpTuples } if(nlists == 1) { if(delarrs) // steal data from input, and don't delete input { return ArrSpTups[0]; } else // std::copy input to output { std::tuple<IT, IT, NT>* mergeTups = static_cast<std::tuple<IT, IT, NT>*> (::operator new (sizeof(std::tuple<IT, IT, NT>[ArrSpTups[0]->getnnz()]))); #pragma omp parallel for for(int i=0; i<ArrSpTups[0]->getnnz(); i++) mergeTups[i] = ArrSpTups[0]->tuples[i]; return new SpTuples<IT,NT> (ArrSpTups[0]->getnnz(), mdim, ndim, mergeTups, true); } } // ---- check correctness of input dimensions ------ for(int i=0; i< nlists; ++i) { if((mdim != ArrSpTups[i]->getnrow()) || ndim != ArrSpTups[i]->getncol()) { std::cerr << "Dimensions of SpTuples do not match on multiwayMerge()" << std::endl; return new SpTuples<IT,NT>(0,0,0); } } int nthreads; #pragma omp parallel { nthreads = omp_get_num_threads(); } int nsplits = 4*nthreads; // oversplit for load balance nsplits = std::min(nsplits, (int)ndim); // we cannot split a column std::vector< std::vector<IT> > colPtrs; for(int i=0; i< nlists; i++) { colPtrs.push_back(findColSplitters<IT>(ArrSpTups[i], nsplits)); // in parallel } // ------ estimate memory requirement after merge in each split ------ std::vector<IT> nnzPerSplit(nsplits); IT nnzAll = static_cast<IT>(0); //#pragma omp parallel for for(int i=0; i< nsplits; i++) { IT t = static_cast<IT>(0); for(int j=0; j< nlists; ++j) t += colPtrs[j][i+1] - colPtrs[j][i]; nnzPerSplit[i] = t; nnzAll += t; } // ------ allocate memory in a serial region ------ std::vector<std::tuple<IT, IT, NT> *> mergeBuf(nsplits); for(int i=0; i< nsplits; i++) { mergeBuf[i] = static_cast<std::tuple<IT, IT, NT>*> (::operator new (sizeof(std::tuple<IT, IT, NT>[nnzPerSplit[i]]))); } // ------ perform merge in parallel ------ std::vector<SpTuples<IT,NT> *> 
listMergeTups(nsplits); // use the memory allocated in mergeBuf #pragma omp parallel for schedule(dynamic) for(int i=0; i< nsplits; i++) // serially merge part by part { std::vector<SpTuples<IT,NT> *> listSplitTups(nlists); for(int j=0; j< nlists; ++j) { IT curnnz= colPtrs[j][i+1] - colPtrs[j][i]; listSplitTups[j] = new SpTuples<IT, NT> (curnnz, mdim, ndim, ArrSpTups[j]->tuples + colPtrs[j][i], true); } listMergeTups[i] = SerialMerge<SR>(listSplitTups, mergeBuf[i]); } // ------ concatenate merged tuples processed by threads ------ std::vector<IT> tdisp(nsplits+1); tdisp[0] = 0; for(int i=0; i<nsplits; ++i) { tdisp[i+1] = tdisp[i] + listMergeTups[i]->getnnz(); } IT mergedListSize = tdisp[nsplits]; std::tuple<IT, IT, NT>* shrunkTuples = static_cast<std::tuple<IT, IT, NT>*> (::operator new (sizeof(std::tuple<IT, IT, NT>[mergedListSize]))); #pragma omp parallel for schedule(dynamic) for(int i=0; i< nsplits; i++) { std::copy(listMergeTups[i]->tuples , listMergeTups[i]->tuples + listMergeTups[i]->getnnz(), shrunkTuples + tdisp[i]); } for(int i=0; i< nsplits; i++) { //::operator delete(listMergeTups[i]->tuples); ::operator delete(mergeBuf[i]); } for(int i=0; i< nlists; i++) { if(delarrs) delete ArrSpTups[i]; // this might be expensive for large local matrices } return new SpTuples<IT, NT> (mergedListSize, mdim, ndim, shrunkTuples, true); }
spacetime_heat_initial_m1_kernel_antiderivative.h
/* Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of Technology All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the names of VSB - Technical University of Ostrava and Graz University of Technology nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND GRAZ UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/** @file spacetime_heat_initial_m1_kernel_antiderivative.h
 * @brief */

#ifndef INCLUDE_BESTHEA_SPACETIME_HEAT_INITIAL_M1_KERNEL_ANTIDERIVATIVE_H_
#define INCLUDE_BESTHEA_SPACETIME_HEAT_INITIAL_M1_KERNEL_ANTIDERIVATIVE_H_

#include <besthea/spacetime_heat_initial_kernel_antiderivative.h>

#include "besthea/settings.h"

#include <vector>

namespace besthea {
  namespace bem {
    // forward declaration so the CRTP base can name the derived class
    class spacetime_heat_initial_m1_kernel_antiderivative;
  }
}

/**
 * Class representing a first and second antiderivative of the double-layer
 * spacetime kernel.
 */
class besthea::bem::spacetime_heat_initial_m1_kernel_antiderivative
  : public besthea::bem::spacetime_heat_initial_kernel_antiderivative<
      spacetime_heat_initial_m1_kernel_antiderivative > {
 public:
  /**
   * Constructor.
   * @param[in] alpha Heat conductivity.
   */
  spacetime_heat_initial_m1_kernel_antiderivative( sc alpha )
    : spacetime_heat_initial_kernel_antiderivative<
        spacetime_heat_initial_m1_kernel_antiderivative >( alpha ) {
  }

  /**
   * Destructor.
   */
  virtual ~spacetime_heat_initial_m1_kernel_antiderivative( ) {
  }

  /**
   * Evaluates the first antiderivative for t > 0 (regular case).
   * Computed as (n_x . (x-y)) / (4 pi |x-y|^2)
   * * ( erf(|x-y| / (2 sqrt(alpha t))) / |x-y|
   *     - exp(-|x-y|^2 / (4 alpha t)) / sqrt(pi alpha t) ),
   * with the _* constants inherited from the kernel base class.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] t `t`.
   */
#pragma omp declare simd uniform( this, nx, t ) simdlen( DATA_WIDTH )
  sc do_anti_t_regular( sc xy1, sc xy2, sc xy3, const sc * nx, sc t ) const {
    sc norm2 = xy1 * xy1 + xy2 * xy2 + xy3 * xy3;  // |x-y|^2
    sc norm = std::sqrt( norm2 );
    sc dot = xy1 * nx[ 0 ] + xy2 * nx[ 1 ] + xy3 * nx[ 2 ];  // n_x . (x-y)
    sc sqrt_d = std::sqrt( t );

    sc value = dot / ( _four * _pi * norm2 )
      * ( std::erf( norm / ( _two * sqrt_d * _sqrt_alpha ) ) / norm
        - _one / ( _sqrt_pi * sqrt_d * _sqrt_alpha )
          * std::exp( -norm2 / ( _four * t * _alpha ) ) );

    return value;
  }

  /**
   * Evaluates the first antiderivative in the limit t -> 0
   * (erf -> 1, exp term vanishes), i.e.
   * (n_x . (x-y)) / (4 pi |x-y|^3).
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   */
#pragma omp declare simd uniform( this, nx ) simdlen( DATA_WIDTH )
  sc do_anti_t_limit( sc xy1, sc xy2, sc xy3, const sc * nx ) const {
    sc norm2 = xy1 * xy1 + xy2 * xy2 + xy3 * xy3;  // |x-y|^2
    sc norm = std::sqrt( norm2 );
    sc dot = xy1 * nx[ 0 ] + xy2 * nx[ 1 ] + xy3 * nx[ 2 ];  // n_x . (x-y)

    sc value = dot / ( _four * _pi * norm2 * norm );

    return value;
  }
};

#endif /* INCLUDE_BESTHEA_SPACETIME_HEAT_INITIAL_M1_KERNEL_ANTIDERIVATIVE_H_ \
        */
clipperz_srp_fmt_plug.c
/* This software was repurposed by Dhiru Kholia (dhiru at openwall.com)
 * in 2012.
 *
 * This software was written by Jim Fougeron jfoug AT cox dot net
 * in 2012. No copyright is claimed, and the software is hereby
 * placed in the public domain. In case this attempt to disclaim
 * copyright and place the software in the public domain is deemed
 * null and void, then the software is Copyright (c) 2012 Jim Fougeron
 * and it is hereby released to the general public under the following
 * terms:
 *
 * This software may be modified, redistributed, and used for any
 * purpose, in source and binary forms, with or without modification.
 *
 * Format was busted, just like wow-srp. It ONLY was handling binary residue
 * if the residue was exactly 64 hex bytes long. Well for exponentation, it
 * does not have to be 64 bytes. It can be shorter. We also handle case where
 * a shorter result number is 0 Lpadded to an even 64 bytes. split() should
 * be added to canonize these hashes, since they are same hash with
 * multiple representations.
 *
 * This implements the SRP protocol, with Clipperz documented
 * implementation specifics.
 *
 * s = random salt value.
 *
 * v is the 'verifier' value (256 bit value).
 *
 * Clipperz's offline database has following relevant fields,
 *
 * <script>_clipperz_dump_data_ = { ...
 *
 * '2f2134e38b23534adfcd43c2f7223caf3a53a8db7ce800f1e918e8e0d06b8b7a': {
 * s: 'e0bc11ee4db80a3ecabd293f5201cb747856361192c68f4133ea707c7d4d2d32',
 * v: 'e8be8c8d9c1d5dc79ecc7b15d1787d5b5dc22e815ddb0b37f6145ca667421f1f
 * version: '0.2',
 * ...
 * }
 * P algorithm:
 * h1 = hashlib.sha256(password + username).digest()
 * P = h2 = hashlib.sha256(h1).hexdigest()
 *
 * x algorithm:
 * x1 = hashlib.sha256(s + P).digest()
 * x = hashlib.sha256(x1).hexdigest()
 *
 * v algorithm:
 * v = Clipperz.Crypto.SRP.g().powerModule(new Clipperz.Crypto.BigInt(x,16),Clipperz.Crypto.SRP.n());
 * n = 125617018995153554710546479714086468244499594888726646874671447258204721048803
 * g = 2
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_clipperz;
#elif FMT_REGISTERS_H
john_register_one(&fmt_clipperz);
#else

#if AC_BUILT
/* need to know if HAVE_LIBGMP is set, for autoconfig build */
#include "autoconfig.h"
#endif

#include <string.h>
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#ifdef HAVE_LIBGMP
#if HAVE_GMP_GMP_H
#include <gmp/gmp.h>
#else
#include <gmp.h>
#endif
#define EXP_STR " GMP-exp"
#else
#include <openssl/bn.h>
#define EXP_STR " oSSL-exp"
#endif
#include "johnswap.h"

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif

#include "memdbg.h"

#define FORMAT_LABEL "Clipperz"
#define FORMAT_NAME "SRP"
#define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR EXP_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define CLIPPERZSIG "$clipperz$"
#define CLIPPERZSIGLEN (sizeof(CLIPPERZSIG)-1)
#define PLAINTEXT_LENGTH 16
#define CIPHERTEXT_LENGTH 65
#define BINARY_SIZE 33
#define BINARY_ALIGN 4
#define FULL_BINARY_SIZE 33
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 1
#define USERNAMELEN 32

#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 4

#define SZ 128

// salt is in hex (salt and salt2)
static struct fmt_tests tests[] = {
	{CLIPPERZSIG"e8be8c8d9c1d5dc79ecc7b15d1787d5b5dc22e815ddb0b37f6145ca667421f1f$e0bc11ee4db80a3ecabd293f5201cb747856361192c68f4133ea707c7d4d2d32*hackme@mailinator.com", "openwall"},
	/* the next two vectors are the same hash with and without a leading '0':
	 * split() must canonize them to one representation */
	{"$clipperz$05b18d6976d6cefad7c0c330c0c8a32ed69f19a8d68a94c3916c5ad1ba5ce37e5$RoljkWQajmS8OXFbsnqmZFTeB2How6hkoDd5QKu0DjthET3NmjTmOLumZe84nb7o*1", "password"},
	{"$clipperz$5b18d6976d6cefad7c0c330c0c8a32ed69f19a8d68a94c3916c5ad1ba5ce37e5$RoljkWQajmS8OXFbsnqmZFTeB2How6hkoDd5QKu0DjthET3NmjTmOLumZe84nb7o*1", "password"},
	{NULL}
};

/* per-candidate big-number state: modulus n, generator g, exponent x and
 * result g^x mod n, kept per index so OpenMP threads do not share state */
#ifdef HAVE_LIBGMP
typedef struct t_SRP_CTX {
	mpz_t z_mod, z_base, z_exp, z_rop;
} SRP_CTX;
#else
typedef struct t_SRP_CTX {
	BIGNUM *z_mod, *z_base, *z_exp, *z_rop;
	BN_CTX *BN_ctx;
}SRP_CTX;
#endif

static SRP_CTX *pSRP_CTX;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
// BN_bn2bin sometimes tries to write 33 bytes, hence allow some padding!
// that is because these are mod 0x115B8B692E0E045692CF280B436735C77A5A9E8A9E7ED56C965F87DB5B2A2ECE3
// which is a 65 hex digit number (33 bytes long).
static uint32_t (*crypt_out)[(FULL_BINARY_SIZE/4) + 1];

/* salt = hex salt string plus the username (both NUL-terminated) */
static struct custom_salt {
	unsigned char saved_salt[SZ];
	unsigned char user_id[SZ];
} *cur_salt;

static int max_keys_per_crypt;

/* Allocate per-candidate buffers and initialize the SRP bignum constants. */
static void init(struct fmt_main *self)
{
	int i;
#if defined (_OPENMP)
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_align(sizeof(*crypt_out),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	pSRP_CTX = mem_calloc_align(sizeof(*pSRP_CTX),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	max_keys_per_crypt = self->params.max_keys_per_crypt;

	for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
#ifdef HAVE_LIBGMP
		/* n (decimal) and g = 2 from the Clipperz SRP parameters */
		mpz_init_set_str(pSRP_CTX[i].z_mod, "125617018995153554710546479714086468244499594888726646874671447258204721048803", 10);
		mpz_init_set_str(pSRP_CTX[i].z_base, "2", 10);
		mpz_init_set_str(pSRP_CTX[i].z_exp, "1", 10);
		mpz_init(pSRP_CTX[i].z_rop);
		// Now, properly initialized mpz_exp, so it is 'large enough' to hold any SHA256 value
		// we need to put into it. Then we simply need to copy in the data, and possibly set
		// the limb count size.
		mpz_mul_2exp(pSRP_CTX[i].z_exp, pSRP_CTX[i].z_exp, 159);
#else
		pSRP_CTX[i].z_mod=BN_new();
		BN_dec2bn(&pSRP_CTX[i].z_mod, "125617018995153554710546479714086468244499594888726646874671447258204721048803");
		pSRP_CTX[i].z_base=BN_new();
		BN_set_word(pSRP_CTX[i].z_base, 2);
		pSRP_CTX[i].z_exp=BN_new();
		pSRP_CTX[i].z_rop=BN_new();
		pSRP_CTX[i].BN_ctx = BN_CTX_new();
#endif
	}
}

/* Release everything init() allocated. */
void done(void)
{
	int i;
	for (i = 0; i < max_keys_per_crypt; ++i) {
#ifdef HAVE_LIBGMP
		mpz_clear(pSRP_CTX[i].z_mod);
		mpz_clear(pSRP_CTX[i].z_base);
		mpz_clear(pSRP_CTX[i].z_exp);
		mpz_clear(pSRP_CTX[i].z_rop);
#else
		BN_clear_free(pSRP_CTX[i].z_mod);
		BN_clear_free(pSRP_CTX[i].z_base);
		BN_clear_free(pSRP_CTX[i].z_exp);
		BN_clear_free(pSRP_CTX[i].z_rop);
		BN_CTX_free(pSRP_CTX[i].BN_ctx);
#endif
	}
	MEM_FREE(pSRP_CTX);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Syntax check: $clipperz$<hex v, odd length OK>$<salt>*<username> */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p = NULL;
	if (strncmp(ciphertext, CLIPPERZSIG, CLIPPERZSIGLEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += CLIPPERZSIGLEN;
	if ((p = strtokm(ctcopy, "$")) == NULL) /* v (the verifier) */
		goto err;
	if (strlen(p) > CIPHERTEXT_LENGTH)
		goto err;
	if (!ishex_oddOK(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* salt */
		goto err;
	if (strlen(p) > SZ-1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* username */
		goto err;
	if (strlen(p) > SZ-1)
		goto err;
	if ((p = strtokm(NULL, "*"))) /* no trailing garbage allowed */
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* Canonize: lower-case the verifier and strip leading '0' digits, so the
 * same hash in multiple textual representations dedupes to one. */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char ct[128+2*SZ+1];
	char *cp;

	if (strncmp(ciphertext, CLIPPERZSIG, CLIPPERZSIGLEN))
		return ciphertext;
	strnzcpy(ct, ciphertext, sizeof(ct));
	cp = strchr(&ct[CLIPPERZSIGLEN], '$');
	if (!cp)
		return ciphertext;
	*cp = 0;
	strlwr(&ct[CLIPPERZSIGLEN]);
	*cp = '$';
	if (ct[CLIPPERZSIGLEN] == '0') {
		/* shift the string left over any leading zeros (NUL included) */
		char *cpi = &ct[CLIPPERZSIGLEN];
		char *cpo = cpi;
		while (*cpi == '0')
			++cpi;
		do {
			*cpo++ = *cpi;
		} while (*cpi++);
	}
	return ct;
}

/* Decode the (possibly odd-length, possibly short) hex verifier into
 * big-endian bytes; a lone leading nibble goes into out[0]. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[FULL_BINARY_SIZE];
		uint32_t dummy[1];
	} buf;
	unsigned char *out = buf.c;
	char *p, *q;
	int i;

	p = &ciphertext[CLIPPERZSIGLEN];
	q = strchr(p, '$');
	memset(buf.c, 0, sizeof(buf));
	while (*p == '0')
		++p;
	if ((q-p)&1) {
		out[0] = atoi16[ARCH_INDEX(*p)];
		++p;
	} else {
		out[0] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	for (i = 1; i < FULL_BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
		if (p >= q)
			break;
	}
	return out;
}

/* Extract salt and username into a zeroed custom_salt. */
static void *get_salt(char *ciphertext)
{
	char *p;
	char *q;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	p = ciphertext;
	p = strchr(&ciphertext[CLIPPERZSIGLEN], '$') + 1;
	q = strrchr(ciphertext, '*');
	/* cs is zeroed, so this strncpy stays NUL-terminated */
	strncpy((char*)cs.saved_salt, p, q - p);
	p = strrchr(ciphertext, '*') + 1;
	strcpy((char*)cs.user_id, p);
	return (void *)&cs;
}

/* Standard JtR partial-hash accessors over the first crypt_out word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Hash the salt bytes (treated as a C string) into the salt table. */
static int salt_hash(void *salt)
{
	unsigned int hash = 0;
	char *p = (char *)salt;

	while (*p) {
		hash <<= 1;
		hash += (unsigned char)*p++;
		if (hash >> SALT_HASH_LOG) {
			hash ^= hash >> SALT_HASH_LOG;
			hash &= (SALT_HASH_SIZE - 1);
		}
	}
	hash ^= hash >> SALT_HASH_LOG;
	hash &= (SALT_HASH_SIZE - 1);
	return hash;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Write len bytes of str as 2*len lowercase hex chars into out
 * (no NUL terminator). */
inline static void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	int i;
	for (i = 0; i < len; ++i) {
		out[0] = itoa16[str[i]>>4];
		out[1] = itoa16[str[i]&0xF];
		out += 2;
	}
}

/* For each candidate: P = sha256(sha256(pass||user)); x = sha256(sha256(salt||hex(P)));
 * then v = g^x mod n via GMP or OpenSSL BN, stored big-endian in crypt_out. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (j = 0; j < count; ++j) {
		SHA256_CTX ctx;
		unsigned char Tmp[32];
		unsigned char TmpHex[64];

		memset(crypt_out[j], 0, sizeof(crypt_out[j]));
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, saved_key[j], strlen(saved_key[j]));
		SHA256_Update(&ctx, cur_salt->user_id, strlen((char*)cur_salt->user_id));
		SHA256_Final(Tmp, &ctx);
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, Tmp, 32);
		SHA256_Final(Tmp, &ctx);
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, cur_salt->saved_salt, strlen((char*)cur_salt->saved_salt));
		/* P is fed in as lowercase hex, matching the hexdigest() above */
		hex_encode(Tmp, 32, TmpHex);
		SHA256_Update(&ctx, TmpHex, 64);
		SHA256_Final(Tmp, &ctx);
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, Tmp, 32);
		SHA256_Final(Tmp, &ctx);
#ifdef HAVE_LIBGMP
	{
		unsigned char HashStr[80], *p;
		int i, todo;
		p = HashStr;
		for (i = 0; i < 32; ++i) {
			*p++ = itoa16[Tmp[i]>>4];
			*p++ = itoa16[Tmp[i]&0xF];
		}
		*p = 0;

		mpz_set_str(pSRP_CTX[j].z_exp, (char*)HashStr, 16);
		mpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod );
		mpz_get_str ((char*)HashStr, 16, pSRP_CTX[j].z_rop);

		/* decode the (possibly odd-length) hex result into bytes, as in
		 * get_binary(), so a short residue still compares correctly */
		p = HashStr;
		todo = strlen((char*)p);
		if (todo&1) {
			((unsigned char*)(crypt_out[j]))[0] = atoi16[ARCH_INDEX(*p)];
			++p;
			--todo;
		} else {
			((unsigned char*)(crypt_out[j]))[0] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
			p += 2;
			todo -= 2;
		}
		todo >>= 1;
		for (i = 1; i <= todo; i++) {
			((unsigned char*)(crypt_out[j]))[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
			p += 2;
		}
	}
#else
		// using oSSL's BN to do expmod.
		pSRP_CTX[j].z_exp = BN_bin2bn(Tmp,32,pSRP_CTX[j].z_exp);
		BN_mod_exp(pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod, pSRP_CTX[j].BN_ctx);
		BN_bn2bin(pSRP_CTX[j].z_rop, (unsigned char*)(crypt_out[j]));
#endif
	}
	return count;
}

/* Quick first-word comparison against all computed candidates. */
static int cmp_all(void *binary, int count)
{
	int i;
	for (i = 0; i < count; ++i) {
		if (*((uint32_t*)binary) == *((uint32_t*)(crypt_out[i])))
			return 1;
	}
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return *((uint32_t*)binary) == *((uint32_t*)(crypt_out[index]));
}

/* Full-width comparison (re-decodes the ciphertext). */
static int cmp_exact(char *source, int index)
{
	return !memcmp(get_binary(source), crypt_out[index], BINARY_SIZE);
}

struct fmt_main fmt_clipperz = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ CLIPPERZSIG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
dfe.c
/*! @copyright (c) 2017 King Abdullah University of Science and
 * Technology (KAUST). All rights reserved.
 *
 * STARS-H is a software package, provided by King Abdullah
 * University of Science and Technology (KAUST)
 *
 * @file src/backends/openmp/blrm/dfe.c
 * @version 1.3.0
 * @author Aleksandr Mikhalev
 * @date 2017-11-07
 * */

#include "common.h"
#include "starsh.h"

double starsh_blrm__dfe_omp(STARSH_blrm *matrix)
//! Approximation error in Frobenius norm of double precision matrix.
/*! Measure error of approximation of a dense matrix by block-wise low-rank
 * matrix.
 *
 * @param[in] matrix: Block-wise low-rank matrix.
 * @return Error of approximation.
 * @ingroup blrm
 * */
{
    STARSH_blrm *M = matrix;
    STARSH_blrf *F = M->format;
    STARSH_problem *P = F->problem;
    STARSH_kernel *kernel = P->kernel;
    // Shortcuts to information about clusters
    STARSH_cluster *R = F->row_cluster;
    STARSH_cluster *C = F->col_cluster;
    void *RD = R->data, *CD = C->data;
    // Number of far-field and near-field blocks
    STARSH_int nblocks_far = F->nblocks_far;
    STARSH_int nblocks_near = F->nblocks_near, bi;
    STARSH_int nblocks = nblocks_far+nblocks_near;
    // Shortcut to all U and V factors
    Array **U = M->far_U, **V = M->far_V;
    // Special constant for symmetric case
    double sqrt2 = sqrt(2.);
    // Temporary arrays to compute norms more precisely with dnrm2
    // NOTE(review): these are VLAs sized by the block counts — for very large
    // problems this may risk stack overflow; confirm expected nblocks range.
    double block_norm[nblocks], far_block_diff[nblocks_far];
    double *far_block_norm = block_norm;
    double *near_block_norm = block_norm+nblocks_far;
    char symm = F->symm;
    // info is written by STARSH_PMALLOC inside the parallel loops without
    // atomics; it only flags allocation failure (best-effort by design here).
    int info = 0;
    // Simple cycle over all far-field blocks
    #pragma omp parallel for schedule(dynamic, 1)
    for(bi = 0; bi < nblocks_far; bi++)
    {
        if(info != 0)
            continue;
        // Get indexes and sizes of block row and column
        STARSH_int i = F->block_far[2*bi];
        STARSH_int j = F->block_far[2*bi+1];
        int nrows = R->size[i];
        int ncols = C->size[j];
        // Rank of a block
        int rank = M->far_rank[bi];
        // Temporary array for more precise dnrm2
        double *D, D_norm[ncols];
        size_t D_size = (size_t)nrows*(size_t)ncols;
        STARSH_PMALLOC(D, D_size, info);
        // Get actual elements of a block
        kernel(nrows, ncols, R->pivot+R->start[i], C->pivot+C->start[j],
                RD, CD, D, nrows);
        // Get Frobenius norm of a block: column-wise dnrm2, then dnrm2 of
        // the column norms (more accurate than one big sum of squares)
        for(size_t k = 0; k < ncols; k++)
            D_norm[k] = cblas_dnrm2(nrows, D+k*nrows, 1);
        double tmpnorm = cblas_dnrm2(ncols, D_norm, 1);
        far_block_norm[bi] = tmpnorm;
        // Get difference of initial and approximated block: D -= U*V^T
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, nrows, ncols,
                rank, -1., U[bi]->data, nrows, V[bi]->data, ncols, 1.,
                D, nrows);
        // Compute Frobenius norm of the latter
        for(size_t k = 0; k < ncols; k++)
            D_norm[k] = cblas_dnrm2(nrows, D+k*nrows, 1);
        free(D);
        double tmpdiff = cblas_dnrm2(ncols, D_norm, 1);
        far_block_diff[bi] = tmpdiff;
        if(i != j && symm == 'S')
        {
            // Multiply by square root of 2 in symmetric case
            // (work on 1 block instead of 2 blocks)
            far_block_norm[bi] *= sqrt2;
            far_block_diff[bi] *= sqrt2;
        }
    }
    if(info != 0)
        return -1; // Need to rework this (since double is returned,
    // not Error code)
    if(M->onfly == 0)
        // Simple cycle over all near-field blocks (dense parts kept in
        // memory: only their norms enter the denominator, diff is zero)
        #pragma omp parallel for schedule(dynamic, 1)
        for(bi = 0; bi < nblocks_near; bi++)
        {
            // Get indexes and sizes of corresponding block row and column
            STARSH_int i = F->block_near[2*bi];
            STARSH_int j = F->block_near[2*bi+1];
            int nrows = R->size[i];
            int ncols = C->size[j];
            // Compute norm of a block
            double *D = M->near_D[bi]->data, D_norm[ncols];
            for(size_t k = 0; k < ncols; k++)
                D_norm[k] = cblas_dnrm2(nrows, D+k*nrows, 1);
            near_block_norm[bi] = cblas_dnrm2(ncols, D_norm, 1);
            if(i != j && symm == 'S')
                // Multiply by square root of 2 in symmetric case
                near_block_norm[bi] *= sqrt2;
        }
    else
        // Simple cycle over all near-field blocks (on-the-fly mode: dense
        // blocks are regenerated by the kernel just to take their norm)
        #pragma omp parallel for schedule(dynamic, 1)
        for(bi = 0; bi < nblocks_near; bi++)
        {
            if(info != 0)
                continue;
            // Get indexes and sizes of corresponding block row and column
            STARSH_int i = F->block_near[2*bi];
            STARSH_int j = F->block_near[2*bi+1];
            int nrows = R->size[i];
            int ncols = C->size[j];
            double *D, D_norm[ncols];
            // Allocate temporary array and fill it with elements of a block
            STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);
            kernel(nrows, ncols, R->pivot+R->start[i], C->pivot+C->start[j],
                    RD, CD, D, nrows);
            // Compute norm of a block
            for(size_t k = 0; k < ncols; k++)
                D_norm[k] = cblas_dnrm2(nrows, D+k*nrows, 1);
            // Free temporary buffer
            free(D);
            near_block_norm[bi] = cblas_dnrm2(ncols, D_norm, 1);
            if(i != j && symm == 'S')
                // Multiply by square root of 2 ub symmetric case
                near_block_norm[bi] *= sqrt2;
        }
    if(info != 0)
        return -1; // Need to rework this, since returned value is double,
    // not error code
    // Get difference of initial and approximated matrices
    double diff = cblas_dnrm2(nblocks_far, far_block_diff, 1);
    // Get norm of initial matrix
    double norm = cblas_dnrm2(nblocks, block_norm, 1);
    // Relative Frobenius-norm error ||A - Ã||_F / ||A||_F
    return diff/norm;
}
omp_for_private.c
// RUN: %libomp-compile-and-run // REQUIRES: !(abt && (clang || gcc)) #include <stdio.h> #include <math.h> #include "omp_testsuite.h" /* Utility function do spend some time in a loop */ static void do_some_work() { int i; double sum = 0; for(i = 0; i < 1000; i++){ sum += sqrt ((double) i); } } int sum1; #pragma omp threadprivate(sum1) int test_omp_for_private() { int sum = 0; int sum0; int known_sum; sum0 = 0; /* setting (global) sum0 = 0 */ #pragma omp parallel { sum1 = 0; /* setting sum1 in each thread to 0 */ { /* begin of orphaned block */ int i; #pragma omp for private(sum0) schedule(static,1) for (i = 1; i <= LOOPCOUNT; i++) { sum0 = sum1; #pragma omp flush sum0 = sum0 + i; do_some_work (); #pragma omp flush sum1 = sum0; } } /* end of orphaned block */ #pragma omp critical { sum = sum + sum1; } /*end of critical*/ } /* end of parallel*/ known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; return (known_sum == sum); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_private()) { num_failed++; } } return num_failed; }
if-clause.c
/*
 * OpenMP "if" clause demo.
 *
 * The parallel region is guarded by if(n > 4): with n <= 4 no team is
 * forked and the master thread runs the whole loop serially; with n > 4
 * the loop iterations are split statically among the threads.
 *
 * Thread count resolution order: omp_set_num_threads() in the code wins
 * over the OMP_NUM_THREADS environment variable, which wins over the
 * implementation default (usually the number of cores). This program
 * pins the team size to 2 via omp_set_num_threads(2).
 *
 * Example (OMP_NUM_THREADS=3, ./if-clause 4): every "thread 0 suma de
 * a[i]..." line comes from the master because n > 4 is false and no
 * fork/join happens; with ./if-clause 5 or more, several thread ids
 * appear and the final line prints the combined sum.
 */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(int argc, char **argv){
	int i, n = 20, tid;
	/* fixed-size buffer: n is clamped to 20 below, so a VLA sized by the
	 * (not yet validated) n is unnecessary and was fragile */
	int a[20], suma = 0, sumalocal;

	if (argc < 2) {
		fprintf(stderr,"[ERROR]-Falta iteraciones y num_threads\n");
		exit(EXIT_FAILURE); /* portable failure code instead of -1 */
	}
	n = atoi(argv[1]);
	if (n > 20)
		n = 20;
	for (i = 0; i < n; i++) {
		a[i] = i;
	}

	/* pin the team size directly (overrides OMP_NUM_THREADS) */
	omp_set_num_threads(2);

	#pragma omp parallel if(n>4) default(none) private(sumalocal,tid) shared(a,suma,n)
	{
		sumalocal = 0;
		tid = omp_get_thread_num();
		/* nowait: a thread finishing its chunk does not need to wait at the
		 * end of the for, because the atomic below serializes the update of
		 * suma and the explicit barrier afterwards re-synchronizes */
		#pragma omp for private(i) schedule(static) nowait
		for (i = 0; i < n; i++) {
			sumalocal += a[i];
			printf(" thread %d suma de a[%d]=%d sumalocal=%d \n", tid,i,a[i],sumalocal);
		}
		#pragma omp atomic
		suma += sumalocal;

		#pragma omp barrier
		#pragma omp master
		printf("thread master=%d imprime suma=%d\n",tid,suma);
	}
	return 0; /* explicit success status */
}
particleSimulator-mpi.c
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>
#include <math.h>
#include <mpi.h>
#include <unistd.h>
#include <sys/types.h>
#include "parseFlags.h"

#define maxlength 1024

/* 2-D vector used for positions, velocities and forces */
typedef struct {
	double x;
	double y;
} coordinates;

/* one simulated body; size is derived as sqrt(mass) when loading */
typedef struct {
	coordinates position;
	coordinates velocity;
	double mass;
	double size;
} particle;

/* Print the expected command line. */
void usage ( char *progName ) {
	printf ( "usage: %s input particle.01.data dt 0.01 numSteps 50 output tmp/timestep threads 10 time [0|1]\n", progName );
}

/* MPI+OpenMP N-body simulator: rank 0 reads the particle file, particles are
 * scattered across ranks, each rank integrates its slice each step, and the
 * full set is gathered/broadcast so all ranks see all particles for the next
 * force evaluation. Rank 0 writes one output file per timestep. */
int main(int argc, char **argv)
{
	int i,j,k,size,numsteps,rank,hostcount;
	int numthreads,timecheck,numFlags;
	int num, wsize,displacement,filesize;
	double dt,tmp;
	double start,end;
	double fx, fy;
	double dx, dy, dist, f;
	float px,py,vx,vy,mass;
	char *input, *output, *outputnumbered;
	particle *p,*sp,*wp;;
	coordinates *force;
	FILE *infile,*outfile;
	int error;
	int blockcounts[2];
	MPI_Aint offsets[2], extent;
	MPI_Datatype coordinateType, particleType, oldtypes[2];
	int *displs;
	int *sendcounts;

	MPI_Init(&argc,&argv);
	MPI_Comm_size(MPI_COMM_WORLD,&hostcount);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);

	/* Set up particle structure MPI data type.
	 * NOTE(review): MPI_Type_struct/MPI_Type_extent are deprecated since
	 * MPI-2; MPI_Type_create_struct/MPI_Type_get_extent are the modern
	 * equivalents — confirm target MPI version before changing. */
	offsets[0] = 0;
	oldtypes[0] = MPI_DOUBLE;
	blockcounts[0] = 2;
	MPI_Type_struct(1,blockcounts,offsets,oldtypes,&coordinateType);
	MPI_Type_commit(&coordinateType);

	offsets[0] = 0;
	oldtypes[0] = coordinateType;
	blockcounts[0] = 2;
	MPI_Type_extent(coordinateType,&extent);
	offsets[1] = 2 * extent;
	oldtypes[1] = MPI_DOUBLE;
	blockcounts[1] = 2;
	MPI_Type_struct(2,blockcounts,offsets,oldtypes,&particleType);
	MPI_Type_commit(&particleType);
	/* setup done */

	/* get all the command-line arguments and put them on the books */
	commandLineFlagType flag[] = {
		/* stringToBeMatched, variableType, pointerToVariable */
		{"input", _string, &input },
		{"dt", _double, &dt },
		{"time", _int, &timecheck },
		{"numSteps", _int, &numsteps },
		{"output", _string, &output },
		{"threads", _int, &numthreads }
	};
	numFlags = sizeof ( flag ) / sizeof ( commandLineFlagType );
	usageErrorType parseErrorCode = parseArgs ( argc, argv, numFlags, flag ) ;
	if ( parseErrorCode == argError || parseErrorCode == parseError || argc < 7) {
		usage( argv[0] );
		MPI_Finalize();
		exit(1);
	}

	/* rank 0 opens the input; success/failure is broadcast so every rank
	 * exits cleanly on error instead of deadlocking */
	if(rank == 0){
		infile = fopen(input,"r");
		if(infile == NULL){
			perror(input);
			error = 1;
		}
		else error = 0;
	}
	MPI_Bcast(&error,1,MPI_INT,0,MPI_COMM_WORLD);
	if(error == 1){
		MPI_Finalize();
		exit(1);
	}

	if(rank == 0){
		filesize = lseek(fileno(infile),0,SEEK_END);
		lseek(fileno(infile),-filesize,SEEK_CUR);
		/* NOTE(review): 44 appears to be the assumed bytes-per-record of a
		 * text line; an over-estimate only wastes memory, but verify it is
		 * a safe lower bound for the input format */
		p = malloc(sizeof(particle)*(filesize/44));
		size = 0;
		while(fscanf(infile,"%f %f %f %f %f",&px,&py,&vx,&vy,&mass) > 0) {
			p[size].position.x = px;
			p[size].position.y = py;
			p[size].velocity.x = vx;
			p[size].velocity.y = vy;
			p[size].mass = mass;
			p[size].size = sqrt(p[size].mass);
			size++;
		};
		fclose(infile);
		if(timecheck)
			start = omp_get_wtime();
		wsize = size;
		/* even split, with the remainder folded into the last rank */
		sendcounts = malloc(sizeof(int)*hostcount);
		displs = malloc(sizeof(int)*hostcount);
		size = wsize/hostcount;
		for(i = 0; i < hostcount; i++){
			sendcounts[i] = size;
		}
		if(size*hostcount < wsize)
			sendcounts[hostcount-1] += wsize - (size * hostcount);
		for(i = 0; i < hostcount; i++){
			displs[i] = size*i;
		}
	}

	/* Send the size of each subarray to the respective host */
	MPI_Scatter(sendcounts,1,MPI_INT,&size,1,MPI_INT,0,MPI_COMM_WORLD);
	/* Send the total number of particles to every host */
	MPI_Bcast(&wsize,1,MPI_INT,0,MPI_COMM_WORLD);

	outputnumbered = malloc(sizeof(char)*maxlength);
	force = malloc(sizeof(coordinates)*size);
	wp = malloc(sizeof(particle)*wsize);
	/* NOTE(review): this allocation is leaked — sp is reassigned to
	 * wp+displacement below and this buffer is never freed or used */
	sp = malloc(sizeof(particle)*size);
	omp_set_num_threads(numthreads);

	/* send the all the particles to each host
	 * NOTE(review): on non-root ranks p is uninitialized here; MPI ignores
	 * the send buffer on non-root ranks, but initializing p = NULL would be
	 * cleaner — confirm no rank dereferences it */
	MPI_Scatter(p,size,particleType,wp,size,particleType,0,MPI_COMM_WORLD);
	/* give the displacement amount to every host */
	MPI_Scatter(displs,1,MPI_INT,&displacement,1,MPI_INT,0,MPI_COMM_WORLD);
	sp = wp+displacement; /* get position of sublist from the displacement */

	for(i = 0; i < numsteps; i++) {
		/* force evaluation: each local particle j against every particle k.
		 * NOTE(review): the inner "parallel for" nests inside the outer one;
		 * unless nested parallelism is enabled it runs serially per thread */
		#pragma omp parallel for shared(wp,force,wsize,sp) private(fx,fy)
		for(j = 0; j < size; j++){ /* Each Particle in sublist */
			fx = 0;
			fy = 0;
			#pragma omp parallel for shared(wp,j,sp) private(dist,f,dx,dy) reduction(+:fx,fy)
			for(k = 0; k < wsize; k++){
				/* calculate the forces of each kth particle on the jth particle */
				if (j != k) {
					dx = (sp[j].position.x - wp[k].position.x);
					dy = (sp[j].position.y - wp[k].position.y);
					dist = hypot(dx,dy);
					if(dist > .03){ /* softening cutoff: ignore very close pairs */
						f = -.02 * sp[j].mass * wp[k].mass;
						fx += dx/dist*f;
						fy += dy/dist*f;
					}
				}
			}
			force[j].x = fx;
			force[j].y = fy;
		}

		/* explicit-Euler update with a crude bounce at the +/-3 walls */
		#pragma omp parallel for shared(sp,force,dt) private(tmp)
		for(j = 0; j < size; j++){
			sp[j].velocity.x += force[j].x/sp[j].mass * dt;
			sp[j].velocity.y += force[j].y/sp[j].mass * dt;
			tmp = sp[j].position.x + sp[j].velocity.x * dt;
			if(tmp <= -3 || tmp >= 3) {
				/* NOTE(review): dividing velocity by -300 on wall contact is
				 * an unusual damping/bounce rule — presumably intentional for
				 * this assignment; confirm against the spec */
				sp[j].velocity.x /= -300;
				tmp = sp[j].position.x + sp[j].velocity.x * dt;
			}
			sp[j].position.x = tmp;
			tmp = sp[j].position.y + sp[j].velocity.y * dt;
			if(tmp <= -3|| tmp >= 3) {
				sp[j].velocity.y /= -300;
				tmp = sp[j].position.y + sp[j].velocity.y * dt;
			}
			sp[j].position.y = tmp;
		}

		/* on each host, gather the new working particle list from all other hosts */
		MPI_Gatherv(sp,size,particleType,wp,sendcounts,displs,particleType,0,MPI_COMM_WORLD);
		MPI_Bcast(wp,wsize,particleType,0,MPI_COMM_WORLD);

		/* The the root finds an error with opening file, tell every host to
		 * shutdown with an error. */
		if(rank == 0){
			snprintf(outputnumbered,maxlength,"%s.%d.txt",output,i);
			outfile = fopen(outputnumbered,"w");
			if(outfile == NULL){
				perror(outputnumbered);
				error = 1;
			}
			else error = 0;
		}
		MPI_Bcast(&error,1,MPI_INT,0,MPI_COMM_WORLD);
		if(error == 1){
			MPI_Finalize();
			exit(1);
		}
		if(rank == 0){
			/* Print the timestep.
			 * NOTE(review): this writes only `size` (rank 0's share) of the
			 * wsize particles — looks like it should loop to wsize; confirm
			 * against the expected output files */
			for(j = 0; j < size; j++){
				fprintf(outfile,"%f %f %f %f %f\n",wp[j].position.x,wp[j].position.y,wp[j].velocity.x,wp[j].velocity.y,wp[j].mass);
			}
			fclose(outfile);
		}
	} /* numsteps for-loop end */

	if(timecheck && rank == 0){
		end = omp_get_wtime();
		printf("%d %f\n",hostcount,end-start);
	}
	MPI_Finalize();
}
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; 
std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. 
SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. 
    void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
             Priority Prio) {
      if (!Locs.empty() && Locs.back().isActive(P)) {
        if (Locs.back().Priority <= Prio) {
          Locs.back().TemplateName = TemplateName;
          Locs.back().LessLoc = LessLoc;
          Locs.back().Priority = Prio;
        }
      } else {
        Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount,
                        P.BracketCount, P.BraceCount});
      }
    }

    /// Mark the current potential missing template location as having been
    /// handled (this happens if we pass a "corresponding" '>' or '>>' token
    /// or leave a bracket scope).
    void clear(Parser &P) {
      while (!Locs.empty() && Locs.back().isActiveOrNested(P))
        Locs.pop_back();
    }

    /// Get the current enclosing expression that might have been intended to be
    /// a template name.
    Loc *getCurrent(Parser &P) {
      if (!Locs.empty() && Locs.back().isActive(P))
        return &Locs.back();
      return nullptr;
    }
  };

  AngleBracketTracker AngleBrackets;

  IdentifierInfo *getSEHExceptKeyword();

  /// True if we are within an Objective-C container while parsing C-like decls.
  ///
  /// This is necessary because Sema thinks we have left the container
  /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
  /// be NULL.
  bool ParsingInObjCContainer;

  /// Whether to skip parsing of function bodies.
  ///
  /// This option can be used, for example, to speed up searches for
  /// declarations/definitions when indexing.
  bool SkipFunctionBodies;

  /// The location of the expression statement that is being parsed right now.
  /// Used to determine if an expression that is being parsed is a statement or
  /// just a regular sub-expression.
  SourceLocation ExprStatementTokLoc;

  /// Flags describing a context in which we're parsing a statement.
  enum class ParsedStmtContext {
    /// This context permits declarations in language modes where declarations
    /// are not statements.
    AllowDeclarationsInC = 0x1,
    /// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. 
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. 
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed, /*IsReinject*/true); PP.Lex(Tok); PP.EnterToken(Next, /*IsReinject*/true); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. 
/// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. 
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched specific position in the grammar, provide code-completion results
  /// based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// Determine if we're at the end of the file or at a transition
  /// between modules.
  bool isEofOrEom() {
    tok::TokenKind Kind = Tok.getKind();
    return Kind == tok::eof || Kind == tok::annot_module_begin ||
           Kind == tok::annot_module_end || Kind == tok::annot_module_include;
  }

  /// Checks if the \p Level is valid for use in a fold expression.
  bool isFoldOperator(prec::Level Level) const;

  /// Checks if the \p Kind is a valid operator for fold expressions.
  bool isFoldOperator(tok::TokenKind Kind) const;

  /// Initialize all pragma handlers.
  void initializePragmaHandlers();

  /// Destroy and reset all pragma handlers.
  void resetPragmaHandlers();

  /// Handle the annotation token produced for #pragma unused(...)
  void HandlePragmaUnused();

  /// Handle the annotation token produced for
  /// #pragma GCC visibility...
  void HandlePragmaVisibility();

  /// Handle the annotation token produced for
  /// #pragma pack...
  void HandlePragmaPack();

  /// Handle the annotation token produced for
  /// #pragma ms_struct...
  void HandlePragmaMSStruct();

  /// Handle the annotation token produced for
  /// #pragma comment...
void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. 
/// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. 
bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); bool MightBeCXXScopeToken() { return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) || (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) || Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super); } bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) { return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext); } private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. 
bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... 
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. 
class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. 
bool expectIdentifier();

public:
  //===--------------------------------------------------------------------===//
  // Scope manipulation

  /// ParseScope - Introduces a new scope for parsing. The kind of
  /// scope is determined by ScopeFlags. Objects of this type should
  /// be created on the stack to coincide with the position where the
  /// parser enters the new scope, and this object's constructor will
  /// create that new scope. Similarly, once the object is destroyed
  /// the parser will exit the scope.
  class ParseScope {
    // Null once the scope has been exited (or was never entered), so the
    // destructor is idempotent.
    Parser *Self;
    ParseScope(const ParseScope &) = delete;
    void operator=(const ParseScope &) = delete;

  public:
    // ParseScope - Construct a new object to manage a scope in the
    // parser Self where the new Scope is created with the flags
    // ScopeFlags, but only when we aren't about to enter a compound statement.
    ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
               bool BeforeCompoundStmt = false)
        : Self(Self) {
      if (EnteredScope && !BeforeCompoundStmt)
        Self->EnterScope(ScopeFlags);
      else {
        if (BeforeCompoundStmt)
          Self->incrementMSManglingNumber();

        this->Self = nullptr;
      }
    }

    // Exit - Exit the scope associated with this object now, rather
    // than waiting until the object is destroyed.
    void Exit() {
      if (Self) {
        Self->ExitScope();
        Self = nullptr;
      }
    }

    ~ParseScope() {
      Exit();
    }
  };

  /// EnterScope - Start a new scope.
  void EnterScope(unsigned ScopeFlags);

  /// ExitScope - Pop a scope off the scope stack.
  void ExitScope();

private:
  /// RAII object used to modify the scope flags for the current scope.
  class ParseScopeFlags {
    Scope *CurScope;
    // Flags saved at construction and restored by the destructor.
    unsigned OldFlags;
    ParseScopeFlags(const ParseScopeFlags &) = delete;
    void operator=(const ParseScopeFlags &) = delete;

  public:
    ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
    ~ParseScopeFlags();
  };

  //===--------------------------------------------------------------------===//
  // Diagnostic Emission and Error recovery.
public:
  DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
  DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
  // Convenience overload: diagnose at the current token's location.
  DiagnosticBuilder Diag(unsigned DiagID) {
    return Diag(Tok, DiagID);
  }

private:
  void SuggestParentheses(SourceLocation Loc, unsigned DK,
                          SourceRange ParenRange);
  void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
  /// Control flags for SkipUntil functions.
  enum SkipUntilFlags {
    StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
    /// Stop skipping at specified token, but don't skip the token itself
    StopBeforeMatch = 1 << 1,
    StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
  };

  // Allow SkipUntilFlags values to be combined with '|' while keeping the
  // enum type (avoids sprinkling casts at call sites).
  friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                            SkipUntilFlags R) {
    return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                       static_cast<unsigned>(R));
  }

  /// SkipUntil - Read tokens until we get to the specified token, then consume
  /// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
  /// that the token will ever occur, this skips to the next token, or to some
  /// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
  /// stop at a ';' character.  Balances (), [], and {} delimiter tokens while
  /// skipping.
  ///
  /// If SkipUntil finds the specified token, it returns true, otherwise it
  /// returns false.
  // Single-token convenience wrapper over the ArrayRef overload below.
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  // The general form; the wrappers above all funnel into this overload.
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

  /// The location of the first statement inside an else that might
  /// have a misleading indentation. If there is no
  /// MisleadingIndentationChecker on an else active, this location is invalid.
  SourceLocation MisleadingIndentationElseLoc;

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    // Each subclass overrides only the hook(s) relevant to the kind of
    // late-parsed entity it holds; the defaults do nothing.
    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
    virtual void ParseLexedPragmas();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;
    void ParseLexedPragmas() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    // Set when the attribute was written through a macro; null otherwise.
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    // Declarations the attribute applies to once it is finally parsed.
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  /// Contains the lexed tokens of a pragma with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  class LateParsedPragma : public LateParsedDeclaration {
    Parser *Self = nullptr;
    AccessSpecifier AS = AS_none;
    CachedTokens Toks;

  public:
    explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
        : Self(P), AS(AS) {}

    void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
    const CachedTokens &toks() const { return Toks; }
    AccessSpecifier getAccessSpecifier() const { return AS; }

    void ParseLexedPragmas() override;
  };

  // A list of late-parsed attributes.  Used by ParseGNUAttributes.
  class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    /// Whether this member function had an associated template
    /// scope. When true, D is a template declaration.
    /// otherwise, it is a member function declaration.
    bool TemplateScope;

    explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(Decl *P,
                                       std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely-defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser* Self;

    /// Method - The method declaration.
    Decl *Method;

    /// Whether this member function had an associated template
    /// scope. When true, D is a template declaration.
    /// otherwise, it is a member function declaration.
    bool TemplateScope;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must to be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
    CachedTokens Toks;
  };

  /// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
  /// C++ class, its method declarations that contain parts that won't be
  /// parsed until after the definition is completed (C++ [class.mem]p2),
  /// the method declarations and possibly attached inline definitions
  /// will be stored here with the tokens that will be parsed to create those
  /// entities.
  typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

  /// Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

    /// Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// Whether this class had an associated template
    /// scope. When true, TagOrTemplate is a template declaration;
    /// otherwise, it is a tag declaration.
    bool TemplateScope : 1;

    /// Whether this class is an __interface.
    bool IsInterface : 1;

    /// The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };

  /// The stack of classes that is currently being
  /// parsed. Nested and local classes will be pushed onto this stack
  /// when they are parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    // True once Pop() has been called; lets the destructor pop only when
    // the user didn't do it explicitly.
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// Contains information about any template-specific
  /// information that has been parsed prior to parsing declaration
  /// specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

    /// The kind of template we are parsing.
    enum {
      /// We are not parsing a template at all.
      NonTemplate = 0,
      /// We are parsing a template declaration.
      Template,
      /// We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// The template parameter lists, for template declarations
    /// and explicit specializations.
    TemplateParameterLists *TemplateParams;

    /// The location of the 'extern' keyword, if any, for an explicit
    /// instantiation
    SourceLocation ExternLoc;

    /// The location of the 'template' keyword, for an explicit
    /// instantiation.
    SourceLocation TemplateLoc;

    /// Whether the last template parameter list was empty.
    bool LastParameterListWasEmpty;

    SourceRange getSourceRange() const LLVM_READONLY;
  };

  // Late-template-parsing entry points; the static callbacks are installed
  // into Sema so it can trigger late parsing from outside the parser.
  void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
  void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

  static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
  static void LateTemplateParserCleanupCallback(void *P);

  Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate,
                                           bool TopLevelClass, bool IsInterface);
  void DeallocateParsedClasses(ParsingClass *Class);
  void PopParsingClass(Sema::ParsingClassState);

  enum CachedInitKind {
    CIK_DefaultArgument,
    CIK_DefaultInitializer
  };

  NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                     ParsedAttributes &AccessAttrs,
                                     ParsingDeclarator &D,
                                     const ParsedTemplateInfo &TemplateInfo,
                                     const VirtSpecifiers &VS,
                                     SourceLocation PureSpecLoc);
  void ParseCXXNonStaticMemberInitializer(Decl *VarD);
  void ParseLexedAttributes(ParsingClass &Class);
  void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                               bool EnterScope, bool OnDefinition);
  void ParseLexedAttribute(LateParsedAttribute &LA,
                           bool EnterScope, bool OnDefinition);
  void ParseLexedMethodDeclarations(ParsingClass &Class);
  void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
  void ParseLexedMethodDefs(ParsingClass &Class);
  void ParseLexedMethodDef(LexedMethod &LM);
  void ParseLexedMemberInitializers(ParsingClass &Class);
  void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
  void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
  void ParseLexedPragmas(ParsingClass &Class);
  void ParseLexedPragma(LateParsedPragma &LP);
  bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
  bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
  bool ConsumeAndStoreConditional(CachedTokens &Toks);
  // Single-token convenience wrapper over the two-token overload below.
  bool ConsumeAndStoreUntil(tok::TokenKind T1,
                            CachedTokens &Toks,
                            bool StopAtSemi = true,
                            bool ConsumeFinalToken = true) {
    return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
  }
  bool
  ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                       CachedTokens &Toks,
                       bool StopAtSemi = true,
                       bool ConsumeFinalToken = true);

  //===--------------------------------------------------------------------===//
  // C99 6.9: External Definitions.

  // ParsedAttributes that additionally records the source range covered by
  // the attribute list.
  struct ParsedAttributesWithRange : ParsedAttributes {
    ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

    void clear() {
      ParsedAttributes::clear();
      Range = SourceRange();
    }

    SourceRange Range;
  };
  // Non-owning view counterpart of ParsedAttributesWithRange.
  struct ParsedAttributesViewWithRange : ParsedAttributesView {
    ParsedAttributesViewWithRange() : ParsedAttributesView() {}
    void clearListOnly() {
      ParsedAttributesView::clearListOnly();
      Range = SourceRange();
    }

    SourceRange Range;
  };

  DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                          ParsingDeclSpec *DS = nullptr);
  bool isDeclarationAfterDeclarator();
  bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
  DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
      ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr,
      AccessSpecifier AS = AS_none);
  DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                                ParsingDeclSpec &DS,
                                                AccessSpecifier AS);

  void SkipFunctionBody();
  Decl *ParseFunctionDefinition(ParsingDeclarator &D,
                 const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
                 LateParsedAttrList *LateParsedAttrs = nullptr);
  void ParseKNRParamDeclarations(Declarator &D);
  // EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. 
  void parseObjCTypeArgsAndProtocolQualifiers(
         ParsedType baseType,
         SourceLocation &typeArgsLAngleLoc,
         SmallVectorImpl<ParsedType> &typeArgs,
         SourceLocation &typeArgsRAngleLoc,
         SourceLocation &protocolLAngleLoc,
         SmallVectorImpl<Decl *> &protocols,
         SmallVectorImpl<SourceLocation> &protocolLocs,
         SourceLocation &protocolRAngleLoc,
         bool consumeLastToken);

  /// Parse a protocol qualifier type such as '<NSCopying>', which is
  /// an anachronistic way of writing 'id<NSCopying>'.
  TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

  /// Parse Objective-C type arguments and protocol qualifiers, extending the
  /// current type with the parsed result.
  TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                    ParsedType type,
                                                    bool consumeLastToken,
                                                    SourceLocation &endLoc);

  void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                  Decl *CDecl);
  DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                                ParsedAttributes &prefixAttrs);

  // RAII tracking the parse of an @implementation; collects method bodies
  // so they can be parsed after the @end is seen.
  struct ObjCImplParsingDataRAII {
    Parser &P;
    Decl *Dcl;
    bool HasCFunction;
    typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
    LateParsedObjCMethodContainer LateParsedObjCMethods;

    ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false) {
      P.CurParsedObjCImpl = this;
      Finished = false;
    }
    ~ObjCImplParsingDataRAII();

    void finish(SourceRange AtEnd);
    bool isFinished() const { return Finished; }

  private:
    bool Finished;
  };
  ObjCImplParsingDataRAII *CurParsedObjCImpl;
  void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

  DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                      ParsedAttributes &Attrs);
  DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
  Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
  Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
  Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

  IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
  // Definitions for Objective-c context sensitive keywords recognition.
  enum ObjCTypeQual {
    objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
    objc_nonnull, objc_nullable, objc_null_unspecified,
    objc_NumQuals
  };
  IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

  bool isTokIdentifier_in() const;

  ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                               ParsedAttributes *ParamAttrs);
  void ParseObjCMethodRequirement();
  Decl *ParseObjCMethodPrototype(
            tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
            bool MethodDefinition = true);
  Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
            tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
            bool MethodDefinition=true);
  void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

  Decl *ParseObjCMethodDefinition();

public:
  //===--------------------------------------------------------------------===//
  // C99 6.5: Expressions.

  /// TypeCastState - State whether an expression is or may be a type cast.
  enum TypeCastState {
    NotTypeCast = 0,
    MaybeTypeCast,
    IsTypeCast
  };

  ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpressionInExprEvalContext(
      TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseCaseExpression(SourceLocation CaseLoc);
  ExprResult ParseConstraintExpression();
  ExprResult
  ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
  ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
  // Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); /// Control what ParseCastExpression will parse. enum CastParseKind { AnyCastExpr = 0, UnaryExprOnly, PrimaryExprOnly }; ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. 
  // Returns true when the current token can begin a postfix-expression
  // suffix: subscript, call, member access, or ++/--.
  bool isPostfixExpressionSuffixStart() {
    tok::TokenKind K = Tok.getKind();
    return (K == tok::l_square || K == tok::l_paren ||
            K == tok::period || K == tok::arrow ||
            K == tok::plusplus || K == tok::minusminus);
  }

  bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
  void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
  bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                           const Token &OpToken);
  // Convenience wrapper: consults the innermost tracked angle bracket, if any.
  bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
    if (auto *Info = AngleBrackets.getCurrent(*this))
      return checkPotentialAngleBracketDelimiter(*Info, OpToken);
    return false;
  }

  ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
  ExprResult ParseUnaryExprOrTypeTraitExpression();
  ExprResult ParseBuiltinPrimaryExpression();

  ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                                bool &isCastExpr,
                                                ParsedType &CastTy,
                                                SourceRange &CastRange);

  typedef SmallVector<Expr*, 20> ExprListTy;
  typedef SmallVector<SourceLocation, 20> CommaLocsTy;

  /// ParseExpressionList - Used for C/C++ (argument-)expression-list.
  bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
                           SmallVectorImpl<SourceLocation> &CommaLocs,
                           llvm::function_ref<void()> ExpressionStarts =
                               llvm::function_ref<void()>());

  /// ParseSimpleExpressionList - A simple comma-separated list of expressions,
  /// used for misc language extensions.
  bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
                                 SmallVectorImpl<SourceLocation> &CommaLocs);

  /// ParenParseOption - Control what ParseParenExpression will parse.
  enum ParenParseOption {
    SimpleExpr,      // Only parse '(' expression ')'
    FoldExpr,        // Also allow fold-expression <anything>
    CompoundStmt,    // Also allow '(' compound-statement ')'
    CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
    CastExpr         // Also allow '(' type-name ')' <anything>
  };
  ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                  bool stopIfCastExpr,
                                  bool isTypeCast,
                                  ParsedType &CastTy,
                                  SourceLocation &RParenLoc);

  ExprResult ParseCXXAmbiguousParenExpression(
      ParenParseOption &ExprType, ParsedType &CastTy,
      BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
  ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                            SourceLocation LParenLoc,
                                            SourceLocation RParenLoc);

  ExprResult ParseGenericSelectionExpression();

  ExprResult ParseObjCBoolLiteral();

  ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

  //===--------------------------------------------------------------------===//
  // C++ Expressions
  ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                     Token &Replacement);
  ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

  bool areTokensAdjacent(const Token &A, const Token &B);

  void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                  bool EnteringContext, IdentifierInfo &II,
                                  CXXScopeSpec &SS);

  bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
                                      ParsedType ObjectType,
                                      bool EnteringContext,
                                      bool *MayBePseudoDestructor = nullptr,
                                      bool IsTypename = false,
                                      IdentifierInfo **LastII = nullptr,
                                      bool OnlyNamespace = false,
                                      bool InUsingDeclaration = false);

  //===--------------------------------------------------------------------===//
  // C++11 5.1.2: Lambda expressions

  /// Result of tentatively parsing a lambda-introducer.
  enum class LambdaIntroducerTentativeParse {
    /// This appears to be a lambda-introducer, which has been fully parsed.
    Success,
    /// This is a lambda-introducer, but has not been fully parsed, and this
    /// function needs to be called again to parse it.
    Incomplete,
    /// This is definitely an Objective-C message send expression, rather than
    /// a lambda-introducer, attribute-specifier, or array designator.
    MessageSend,
    /// This is not a lambda-introducer.
    Invalid,
  };

  // [...] () -> type {...}
  ExprResult ParseLambdaExpression();
  ExprResult TryParseLambdaExpression();
  bool
  ParseLambdaIntroducer(LambdaIntroducer &Intro,
                        LambdaIntroducerTentativeParse *Tentative = nullptr);
  ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Casts
  ExprResult ParseCXXCasts();

  /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
  ExprResult ParseBuiltinBitCast();

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Type Identification
  ExprResult ParseCXXTypeid();

  //===--------------------------------------------------------------------===//
  //  C++ : Microsoft __uuidof Expression
  ExprResult ParseCXXUuidof();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.4: C++ Pseudo-Destructor Expressions
  ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                      tok::TokenKind OpKind,
                                      CXXScopeSpec &SS,
                                      ParsedType ObjectType);

  //===--------------------------------------------------------------------===//
  // C++ 9.3.2: C++ 'this' pointer
  ExprResult ParseCXXThis();

  //===--------------------------------------------------------------------===//
  // C++ 15: C++ Throw Expression
  ExprResult ParseThrowExpression();

  ExceptionSpecificationType tryParseExceptionSpecification(
                    bool Delayed,
                    SourceRange &SpecificationRange,
                    SmallVectorImpl<ParsedType> &DynamicExceptions,
                    SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
                    ExprResult &NoexceptExpr,
                    CachedTokens *&ExceptionSpecTokens);

  // EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. 
struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C++ Concepts ExprResult ParseRequiresExpression(); void ParseTrailingRequiresClause(Declarator &D); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator( llvm::function_ref<void(const Designation &)> CodeCompleteCB); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult 
ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt); StmtResult ParseStatementOrDeclaration( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(ParsedStmtContext StmtCtx); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs, ParsedStmtContext StmtCtx); StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx, bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); bool ConsumeNullStmt(StmtVector &Stmts); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK); StmtResult ParseIfStatement(SourceLocation 
*TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// Parse the block; this code is always used. IEB_Parse, /// Skip the block entirely; this code is never used. IEB_Skip, /// Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// The location of the initial keyword. SourceLocation KeywordLoc; /// Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// Nested-name-specifier preceding the name. CXXScopeSpec SS; /// The name we're looking for. UnqualifiedId Name; /// The behavior of this __if_exists or __if_not_exists block /// should. 
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. 
enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. struct ForRangeInit { SourceLocation ColonLoc; ExprResult RangeExpr; bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); } }; struct ForRangeInfo : ForRangeInit { StmtResult LoopVar; }; DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, SourceLocation *DeclSpecStart = nullptr); DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, bool RequireSemi, ForRangeInit *FRI = nullptr, SourceLocation *DeclSpecStart = nullptr); bool MightBeDeclarator(DeclaratorContext Context); DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context, SourceLocation *DeclEnd = nullptr, ForRangeInit *FRI = nullptr); Decl *ParseDeclarationAfterDeclarator(Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo()); bool ParseAsmAttributesAfterDeclarator(Declarator &D); Decl *ParseDeclarationAfterDeclaratorAndAttributes( Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ForRangeInit *FRI = nullptr); Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope); Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope); /// When in code-completion, skip parsing of the function/method body /// unless 
the body contains the code-completion point. /// /// \returns true if the function body was skipped. bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context); void ParseDeclarationSpecifiers( DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition( DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList( DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType, Decl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// Return true if we know that we are definitely looking at a /// decl-specifier, and isn't part of an expression such as a function-style /// cast. Return false if it's no a decl-specifier, or we're not sure. 
bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration or an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression. bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration or an /// expression in the context of the C 'clause-1' or the C++ // 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().OpenMP) Actions.startOpenMPLoop(); if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false); /// Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. 
bool isTypeIdInParens(bool &isAmbiguous) { if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdInParens, isAmbiguous); isAmbiguous = false; return isTypeSpecifierQualifier(); } bool isTypeIdInParens() { bool isAmbiguous; return isTypeIdInParens(isAmbiguous); } /// Checks if the current tokens form type-id or expression. /// It is similar to isTypeIdInParens but does not suppose that type-id /// is in parenthesis. bool isTypeIdUnambiguously() { bool IsAmbiguous; if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous); return isTypeSpecifierQualifier(); } /// isCXXDeclarationStatement - C++-specialized function that disambiguates /// between a declaration or an expression statement, when parsing function /// bodies. Returns true for declaration, false for expression. bool isCXXDeclarationStatement(); /// isCXXSimpleDeclaration - C++-specialized function that disambiguates /// between a simple-declaration or an expression-statement. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. /// Returns false if the statement is disambiguated as expression. bool isCXXSimpleDeclaration(bool AllowForRangeDecl); /// isCXXFunctionDeclarator - Disambiguates between a function declarator or /// a constructor-style initializer, when parsing declaration statements. /// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). 
ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Based only on the given token kind, determine whether we know that /// we're at the start of an expression or a type-specifier-seq (which may /// be an expression, in C++). /// /// This routine does not attempt to resolve any of the trick cases, e.g., /// those involving lookup of identifiers. /// /// \returns \c TPR_true if this token starts an expression, \c TPR_false if /// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot /// tell. TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. 
TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. 
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); /// Try to skip a possibly empty sequence of 'attribute-specifier's without /// full validation of the syntactic structure of attributes. bool TrySkipAttributes(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, DeclaratorContext Context = DeclaratorContext::TypeNameContext, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); /// Are [[]] attributes enabled? bool standardAttributesAllowed() const { const LangOptions &LO = getLangOpts(); return LO.DoubleSquareBracketAttributes; } // Check for the start of an attribute-specifier-seq in a context where an // attribute is not allowed. 
bool CheckProhibitedCXX11Attribute() { assert(Tok.is(tok::l_square)); if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square)) return false; return DiagnoseProhibitedCXX11Attribute(); } bool DiagnoseProhibitedCXX11Attribute(); void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation) { if (!standardAttributesAllowed()) return; if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) && Tok.isNot(tok::kw_alignas)) return; DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation); } void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation); void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs, DeclSpec &DS, Sema::TagUseKind TUK); // FixItLoc = possible correct location for the attributes void ProhibitAttributes(ParsedAttributesWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clear(); } void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clearListOnly(); } void DiagnoseProhibitedAttributes(const SourceRange &Range, SourceLocation FixItLoc); // Forbid C++11 and C2x attributes that appear on certain syntactic locations // which standard permits but we don't supported yet, for example, attributes // appertain to decl specifiers. void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs, unsigned DiagID); /// Skip C++11 and C2x attributes and return the end location of the /// last one. /// \returns SourceLocation() if there are no attributes. SourceLocation SkipCXX11Attributes(); /// Diagnose and skip C++11 and C2x attributes that appear in syntactic /// locations where attributes are not allowed. 
void DiagnoseAndSkipCXX11Attributes(); /// Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute. unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() 
&& isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void 
ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax 
Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);

ExprResult ParseAlignArgument(SourceLocation Start,
                              SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                             SourceLocation *endLoc = nullptr);

VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  // Convenience overload: classify the token currently being looked at.
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                        SourceLocation FriendLoc);

bool isCXX11FinalKeyword() const;

/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;
  bool EnteredScope; // Sema was notified that we entered the declarator scope.
  bool CreatedScope; // A parser Scope was pushed and must be popped on exit.
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");

    // Record the pushed parser scope *before* notifying Sema, so the
    // destructor pops it even if Sema rejects the scope entry below.
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.

    // NOTE(review): EnteredScope is set only when ActOnCXXEnterDeclaratorScope
    // returns false — presumably false signals success; confirm against Sema.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }

  ~DeclaratorScopeObj() {
    // Unwind in reverse order of entry: tell Sema we are leaving the
    // declarator scope first, then pop the parser scope.
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};

/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);

/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); void InitCXXThisScopeForDeclaratorIfRelevant( const Declarator &D, const DeclSpec &DS, llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( DeclaratorContext DeclaratorContext, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. 
enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator 
&D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] 
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse a property kind into \p TIProperty for the selector set \p Set and /// selector \p Selector. void parseOMPTraitPropertyKind(OMPTraitInfo::OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set, llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector kind into \p TISelector for the selector set \p Set. void parseOMPTraitSelectorKind(OMPTraitInfo::OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector set kind into \p TISet. void parseOMPTraitSetKind(OMPTraitInfo::OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context property. void parseOMPContextProperty(OMPTraitInfo::OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context selector. void parseOMPContextSelector(OMPTraitInfo::OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &SeenSelectors); /// Parses an OpenMP context selector set. 
void parseOMPContextSelectorSet(OMPTraitInfo::OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets); /// Parses OpenMP context selectors. bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI); /// Parse clauses for '#pragma omp declare variant'. void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. 
StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. 
/// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers> MapTypeModifiers; SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers> MapTypeModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLastLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] 
map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); NamedDecl * ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool 
ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); 
//===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
GB_unop__identity_fc64_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__identity_fc64_int16)
// op(A') function: GB (_unop_tran__identity_fc64_int16)

// C type:   GxB_FC64_t
// A type:   int16_t
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity (value is unchanged, only the type is cast)
#define GB_OP(z, x) \
    z = x ;

// casting: int16 value becomes the real part of a double complex, imag = 0
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity op entrywise: each int16 entry of A is cast to a
// double-complex entry of C.  Returns GrB_NO_VALUE when this kernel has been
// compiled out (GB_DISABLE), so the caller falls back to the generic method.

GrB_Info GB (_unop_apply__identity_fc64_int16)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop bodies live in GB_unop_transpose.c, which expands using the
// GB_CAST_OP macro defined above.

GrB_Info GB (_unop_tran__identity_fc64_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nqueens.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* * Original code from the Cilk project (by Keith Randall) * * Copyright (c) 2000 Massachusetts Institute of Technology * Copyright (c) 2000 Matteo Frigo */ #include <stdlib.h> #include <stdio.h> #include <memory.h> #include <alloca.h> #include "bots.h" #include "app-desc.h" #include <omp.h> /* Checking information */ static int solutions[] = { 1, 0, 0, 2, 10, /* 5 */ 4, 40, 92, 352, 724, /* 10 */ 2680, 14200, 73712, 365596, }; #define MAX_SOLUTIONS sizeof(solutions)/sizeof(int) #ifdef FORCE_TIED_TASKS int mycount=0; #pragma omp threadprivate(mycount) #endif int total_count; /* * <a> contains array of <n> queen positions. Returns 1 * if none of the queens conflict, and returns 0 otherwise. 
 */

/* Returns 1 if the first <n> queens in <a> are mutually non-attacking,
 * 0 otherwise.  a[i] is the column of the queen on row i; two queens
 * attack iff they share a column or a diagonal. */
int ok(int n, char *a)
{
   int i, j;
   char p, q;

   for (i = 0; i < n; i++) {
      p = a[i];
      for (j = i + 1; j < n; j++) {
         q = a[j];
         /* same column, or same anti-diagonal / diagonal */
         if (q == p || q == p - (j - i) || q == p + (j - i))
            return 0;
      }
   }
   return 1;
}

/* Sequential solver: counts placements of queens j..n-1 given the first j
 * rows already placed in <a>.  Without FORCE_TIED_TASKS the count is
 * returned through *solutions; with it, the threadprivate counter
 * <mycount> is incremented instead. */
#ifndef FORCE_TIED_TASKS
void nqueens_ser (int n, int j, char *a, int *solutions)
#else
void nqueens_ser (int n, int j, char *a)
#endif
{
#ifndef FORCE_TIED_TASKS
   int res;
#endif
   int i;

   if (n == j) {
      /* good solution, count it */
#ifndef FORCE_TIED_TASKS
      *solutions = 1;
#else
      mycount++;
#endif
      return;
   }

#ifndef FORCE_TIED_TASKS
   *solutions = 0;
#endif

   /* try each possible position for queen <j> */
   for (i = 0; i < n; i++) {
      {
         /* allocate a temporary array and copy <a> into it */
         /* (serial code reuses <a> in place; no copy is actually needed) */
         a[j] = (char) i;
         if (ok(j + 1, a)) {
#ifndef FORCE_TIED_TASKS
            nqueens_ser(n, j + 1, a,&res);
            *solutions += res;
#else
            nqueens_ser(n, j + 1, a);
#endif
         }
      }
   }
}

#if defined(IF_CUTOFF)

/* Task-parallel solver, IF_CUTOFF variant: tasks deeper than
 * bots_cutoff_value are created with if(0), i.e. executed immediately
 * by the encountering thread. */
#ifndef FORCE_TIED_TASKS
void nqueens(int n, int j, char *a, int *solutions, int depth)
#else
void nqueens(int n, int j, char *a, int depth)
#endif
{
#ifndef FORCE_TIED_TASKS
   int *csols;
#endif
   int i;

   if (n == j) {
      /* good solution, count it */
#ifndef FORCE_TIED_TASKS
      *solutions = 1;
#else
      mycount++;
#endif
      return;
   }

#ifndef FORCE_TIED_TASKS
   /* per-child partial counts; summed after the taskwait */
   *solutions = 0;
   csols = alloca(n*sizeof(int));
   memset(csols,0,n*sizeof(int));
#endif

   /* try each possible position for queen <j> */
   for (i = 0; i < n; i++) {
      #pragma omp task untied if(depth < bots_cutoff_value)
      {
         /* allocate a temporary array and copy <a> into it */
         char * b = alloca(n * sizeof(char));
         memcpy(b, a, j * sizeof(char));
         b[j] = (char) i;
         if (ok(j + 1, b))
#ifndef FORCE_TIED_TASKS
            nqueens(n, j + 1, b,&csols[i],depth+1);
#else
            nqueens(n, j + 1, b,depth+1);
#endif
      }
   }

   #pragma omp taskwait

#ifndef FORCE_TIED_TASKS
   for ( i = 0; i < n; i++) *solutions += csols[i];
#endif
}

#elif defined(FINAL_CUTOFF)

/* Task-parallel solver, FINAL_CUTOFF variant: tasks past the cutoff are
 * created final(+mergeable), so their descendants run as included tasks;
 * inside a final task the parent's buffers are reused instead of copied. */
#ifndef FORCE_TIED_TASKS
void nqueens(int n, int j, char *a, int *solutions, int depth)
#else
void nqueens(int n, int j, char *a, int depth)
#endif
{
#ifndef FORCE_TIED_TASKS
   int *csols;
#endif
   int i;

   if (n == j) {
      /* good solution, count it */
#ifndef FORCE_TIED_TASKS
      /* += (not =): inside a final task *solutions accumulates across
         several included child calls that share the parent's slot */
      *solutions += 1;
#else
      mycount++;
#endif
      return;
   }

#ifndef FORCE_TIED_TASKS
   char final = omp_in_final();
   if ( !final ) {
      *solutions = 0;
      csols = alloca(n*sizeof(int));
      memset(csols,0,n*sizeof(int));
   }
#endif

   /* try each possible position for queen <j> */
   for (i = 0; i < n; i++) {
      #pragma omp task untied final(depth+1 >= bots_cutoff_value) mergeable
      {
         char *b;
         int *sol;
         if ( omp_in_final() && depth+1 > bots_cutoff_value ) {
            /* already inside a final region: run serially on the parent's
               board and accumulate directly into the parent's counter */
            b = a;
#ifndef FORCE_TIED_TASKS
            sol = solutions;
#endif
         } else {
            /* allocate a temporary array and copy <a> into it */
            b = alloca(n * sizeof(char));
            memcpy(b, a, j * sizeof(char));
#ifndef FORCE_TIED_TASKS
            sol = &csols[i];
#endif
         }
         b[j] = i;
         if (ok(j + 1, b))
#ifndef FORCE_TIED_TASKS
            nqueens(n, j + 1, b,sol,depth+1);
#else
            nqueens(n, j + 1, b,depth+1);
#endif
      }
   }

   #pragma omp taskwait

#ifndef FORCE_TIED_TASKS
   if ( !final ) {
      for ( i = 0; i < n; i++) *solutions += csols[i];
   }
#endif
}

#elif defined(MANUAL_CUTOFF)

/* Task-parallel solver, MANUAL_CUTOFF variant: below the cutoff a task is
 * spawned per candidate column; past it the serial solver is called
 * directly on the parent's board. */
#ifndef FORCE_TIED_TASKS
void nqueens(int n, int j, char *a, int *solutions, int depth)
#else
void nqueens(int n, int j, char *a, int depth)
#endif
{
#ifndef FORCE_TIED_TASKS
   int *csols;
#endif
   int i;

   if (n == j) {
      /* good solution, count it */
#ifndef FORCE_TIED_TASKS
      *solutions = 1;
#else
      mycount++;
#endif
      return;
   }

#ifndef FORCE_TIED_TASKS
   *solutions = 0;
   csols = alloca(n*sizeof(int));
   memset(csols,0,n*sizeof(int));
#endif

   /* try each possible position for queen <j> */
   for (i = 0; i < n; i++) {
      if ( depth < bots_cutoff_value ) {
         #pragma omp task untied
         {
            /* allocate a temporary array and copy <a> into it */
            char * b = alloca(n * sizeof(char));
            memcpy(b, a, j * sizeof(char));
            b[j] = (char) i;
            if (ok(j + 1, b))
#ifndef FORCE_TIED_TASKS
               nqueens(n, j + 1, b,&csols[i],depth+1);
#else
               nqueens(n, j + 1, b,depth+1);
#endif
         }
      } else {
         /* past the cutoff: no task, solve serially in place */
         a[j] = (char) i;
         if (ok(j + 1, a))
#ifndef FORCE_TIED_TASKS
            nqueens_ser(n, j + 1, a,&csols[i]);
#else
            nqueens_ser(n, j + 1, a);
#endif
      }
   }

   #pragma omp taskwait

#ifndef FORCE_TIED_TASKS
   for ( i = 0; i < n; i++) *solutions += csols[i];
#endif
}

#else

/* Task-parallel solver, no-cutoff (default) variant: one task per candidate
 * column at every level.  <depth> is threaded through but unused here
 * (no cutoff clause), hence the original FIXME below. */
#ifndef FORCE_TIED_TASKS
void nqueens(int n, int j, char *a, int *solutions, int depth)
#else
void nqueens(int n, int j, char *a, int depth)
#endif
{
#ifndef FORCE_TIED_TASKS
   int *csols;
#endif
   int i;

   if (n == j) {
      /* good solution, count it */
#ifndef FORCE_TIED_TASKS
      *solutions = 1;
#else
      mycount++;
#endif
      return;
   }

#ifndef FORCE_TIED_TASKS
   *solutions = 0;
   csols = alloca(n*sizeof(int));
   memset(csols,0,n*sizeof(int));
#endif

   /* try each possible position for queen <j> */
   for (i = 0; i < n; i++) {
      #pragma omp task untied
      {
         /* allocate a temporary array and copy <a> into it */
         char * b = alloca(n * sizeof(char));
         memcpy(b, a, j * sizeof(char));
         b[j] = (char) i;
         if (ok(j + 1, b))
#ifndef FORCE_TIED_TASKS
            nqueens(n, j + 1, b,&csols[i],depth); //FIXME: depth or depth+1 ???
#else
            nqueens(n, j + 1, b,depth); //FIXME: see above
#endif
      }
   }

   #pragma omp taskwait

#ifndef FORCE_TIED_TASKS
   for ( i = 0; i < n; i++) *solutions += csols[i];
#endif
}
#endif

/* Top-level driver: counts all solutions for an <size> x <size> board into
 * the global total_count.  A single task region spawns the root call; with
 * FORCE_TIED_TASKS each thread's private count is reduced at the end. */
void find_queens (int size)
{
   total_count=0;

   bots_message("Computing N-Queens algorithm (n=%d) ", size);

   #pragma omp parallel
   {
      #pragma omp single
      {
         char *a;

         a = alloca(size * sizeof(char));
#ifndef FORCE_TIED_TASKS
         nqueens(size, 0, a, &total_count,0);
#else
         nqueens(size, 0, a, 0);
#endif
      }
#ifdef FORCE_TIED_TASKS
      #pragma omp atomic
         total_count += mycount;
#endif
   }

   bots_message(" completed!\n");
}

/* Checks total_count against the table of known solution counts.
 * NOTE(review): size (int) is compared against MAX_SOLUTIONS (a size_t
 * sizeof expression) — a signed/unsigned comparison; harmless for the
 * positive sizes used here. */
int verify_queens (int size)
{
   if ( size > MAX_SOLUTIONS ) return BOTS_RESULT_NA;
   if ( total_count == solutions[size-1]) return BOTS_RESULT_SUCCESSFUL;
   return BOTS_RESULT_UNSUCCESSFUL;
}
nvector_openmpdev.c
/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2020, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for an OpenMP DEV implementation * of the NVECTOR module. * -----------------------------------------------------------------*/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <nvector/nvector_openmpdev.h> #include <sundials/sundials_math.h> #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private functions for special cases of vector operations */ static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */ static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */ static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */ static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */ static void 
VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */ /* Private functions for special cases of vector array operations */ static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */ static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */ static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */ static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */ static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */ static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */ static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */ /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. 
*/ N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v) { return SUNDIALS_NVEC_OPENMPDEV; } /* ---------------------------------------------------------------------------- * Function to create a new empty vector */ N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length) { N_Vector v; N_VectorContent_OpenMPDEV content; /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV; v->ops->nvclone = N_VClone_OpenMPDEV; v->ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV; v->ops->nvdestroy = N_VDestroy_OpenMPDEV; v->ops->nvspace = N_VSpace_OpenMPDEV; v->ops->nvgetlength = N_VGetLength_OpenMPDEV; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_OpenMPDEV; v->ops->nvconst = N_VConst_OpenMPDEV; v->ops->nvprod = N_VProd_OpenMPDEV; v->ops->nvdiv = N_VDiv_OpenMPDEV; v->ops->nvscale = N_VScale_OpenMPDEV; v->ops->nvabs = N_VAbs_OpenMPDEV; v->ops->nvinv = N_VInv_OpenMPDEV; v->ops->nvaddconst = N_VAddConst_OpenMPDEV; v->ops->nvdotprod = N_VDotProd_OpenMPDEV; v->ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV; v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV; v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV; v->ops->nvmin = N_VMin_OpenMPDEV; v->ops->nvwl2norm = N_VWL2Norm_OpenMPDEV; v->ops->nvl1norm = N_VL1Norm_OpenMPDEV; v->ops->nvcompare = N_VCompare_OpenMPDEV; v->ops->nvinvtest = N_VInvTest_OpenMPDEV; v->ops->nvconstrmask = N_VConstrMask_OpenMPDEV; v->ops->nvminquotient = N_VMinQuotient_OpenMPDEV; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProd_OpenMPDEV; v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMPDEV; v->ops->nvminlocal = N_VMin_OpenMPDEV; v->ops->nvl1normlocal = N_VL1Norm_OpenMPDEV; v->ops->nvinvtestlocal = N_VInvTest_OpenMPDEV; v->ops->nvconstrmasklocal = N_VConstrMask_OpenMPDEV; v->ops->nvminquotientlocal = N_VMinQuotient_OpenMPDEV; 
v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMPDEV; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMPDEV; /* Create content */ content = NULL; content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = length; content->own_data = SUNFALSE; content->host_data = NULL; content->dev_data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Function to create a new vector */ N_Vector N_VNew_OpenMPDEV(sunindextype length) { N_Vector v; realtype *data; realtype *dev_data; int dev; v = NULL; v = N_VNewEmpty_OpenMPDEV(length); if (v == NULL) return(NULL); /* Create data */ if (length > 0) { /* Update ownership */ NV_OWN_DATA_OMPDEV(v) = SUNTRUE; /* Allocate memory on host */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if (data == NULL) { N_VDestroy(v); return(NULL); } /* Allocate memory on device */ dev = omp_get_default_device(); dev_data = omp_target_alloc(length * sizeof(realtype), dev); if (dev_data == NULL) { N_VDestroy(v); return(NULL); } /* Attach data */ NV_DATA_HOST_OMPDEV(v) = data; NV_DATA_DEV_OMPDEV(v) = dev_data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create a vector with user data component */ N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata, realtype *d_vdata) { N_Vector v; int dev, host; if (h_vdata == NULL || d_vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_OpenMPDEV(length); if (v == NULL) return(NULL); if (length > 0) { /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Attach data */ NV_OWN_DATA_OMPDEV(v) = SUNFALSE; NV_DATA_HOST_OMPDEV(v) = h_vdata; NV_DATA_DEV_OMPDEV(v) = d_vdata; } return(v); } /* ---------------------------------------------------------------------------- * Function 
to create an array of new vectors. */ N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w) { N_Vector *vs; int j; if (count <= 0) return(NULL); vs = NULL; vs = (N_Vector *) malloc(count * sizeof(N_Vector)); if(vs == NULL) return(NULL); for (j = 0; j < count; j++) { vs[j] = NULL; vs[j] = N_VClone_OpenMPDEV(w); if (vs[j] == NULL) { N_VDestroyVectorArray_OpenMPDEV(vs, j-1); return(NULL); } } return(vs); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors with NULL data array. */ N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w) { N_Vector *vs; int j; if (count <= 0) return(NULL); vs = NULL; vs = (N_Vector *) malloc(count * sizeof(N_Vector)); if(vs == NULL) return(NULL); for (j = 0; j < count; j++) { vs[j] = NULL; vs[j] = N_VCloneEmpty_OpenMPDEV(w); if (vs[j] == NULL) { N_VDestroyVectorArray_OpenMPDEV(vs, j-1); return(NULL); } } return(vs); } /* ---------------------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_OpenMPDEV */ void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count) { int j; for (j = 0; j < count; j++) N_VDestroy_OpenMPDEV(vs[j]); free(vs); vs = NULL; return; } /* ---------------------------------------------------------------------------- * Function to return number of vector elements */ sunindextype N_VGetLength_OpenMPDEV(N_Vector v) { return NV_LENGTH_OMPDEV(v); } /* ---------------------------------------------------------------------------- * Function to return a pointer to the data array on the host. */ realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v) { return((realtype *) NV_DATA_HOST_OMPDEV(v)); } /* ---------------------------------------------------------------------------- * Function to return a pointer to the data array on the device. 
*/
realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v)
{
  return((realtype *) NV_DATA_DEV_OMPDEV(v));
}

/* ----------------------------------------------------------------------------
 * Function to print a vector to stdout.
 */
void N_VPrint_OpenMPDEV(N_Vector x)
{
  N_VPrintFile_OpenMPDEV(x, stdout);
}

/* ----------------------------------------------------------------------------
 * Function to print a vector to outfile.
 *
 * NOTE(review): this prints the HOST array only; if the device copy is
 * current the caller must run N_VCopyFromDevice_OpenMPDEV first -- confirm
 * against callers.
 */
void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile)
{
  sunindextype i, N;
  realtype *xd;

  xd = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd = NV_DATA_HOST_OMPDEV(x);

  for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
    fprintf(outfile, "%11.8Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
    fprintf(outfile, "%11.8g\n", xd[i]);
#else
    fprintf(outfile, "%11.8g\n", xd[i]);
#endif
  }
  fprintf(outfile, "\n");

  return;
}

/* ----------------------------------------------------------------------------
 * Function to copy host array into device array.
 */
void N_VCopyToDevice_OpenMPDEV(N_Vector x)
{
  int dev, host;
  sunindextype length;
  realtype *host_ptr;
  realtype *dev_ptr;

  /* Get array information */
  length = NV_LENGTH_OMPDEV(x);
  host_ptr = NV_DATA_HOST_OMPDEV(x);
  dev_ptr = NV_DATA_DEV_OMPDEV(x);

  /* Get device and host identifiers */
  dev = omp_get_default_device();
  host = omp_get_initial_device();

  /* Copy array from host to device */
  /* NOTE(review): omp_target_memcpy returns nonzero on failure; the return
     value is ignored here and in N_VCopyFromDevice -- consider checking. */
  omp_target_memcpy(dev_ptr, host_ptr, sizeof(realtype) * length, 0, 0, dev, host);

  return;
}

/* ----------------------------------------------------------------------------
 * Function to copy device array into host array.
 */
void N_VCopyFromDevice_OpenMPDEV(N_Vector x)
{
  int dev, host;
  sunindextype length;
  realtype *host_ptr;
  realtype *dev_ptr;

  /* Get array information */
  length = NV_LENGTH_OMPDEV(x);
  host_ptr = NV_DATA_HOST_OMPDEV(x);
  dev_ptr = NV_DATA_DEV_OMPDEV(x);

  /* Get device and host identifiers */
  dev = omp_get_default_device();
  host = omp_get_initial_device();

  /* Copy array from device to host (return value ignored, see note above) */
  omp_target_memcpy(host_ptr, dev_ptr, sizeof(realtype) * length, 0, 0, host, dev);

  return;
}

/*
 * -----------------------------------------------------------------
 * implementation of vector operations
 * -----------------------------------------------------------------
 */

/* ----------------------------------------------------------------------------
 * Create new vector from existing vector without attaching data.
 *
 * Copies w's length and operations table; the clone owns no buffers.
 */
N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w)
{
  N_Vector v;
  N_VectorContent_OpenMPDEV content;

  if (w == NULL) return(NULL);

  /* Create vector */
  v = NULL;
  v = N_VNewEmpty();
  if (v == NULL) return(NULL);

  /* Attach operations */
  if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }

  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }

  /* Attach content */
  v->content = content;

  /* Initialize content */
  content->length = NV_LENGTH_OMPDEV(w);
  content->own_data = SUNFALSE;
  content->host_data = NULL;
  content->dev_data = NULL;

  return(v);
}

/* ----------------------------------------------------------------------------
 * Create new vector from existing vector and attach data.
 *
 * Like N_VCloneEmpty_OpenMPDEV but also allocates owned host and device
 * buffers of the same length (contents are NOT copied from w).
 */
N_Vector N_VClone_OpenMPDEV(N_Vector w)
{
  N_Vector v;
  realtype *data;
  realtype *dev_data;
  sunindextype length;
  int dev;

  v = NULL;
  v = N_VCloneEmpty_OpenMPDEV(w);
  if (v == NULL) return(NULL);

  length = NV_LENGTH_OMPDEV(w);

  /* Create data */
  if (length > 0) {

    /* Update ownership flag */
    NV_OWN_DATA_OMPDEV(v) = SUNTRUE;

    /* Allocate memory on host */
    data = NULL;
    data = (realtype *) malloc(length * sizeof(realtype));
    if (data == NULL) { N_VDestroy(v); return(NULL); }

    /* Allocate memory on device */
    dev = omp_get_default_device();
    dev_data = omp_target_alloc(length * sizeof(realtype), dev);
    /* NOTE(review): as in N_VNew_OpenMPDEV, `data` is not yet attached to v
       on this failure path, so it appears to leak -- confirm. */
    if (dev_data == NULL) { N_VDestroy(v); return(NULL); }

    /* Attach data */
    NV_DATA_HOST_OMPDEV(v) = data;
    NV_DATA_DEV_OMPDEV(v) = dev_data;
  }

  return(v);
}

/*
 ----------------------------------------------------------------------------
 * Destroy vector and free vector memory.
 *
 * Frees the host buffer (free) and device buffer (omp_target_free) only
 * when the vector owns them, then frees content, ops, and the vector.
 */
void N_VDestroy_OpenMPDEV(N_Vector v)
{
  int dev;

  if (v == NULL) return;

  /* free content */
  if (v->content != NULL) {
    /* free data arrays if they are owned by the vector */
    if (NV_OWN_DATA_OMPDEV(v)) {
      if (NV_DATA_HOST_OMPDEV(v) != NULL) {
        free(NV_DATA_HOST_OMPDEV(v));
        NV_DATA_HOST_OMPDEV(v) = NULL;
      }
      if (NV_DATA_DEV_OMPDEV(v) != NULL) {
        dev = omp_get_default_device();
        omp_target_free(NV_DATA_DEV_OMPDEV(v), dev);
        NV_DATA_DEV_OMPDEV(v) = NULL;
      }
    }
    free(v->content);
    v->content = NULL;
  }

  /* free ops and vector */
  if (v->ops != NULL) { free(v->ops); v->ops = NULL; }
  free(v);
  v = NULL;  /* no effect on caller (by-value parameter); kept as-is */

  return;
}

/* ----------------------------------------------------------------------------
 * Get storage requirement for N_Vector: length realtype words, 1 int word.
 */
void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  *lrw = NV_LENGTH_OMPDEV(v);
  *liw = 1;

  return;
}

/* ----------------------------------------------------------------------------
 * Compute linear combination z[i] = a*x[i]+b*y[i].
 *
 * Dispatches to specialized helpers (Vaxpy, VSum, VDiff, VLin1, VLin2,
 * VScaleSum, VScaleDiff) for common coefficient patterns; the order of the
 * tests below is significant.  The general case runs on the device.
 */
void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype c, *xd_dev, *yd_dev, *zd_dev;
  N_Vector v1, v2;
  booleantype test;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  if ((b == ONE) && (z == y)) {    /* BLAS usage: axpy y <- ax+y */
    Vaxpy_OpenMPDEV(a,x,y);
    return;
  }

  if ((a == ONE) && (z == x)) {    /* BLAS usage: axpy x <- by+x */
    Vaxpy_OpenMPDEV(b,y,x);
    return;
  }

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE)) {
    VSum_OpenMPDEV(x, y, z);
    return;
  }

  /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    v1 = test ? y : x;
    v2 = test ? x : y;
    VDiff_OpenMPDEV(v2, v1, z);
    return;
  }

  /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin1_OpenMPDEV(c, v1, v2, z);
    return;
  }

  /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin2_OpenMPDEV(c, v1, v2, z);
    return;
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b) {
    VScaleSum_OpenMPDEV(a, x, y, z);
    return;
  }

  /* Case: a == -b */
  if (a == -b) {
    VScaleDiff_OpenMPDEV(a, x, y, z);
    return;
  }

  /* Do all cases not handled above:
     (1) a == other, b == 0.0 - user should have called N_VScale
     (2) a == 0.0, b == other - user should have called N_VScale
     (3) a,b == other, a !=b, a != -b */

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N,a,b) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]);

  return;
}

/* ----------------------------------------------------------------------------
 * Assigns constant value to all vector elements, z[i] = c (on device).
 */
void N_VConst_OpenMPDEV(realtype c, N_Vector z)
{
  sunindextype i, N;
  realtype *zd_dev;
  int dev;

  zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N,c) is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++) zd_dev[i] = c;

  return;
}

/* ----------------------------------------------------------------------------
 * Compute
componentwise product z[i] = x[i]*y[i] */ void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd_dev, *yd_dev, *zd_dev; int dev; xd_dev = yd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = xd_dev[i]*yd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute componentwise division z[i] = x[i]/y[i] */ void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd_dev, *yd_dev, *zd_dev; int dev; xd_dev = yd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = xd_dev[i]/yd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute scaler multiplication z[i] = c*x[i] */ void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; if (z == x) { /* BLAS usage: scale x <- cx */ VScaleBy_OpenMPDEV(c, x); return; } if (c == ONE) { VCopy_OpenMPDEV(x, z); } else if (c == -ONE) { VNeg_OpenMPDEV(x, z); } else { N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,c) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel 
for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = c*xd_dev[i]; } return; } /* ---------------------------------------------------------------------------- * Compute absolute value of vector components z[i] = SUNRabs(x[i]) */ void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = SUNRabs(xd_dev[i]); return; } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = 1 / x[i] */ void N_VInv_OpenMPDEV(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = ONE/xd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute componentwise addition of a scaler to a vector z[i] = x[i] + b */ void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,b) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = xd_dev[i]+b; return; } /* 
 ----------------------------------------------------------------------------
 * Computes the dot product of two vectors, a = sum(x[i]*y[i]),
 * via a device-side + reduction.
 */
realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *yd_dev;
  int dev;

  xd_dev = yd_dev = NULL;
  sum = ZERO;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += xd_dev[i]*yd_dev[i];
  }

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Computes max norm of a vector (max |x[i]|) via a max reduction.
 */
realtype N_VMaxNorm_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype max, *xd_dev;
  int dev;

  max = ZERO;
  xd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) map(tofrom:max) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:max) schedule(static, 1)
  for (i = 0; i < N; i++) {
    max = SUNMAX(SUNRabs(xd_dev[i]), max);
  }

  return(max);
}

/* ----------------------------------------------------------------------------
 * Computes weighted root mean square norm of a vector:
 * sqrt(WSqrSum / length).
 */
realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w)
{
  return(SUNRsqrt(N_VWSqrSumLocal_OpenMPDEV(x, w)/(NV_LENGTH_OMPDEV(x))));
}

/* ----------------------------------------------------------------------------
 * Computes weighted root mean square norm of a masked vector.
 */
realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  return(SUNRsqrt(N_VWSqrSumMaskLocal_OpenMPDEV(x, w, id) / (NV_LENGTH_OMPDEV(x))));
}

/* ----------------------------------------------------------------------------
 * Computes weighted square sum of a vector: sum((x[i]*w[i])^2).
 */
realtype
N_VWSqrSumLocal_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;

  sum = ZERO;
  xd_dev = wd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd_dev[i]*wd_dev[i]);
  }

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Computes weighted square sum of a masked vector: terms with id[i] <= 0
 * are skipped.
 */
realtype N_VWSqrSumMaskLocal_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev, *idd_dev;
  int dev;

  sum = ZERO;
  xd_dev = wd_dev = idd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);
  idd_dev = NV_DATA_DEV_OMPDEV(id);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev, idd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    if (idd_dev[i] > ZERO) {
      sum += SUNSQR(xd_dev[i]*wd_dev[i]);
    }
  }

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Finds the minimun component of a vector.
 *
 * Seeds the reduction with xd_dev[0] inside a single team, then reduces
 * over indices 1..N-1.  NOTE(review): assumes N >= 1 -- xd_dev[0] is read
 * unconditionally; confirm callers never pass an empty vector.
 */
realtype N_VMin_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype min, *xd_dev;
  int dev;

  xd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) map(from:min) is_device_ptr(xd_dev) device(dev)
#pragma omp teams num_teams(1)
  {
    min = xd_dev[0];
#pragma omp distribute parallel for reduction(min:min) schedule(static, 1)
    for (i = 1; i < N; i++) {
      min = SUNMIN(xd_dev[i], min);
    }
  }

  return(min);
}

/*
 ----------------------------------------------------------------------------
 * Computes weighted L2 norm of a vector: sqrt(sum((x[i]*w[i])^2)).
 */
realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;

  sum = ZERO;
  xd_dev = wd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd_dev[i]*wd_dev[i]);
  }

  return(SUNRsqrt(sum));
}

/* ----------------------------------------------------------------------------
 * Computes L1 norm of a vector: sum(|x[i]|).
 */
realtype N_VL1Norm_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype sum, *xd_dev;
  int dev;

  sum = ZERO;
  xd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i<N; i++)
    sum += SUNRabs(xd_dev[i]);

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Compare vector component values to a scaler: z[i] = (|x[i]| >= c) ? 1 : 0.
 */
void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N,c) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (SUNRabs(xd_dev[i]) >= c) ? ONE : ZERO;

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO.
 *
 * Returns SUNFALSE if any component is zero (the max reduction on `val`
 * flags it); z entries for zero components are left unwritten.
 */
booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev, val;
  int dev;

  xd_dev = zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  val = ZERO;

#pragma omp target map(to:N) map(tofrom:val) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:val) schedule(static, 1)
  for (i = 0; i < N; i++) {
    if (xd_dev[i] == ZERO)
      val = ONE;
    else
      zd_dev[i] = ONE/xd_dev[i];
  }

  if (val > ZERO)
    return (SUNFALSE);
  else
    return (SUNTRUE);
}

/* ----------------------------------------------------------------------------
 * Compute constraint mask of a vector.
 *
 * c encodes the constraint per component (|c|>1.5: strict sign constraint,
 * |c|>0.5: non-strict, 0: none); m[i] is set to ONE where the constraint
 * is violated.  Returns SUNTRUE iff no violations (min reduction on temp).
 */
booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype i, N;
  realtype temp;
  realtype *cd_dev, *xd_dev, *md_dev;
  int dev;

  cd_dev = xd_dev = md_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  cd_dev = NV_DATA_DEV_OMPDEV(c);
  md_dev = NV_DATA_DEV_OMPDEV(m);

  /* get default device identifier */
  dev = omp_get_default_device();

  temp = ONE;

#pragma omp target map(to:N) map(tofrom:temp) is_device_ptr(xd_dev, cd_dev, md_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:temp) schedule(static, 1)
  for (i = 0; i < N; i++) {
    md_dev[i] = ZERO;
    if (cd_dev[i] == ZERO) continue;
    if (cd_dev[i] > ONEPT5 || cd_dev[i] < -ONEPT5) {
      if ( xd_dev[i]*cd_dev[i] <= ZERO) { temp = ZERO; md_dev[i] = ONE; }
      continue;
    }
    if ( cd_dev[i] > HALF || cd_dev[i] < -HALF) {
      if (xd_dev[i]*cd_dev[i] < ZERO ) { temp = ZERO; md_dev[i] = ONE; }
    }
  }

  if (temp == ONE) return (SUNTRUE);
  else return(SUNFALSE);
}

/* ----------------------------------------------------------------------------
 * Compute minimum componentwise
quotient min(num[i]/denom[i]) over components with denom[i] != 0. */
realtype N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom)
{
  sunindextype i, N;
  realtype *nd_dev, *dd_dev, min;
  int dev;

  nd_dev = dd_dev = NULL;

  N = NV_LENGTH_OMPDEV(num);
  nd_dev = NV_DATA_DEV_OMPDEV(num);
  dd_dev = NV_DATA_DEV_OMPDEV(denom);

  /* get default device identifier */
  dev = omp_get_default_device();

  min = BIG_REAL;  /* returned unchanged if every denom component is zero */

#pragma omp target map(to:N) map(tofrom:min) is_device_ptr(nd_dev, dd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:min) schedule(static, 1)
  for (i = 0; i < N; i++)
    if (dd_dev[i] != ZERO) min = SUNMIN(nd_dev[i]/dd_dev[i], min);

  return(min);
}

/*
 * -----------------------------------------------------------------
 * fused vector operations
 * -----------------------------------------------------------------
 */

/* z = sum_{i=0..nvec-1} c[i]*X[i]; returns 0 on success, -1 for nvec < 1.
 * Delegates to N_VScale / N_VLinearSum for nvec == 1 / 2. */
int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  int          i, dev;
  realtype     to_add;  /* temporary variable to hold sum being added in atomic operation */
  sunindextype j, N;
  realtype*    zd_dev=NULL;
  realtype*    xd_dev=NULL;
  realtype**   xd_dev_ptrs=NULL;

  /* NOTE(review): to_add is declared outside the target regions and is not
     listed in a private clause on the inner `parallel for` loops below;
     verify the implicit data-sharing makes it (first)private per thread,
     otherwise concurrent threads race on it between the assignment and the
     atomic update. */

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], z);
    return(0);
  }

  /* should have called N_VLinearSum */
  if (nvec == 2) {
    N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store X dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);

  /*
   * X[0] += c[i]*X[i], i = 1,...,nvec-1
   */
  if ((X[0] == z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=1; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++) {
          to_add = c[i] * xd_dev[j];
#pragma omp atomic
          zd_dev[j] += to_add;
        }
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /*
   * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
   */
  if (X[0] == z) {
    /* NOTE(review): the two target constructs in this branch omit the
       device(dev) clause used everywhere else in this file -- they run on
       the default device (which is dev), but flag for consistency. */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev)
    {
#pragma omp teams distribute parallel for schedule(static,1)
      for (j=0; j<N; j++)
        zd_dev[j] *= c[0];
    }

#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev)
#pragma omp teams distribute
    {
      for (i=1; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++) {
          to_add = c[i] * xd_dev[j];
#pragma omp atomic
          zd_dev[j] += to_add;
        }
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /*
   * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
   */
  xd_dev = NV_DATA_DEV_OMPDEV(X[0]);
#pragma omp target map(to:N,c[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
  {
#pragma omp teams distribute parallel for schedule(static, 1)
    for (j=0; j<N; j++) {
      zd_dev[j] = c[0] * xd_dev[j];
    }
  }

#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=1; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++) {
        to_add = c[i] * xd_dev[j];
#pragma omp atomic
        zd_dev[j] += to_add;
      }
    }
  }
  free(xd_dev_ptrs);
  return(0);
}

/* Z[i] = Y[i] + a[i]*x (or in-place when Y == Z); returns 0 on success. */
int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
  int          i, dev;
  sunindextype j, N;
  realtype*    xd_dev=NULL;
  realtype*    yd_dev=NULL;
  realtype*    zd_dev=NULL;
  realtype**   yd_dev_ptrs=NULL;
  realtype**   zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a[0], x, ONE, Y[0], Z[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          yd_dev[j] += a[i] * xd_dev[j];
      }
    }
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j];
    }
  }
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* dotprods[i] = <x, Y[i]>; returns 0 on success, -1 for nvec < 1. */
int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
  int          i, dev;
  sunindextype j, N;
  realtype     sum;
  realtype*    xd_dev=NULL;
  realtype*    yd_dev=NULL;
  realtype**   yd_dev_ptrs=NULL;

  /* NOTE(review): `sum` is written per outer iteration inside the
     `teams distribute` region without a private clause on i/sum -- verify
     the implementation privatizes it per team as intended. */

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VDotProd */
  if (nvec == 1) {
    dotprods[0] = N_VDotProd_OpenMPDEV(x, Y[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize dot products */
  for (i=0; i<nvec; i++) {
    dotprods[i] = ZERO;
  }

  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /* compute multiple dot products */
#pragma omp target map(to:N,nvec,yd_dev_ptrs[:nvec]) map(tofrom:dotprods[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    yd_dev = yd_dev_ptrs[i];
    sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++)
      sum += xd_dev[j] * yd_dev[j];
    dotprods[i] += sum;
  }
  free(yd_dev_ptrs);

  return(0);
}

/*
 * -----------------------------------------------------------------
 * vector array operations
 * -----------------------------------------------------------------
 */

/* Z[i] = a*X[i] + b*Y[i] for each vector pair; mirrors the coefficient
 * special-casing of N_VLinearSum_OpenMPDEV via the V*VectorArray helpers. */
int N_VLinearSumVectorArray_OpenMPDEV(int nvec,
                                      realtype a, N_Vector* X,
                                      realtype b, N_Vector* Y,
                                      N_Vector* Z)
{
  int          i, dev;
  sunindextype j, N;
  N_Vector*    V1;
  N_Vector*    V2;
  booleantype  test;
  realtype     c;
  realtype*    xd_dev=NULL;
  realtype*    yd_dev=NULL;
  realtype*    zd_dev=NULL;
  realtype**   xd_dev_ptrs=NULL;
  realtype**   yd_dev_ptrs=NULL;
  realtype**   zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a, X[0], b, Y[0], Z[0]);
    return(0);
  }

  /* BLAS usage: axpy y <- ax+y */
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMPDEV(nvec, a, X, Y));

  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMPDEV(nvec, b, Y, X));

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMPDEV(nvec, X, Y, Z));

  /* Cases:                    */
  /*   (1) a == 1.0, b = -1.0, */
  /*   (2) a == -1.0, b == 1.0 */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMPDEV(nvec, V2, V1, Z));
  }

  /* Cases:                                                  */
  /*   (1) a == 1.0, b == other or 0.0,                      */
  /*   (2) a == other or 0.0, b == 1.0                       */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ?
zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VScale */ if (nvec == 1) { N_VScale_OpenMPDEV(c[0], X[0], Z[0]); return(0); } /* get vector length */ N = NV_LENGTH_OMPDEV(Z[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) { xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); } /* * X[i] *= c[i] */ if (X == Z) { #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) xd_dev[j] *= c[i]; } } free(xd_dev_ptrs); return(0); } /* Allocate and store dev pointers to copy to device */ zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); /* * Z[i] = c[i] * X[i] */ #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = c[i] * xd_dev[j]; } } free(xd_dev_ptrs); free(zd_dev_ptrs); return(0); } int N_VConstVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* zd_dev=NULL; realtype** zd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VConst */ if (nvec == 1) { N_VConst_OpenMPDEV(c, Z[0]); return(0); } /* get vector length */ N = NV_LENGTH_OMPDEV(Z[0]); /* get device */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; 
i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); /* set each vector in the vector array to a constant */ #pragma omp target map(to:N,nvec,zd_dev_ptrs[:nvec]) \ is_device_ptr(zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = c; } } free(zd_dev_ptrs); return(0); } int N_VWrmsNormVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, realtype* nrm) { int i, dev; sunindextype j, N; realtype sum; realtype* wd_dev=NULL; realtype* xd_dev=NULL; realtype** wd_dev_ptrs=NULL; realtype** xd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = N_VWrmsNorm_OpenMPDEV(X[0], W[0]); return(0); } /* get vector length */ N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* initialize norms */ for (i=0; i<nvec; i++) nrm[i] = ZERO; /* Allocate and store dev pointers to copy to device */ wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); /* compute the WRMS norm for each vector in the vector array */ #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \ is_device_ptr(xd_dev, wd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; wd_dev = wd_dev_ptrs[i]; sum = ZERO; #pragma omp parallel for reduction(+:sum) schedule(static, 1) { for (j=0; j<N; j++) sum += SUNSQR(xd_dev[j] * wd_dev[j]); } nrm[i] = SUNRsqrt(sum/N); } } free(wd_dev_ptrs); free(xd_dev_ptrs); return(0); } int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm) { int i, dev; sunindextype j, N; realtype sum; realtype* wd_dev=NULL; 
realtype* xd_dev=NULL; realtype* idd_dev=NULL; realtype** wd_dev_ptrs=NULL; realtype** xd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = N_VWrmsNormMask_OpenMPDEV(X[0], W[0], id); return(0); } /* get vector length and mask data array */ N = NV_LENGTH_OMPDEV(X[0]); idd_dev = NV_DATA_DEV_OMPDEV(id); /* get default device identifier */ dev = omp_get_default_device(); /* initialize norms */ for (i=0; i<nvec; i++) nrm[i] = ZERO; /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]); /* compute the WRMS norm for each vector in the vector array */ #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \ is_device_ptr(idd_dev,xd_dev,wd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; wd_dev = wd_dev_ptrs[i]; sum = ZERO; #pragma omp parallel for reduction(+:sum) schedule(static, 1) { for (j=0; j<N; j++) { if (idd_dev[j] > ZERO) sum += SUNSQR(xd_dev[j] * wd_dev[j]); } } nrm[i] = SUNRsqrt(sum/N); } } free(xd_dev_ptrs); free(wd_dev_ptrs); return(0); } int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z) { int i, j, dev; sunindextype k, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; int retval; N_Vector* YY; N_Vector* ZZ; /* invalid number of vectors */ if (nvec < 1) return(-1); if (nsum < 1) return(-1); /* --------------------------- * Special cases for nvec == 1 * --------------------------- */ if (nvec == 1) { /* should have called N_VLinearSum */ if (nsum == 1) { 
N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]); return(0); } /* should have called N_VScaleAddMulti */ YY = (N_Vector *) malloc(nsum * sizeof(N_Vector)); ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector)); for (j=0; j<nsum; j++) { YY[j] = Y[j][0]; ZZ[j] = Z[j][0]; } retval = N_VScaleAddMulti_OpenMPDEV(nsum, a, X[0], YY, ZZ); free(YY); free(ZZ); return(retval); } /* -------------------------- * Special cases for nvec > 1 * -------------------------- */ /* should have called N_VLinearSumVectorArray */ if (nsum == 1) { retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]); return(retval); } /* ---------------------------- * Compute multiple linear sums * ---------------------------- */ /* get vector length */ N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) { for (j=0; j<nsum; j++) yd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Y[j][i]); } /* * Y[i][j] += a[i] * x[j] */ if (Y == Z) { #pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum]) \ is_device_ptr(xd_dev, yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; for (j=0; j<nsum; j++) { yd_dev = yd_dev_ptrs[i*nsum+j]; #pragma omp parallel for schedule(static, 1) for (k=0; k<N; k++) yd_dev[k] += a[j] * xd_dev[k]; } } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } /* Allocate and store dev pointers to copy to device */ zd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*)); for (i=0; i<nvec; i++) { for (j=0; j<nsum; j++) zd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Z[j][i]); } /* * Z[i][j] = Y[i][j] + a[i] * x[j] */ #pragma omp target 
map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec*nsum]) \ is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; for (j=0; j<nsum; j++) { yd_dev = yd_dev_ptrs[i*nsum+j]; zd_dev = zd_dev_ptrs[i*nsum+j]; #pragma omp parallel for schedule(static, 1) for (k=0; k<N; k++) zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k]; } } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z) { int i; /* vector arrays index in summation [0,nsum) */ int j; /* vector index in vector array [0,nvec) */ sunindextype k; /* element index in vector [0,N) */ sunindextype N; realtype* zd_dev=NULL; realtype* xd_dev=NULL; realtype** zd_dev_ptrs=NULL; realtype** xd_dev_ptrs=NULL; int dev; realtype* ctmp; N_Vector* Y; /* invalid number of vectors */ if (nvec < 1) return(-1); if (nsum < 1) return(-1); /* --------------------------- * Special cases for nvec == 1 * --------------------------- */ if (nvec == 1) { /* should have called N_VScale */ if (nsum == 1) { N_VScale_OpenMPDEV(c[0], X[0][0], Z[0]); return(0); } /* should have called N_VLinearSum */ if (nsum == 2) { N_VLinearSum_OpenMPDEV(c[0], X[0][0], c[1], X[1][0], Z[0]); return(0); } /* should have called N_VLinearCombination */ Y = (N_Vector *) malloc(nsum * sizeof(N_Vector)); for (i=0; i<nsum; i++) { Y[i] = X[i][0]; } N_VLinearCombination_OpenMPDEV(nsum, c, Y, Z[0]); free(Y); return(0); } /* -------------------------- * Special cases for nvec > 1 * -------------------------- */ /* should have called N_VScaleVectorArray */ if (nsum == 1) { ctmp = (realtype*) malloc(nvec * sizeof(realtype)); for (j=0; j<nvec; j++) { ctmp[j] = c[0]; } N_VScaleVectorArray_OpenMPDEV(nvec, ctmp, X[0], Z); free(ctmp); return(0); } /* should have called N_VLinearSumVectorArray */ if (nsum == 2) { N_VLinearSumVectorArray_OpenMPDEV(nvec, 
c[0], X[0], c[1], X[1], Z); return(0); } /* -------------------------- * Compute linear combination * -------------------------- */ /* get vector length */ N = NV_LENGTH_OMPDEV(Z[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); xd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*)); for (j=0; j<nvec; j++) zd_dev_ptrs[j] = NV_DATA_DEV_OMPDEV(Z[j]); for (j=0; j<nvec; j++) { for (i=0; i<nsum; i++) xd_dev_ptrs[j * nsum + i] = NV_DATA_DEV_OMPDEV(X[i][j]); } /* * X[0][j] += c[i]*X[i][j], i = 1,...,nvec-1 */ if ((X[0] == Z) && (c[0] == ONE)) { #pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute { for (j=0; j<nvec; j++) { zd_dev = zd_dev_ptrs[j]; for (i=1; i<nsum; i++) { xd_dev = xd_dev_ptrs[j*nsum+i]; #pragma omp parallel for schedule(static, 1) for (k=0; k<N; k++) zd_dev[k] += c[i] * xd_dev[k]; } } } free(xd_dev_ptrs); free(zd_dev_ptrs); return(0); } /* * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nvec-1 */ if (X[0] == Z) { #pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \ is_device_ptr(zd_dev) device(dev) #pragma omp teams distribute { for (j=0; j<nvec; j++) { zd_dev = zd_dev_ptrs[j]; #pragma omp parallel for schedule(static, 1) for (k=0; k<N; k++) zd_dev[k] *= c[0]; for (i=1; i<nsum; i++) { xd_dev = xd_dev_ptrs[j*nsum+i]; #pragma omp parallel for schedule(static, 1) for (k=0; k<N; k++) zd_dev[k] += c[i] * xd_dev[k]; } } } free(xd_dev_ptrs); free(zd_dev_ptrs); return(0); } /* * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nvec-1 */ #pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \ is_device_ptr(zd_dev) device(dev) #pragma omp teams distribute { for (j=0; j<nvec; j++) { /* scale first vector in the sum into the output vector */ 
xd_dev = xd_dev_ptrs[j*nsum];
      zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
      for (k=0; k<N; k++)
        zd_dev[k] = c[0] * xd_dev[k];

      /* scale and sum remaining vectors into the output vector */
      for (i=1; i<nsum; i++) {
        xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] += c[i] * xd_dev[k];
      }
    }
  }

  free(xd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}

/*
 * -----------------------------------------------------------------
 * private functions
 * -----------------------------------------------------------------
 */

/* ----------------------------------------------------------------------------
 * Copy vector components into a second vector: z[i] = x[i].
 * Operates on the device data arrays inside an OpenMP target region.
 */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute vector sum: z[i] = x[i] + y[i] on the device.
 */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]+yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute vector difference
 */
static void
VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]-yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute the negative of a vector: z[i] = -x[i] on the device.
 */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = -xd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute scaled vector sum: z[i] = c*(x[i] + y[i]) on the device.
 */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N,c) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = c*(xd_dev[i]+yd_dev[i]);

  return;
}

/* ----------------------------------------------------------------------------
 * Compute scaled vector difference
 */
static void VScaleDiff_OpenMPDEV(realtype
c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N,c) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = c*(xd_dev[i]-yd_dev[i]);

  return;
}

/* ----------------------------------------------------------------------------
 * Compute vector sum z[i] = a*x[i]+y[i]
 */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])+yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute vector difference z[i] = a*x[i]-y[i]
 */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])-yd_dev[i];

  return;
}

/*
----------------------------------------------------------------------------
 * Compute special cases of linear sum: y[i] += a*x[i], with fast paths for
 * a == 1 and a == -1 that avoid the multiply.
 */
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev;
  int dev;

  xd_dev = yd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);

  /* get default device identifier */
  dev = omp_get_default_device();

  if (a == ONE) {
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      yd_dev[i] += xd_dev[i];
    return;
  }

  if (a == -ONE) {
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      yd_dev[i] -= xd_dev[i];
    return;
  }

#pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    yd_dev[i] += a*xd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute scaled vector x[i] = a*x[i]
 */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x)
{
  sunindextype i, N;
  realtype *xd_dev;
  int dev;

  xd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N,a) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    xd_dev[i] *= a;

  return;
}

/*
 * -----------------------------------------------------------------
 * private functions for special cases of vector array operations
 * -----------------------------------------------------------------
 */

static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y,
                                     N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = xd_dev[j] + yd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams 
distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = xd_dev[j] - yd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = c * (xd_dev[j] + yd_dev[j]); } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev ointer to copy to device 
*/ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = c * (xd_dev[j] - yd_dev[j]); } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = (a 
* xd_dev[j]) + yd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = (a * xd_dev[j]) - yd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); if 
(a == ONE) { #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] += xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } if (a == -ONE) { #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] -= xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] += a * xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV; v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV; v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV; v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV; v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV; v->ops->nvwrmsnormvectorarray = 
      N_VWrmsNormVectorArray_OpenMPDEV;
    v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV;
    v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMPDEV;
    v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV;
  } else {
    /* disable all fused vector operations */
    v->ops->nvlinearcombination = NULL;
    v->ops->nvscaleaddmulti = NULL;
    v->ops->nvdotprodmulti = NULL;
    /* disable all vector array operations */
    v->ops->nvlinearsumvectorarray = NULL;
    v->ops->nvscalevectorarray = NULL;
    v->ops->nvconstvectorarray = NULL;
    v->ops->nvwrmsnormvectorarray = NULL;
    v->ops->nvwrmsnormmaskvectorarray = NULL;
    v->ops->nvscaleaddmultivectorarray = NULL;
    v->ops->nvlinearcombinationvectorarray = NULL;
  }

  /* return success */
  return(0);
}

/* Enables/disables the fused linear-combination operation on v->ops.
   Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV;
  else
    v->ops->nvlinearcombination = NULL;

  /* return success */
  return(0);
}

/* Enables/disables the fused scale-add-multi operation on v->ops.
   Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV;
  else
    v->ops->nvscaleaddmulti = NULL;

  /* return success */
  return(0);
}

/* Enables/disables the fused multi-dot-product operation on v->ops.
   Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV;
  else
    v->ops->nvdotprodmulti = NULL;

  /* return success */
  return(0);
}

/* Enables/disables the linear-sum vector-array operation on v->ops.
   Returns 0 on success, -1 on a NULL argument.
   (Body continues on the next chunk line.) */
int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is
non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV;
  else
    v->ops->nvlinearsumvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enables/disables the scale vector-array operation on v->ops.
   Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV;
  else
    v->ops->nvscalevectorarray = NULL;

  /* return success */
  return(0);
}

/* Enables/disables the const vector-array operation on v->ops.
   Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV;
  else
    v->ops->nvconstvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enables/disables the WRMS-norm vector-array operation on v->ops.
   Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV;
  else
    v->ops->nvwrmsnormvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enables/disables the masked WRMS-norm vector-array operation on v->ops.
   Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV;
  else
    v->ops->nvwrmsnormmaskvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enables/disables the scale-add-multi vector-array operation on v->ops.
   Returns 0 on success, -1 on a NULL argument.
   (Body continues on the next chunk line.) */
int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL)
return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMPDEV;
  else
    v->ops->nvscaleaddmultivectorarray = NULL;

  /* return success */
  return(0);
}

/* Enables/disables the linear-combination vector-array operation on v->ops.
   Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV;
  else
    v->ops->nvlinearcombinationvectorarray = NULL;

  /* return success */
  return(0);
}
DemBones.h
/////////////////////////////////////////////////////////////////////////////// // Dem Bones - Skinning Decomposition Library // // Copyright (c) 2019, Electronic Arts. All rights reserved. // /////////////////////////////////////////////////////////////////////////////// #ifndef DEM_BONES_DEM_BONES #define DEM_BONES_DEM_BONES #include <Eigen/Dense> #include <Eigen/Sparse> #include <Eigen/StdVector> #include <algorithm> #include <queue> #include "ConvexLS.h" #ifndef DEM_BONES_MAT_BLOCKS #include "MatBlocks.h" #define DEM_BONES_DEM_BONES_MAT_BLOCKS_UNDEFINED #endif namespace Dem { /** @mainpage Overview Main elements: - @ref DemBones : base class with the core solver using relative bone transformations DemBones::m - @ref DemBonesExt : extended class to handle hierarchical skeleton with local rotations/translations and bind matrices - DemBones/MatBlocks.h: macros to access sub-blocks of packing transformation/position matrices for convenience Include DemBones/DemBonesExt.h (or DemBones/DemBones.h) with optional DemBones/MatBlocks.h then follow these steps to use the library: -# Load required data in the base class: - Rest shapes: DemBones::u, DemBones::fv, DemBones::nV - Sequence: DemBones::v, DemBones::nF, DemBones::fStart, DemBones::subjectID, DemBones::nS - Number of bones: DemBones::nB -# Load optional data in the base class: - Skinning weights: DemBones::w - Bone transformations: DemBones::m -# [@c optional] Set parameters in the base class: - DemBones::nIters - DemBones::nInitIters - DemBones::nTransIters, DemBones::transAffine, DemBones::transAffineNorm - DemBones::nWeightsIters, DemBones::nnz, DemBones::weightsSmooth, DemBones::weightsSmoothStep, DemBones::weightEps -# [@c optional] Setup extended class: - Load data: DemBonesExt::parent, DemBonesExt::preMulInv, DemBonesExt::rotOrder, DemBonesExt::bind - Set paramter: DemBonesExt::bindUpdate -# [@c optional] Override callback functions (cb...) 
in the base class @ref DemBones -# Call decomposition function DemBones::compute(), DemBones::computeWeights(), DemBones::computeTranformations(), or DemBones::init() -# [@c optional] Get local transformations/bind poses with DemBonesExt::computeRTB() */ /** @class DemBones DemBones.h "DemBones/DemBones.h" @brief Smooth skinning decomposition with rigid bones and sparse, convex weights @details Setup the required data, parameters, and call either compute(), computeWeights(), computeTranformations(), or init(). Callback functions and read-only values can be used to report progress: cbInitSplitBegin(), cbInitSplitEnd(), cbIterBegin(), cbIterEnd(), cbWeightsBegin(), cbWeightsEnd(), cbTranformationsBegin(), cbTransformationsEnd(), cbTransformationsIterBegin(), cbTransformationsIterEnd(), cbWeightsIterBegin(), cbWeightsIterEnd(), rmse(), #iter, #iterTransformations, #iterWeights. @b _Scalar is the floating-point data type. @b _AniMeshScalar is the floating-point data type of mesh sequence #v. */ template<class _Scalar, class _AniMeshScalar> class DemBones { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW using MatrixX=Eigen::Matrix<_Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Matrix4=Eigen::Matrix<_Scalar, 4, 4>; using Matrix3=Eigen::Matrix<_Scalar, 3, 3>; using VectorX=Eigen::Matrix<_Scalar, Eigen::Dynamic, 1>; using Vector4=Eigen::Matrix<_Scalar, 4, 1>; using Vector3=Eigen::Matrix<_Scalar, 3, 1>; using SparseMatrix=Eigen::SparseMatrix<_Scalar>; using Triplet=Eigen::Triplet<_Scalar>; //! [@c parameter] Number of global iterations, @c default = 30 int nIters; //! [@c parameter] Number of clustering update iterations in the initalization, @c default = 10 int nInitIters; //! [@c parameter] Number of bone transformations update iterations per global iteration, @c default = 5 int nTransIters; //! [@c parameter] Translations affinity soft constraint, @c default = 10.0 _Scalar transAffine; //! 
[@c parameter] p-norm for bone translations affinity soft constraint, @c default = 4.0 _Scalar transAffineNorm; //! [@c parameter] Number of weights update iterations per global iteration, @c default = 3 int nWeightsIters; //! [@c parameter] Number of non-zero weights per vertex, @c default = 8 int nnz; //! [@c parameter] Weights smoothness soft constraint, @c default = 1e-4 _Scalar weightsSmooth; //! [@c parameter] Step size for the weights smoothness soft constraint, @c default = 1.0 _Scalar weightsSmoothStep; //! [@c parameter] Epsilon for weights solver, @c default = 1e-15 _Scalar weightEps; /** @brief Constructor and setting default parameters */ DemBones(): nIters(30), nInitIters(10), nTransIters(5), transAffine(_Scalar(10)), transAffineNorm(_Scalar(4)), nWeightsIters(3), nnz(8), weightsSmooth(_Scalar(1e-4)), weightsSmoothStep(_Scalar(1)), weightEps(_Scalar(1e-15)), iter(_iter), iterTransformations(_iterTransformations), iterWeights(_iterWeights) { clear(); } //! Number of vertices, typically indexed by @p i int nV; //! Number of bones, typically indexed by @p j int nB; //! Number of subjects, typically indexed by @p s int nS; //! Number of total frames, typically indexed by @p k, #nF = #fStart(#nS) int nF; //! Start frame indices, @c size = #nS+1, #fStart(@p s), #fStart(@p s+1) are data frames for subject @p s Eigen::VectorXi fStart; //! Subject index of the frame, @c size = #nF, #subjectID(@p k)=@p s, where #fStart(@p s) <= @p k < #fStart(<tt>s</tt>+1) Eigen::VectorXi subjectID; //! Geometry at the rest poses, @c size = [3*#nS, #nV], #u.@a col(@p i).@a segment(3*@p s, 3) is the rest pose of vertex @p i of subject @p s MatrixX u; //! 
Skinning weights, @c size = [#nB, #nV], #w.@a col(@p i) are the skinning weights of vertex @p i, #w(@p j, @p i) is the influence of bone @p j to vertex @p i SparseMatrix w; /** @brief Bone transformations, @c size = [4*#nF*4, 4*#nB], #m.@a blk4(@p k, @p j) is the 4*4 relative transformation matrix of bone @p j at frame @p k @details Note that the transformations are relative, that is #m.@a blk4(@p k, @p j) brings the global transformation of bone @p j from the rest pose to the pose at frame @p k. */ MatrixX m; //! Animated mesh sequence, @c size = [3*#nF, #nV], #v.@a col(@p i).@a segment(3*@p k, 3) is the position of vertex @p i at frame @p k Eigen::Matrix<_AniMeshScalar, Eigen::Dynamic, Eigen::Dynamic> v; //! Mesh topology, @c size=[<tt>number of polygons</tt>], #fv[@p p] is the vector of vertex indices of polygon @p p std::vector<std::vector<int>> fv; //! [<tt>zero indexed</tt>, <tt>read only</tt>] Current global iteration number that can be used for callback functions const int& iter; //! [<tt>zero indexed</tt>, <tt>read only</tt>] Current bone transformations update iteration number that can be used for callback functions const int& iterTransformations; //! [<tt>zero indexed</tt>, <tt>read only</tt>] Current weights update iteration number that can be used for callback functions const int& iterWeights; /** Clear all data */ void clear() { nV=nB=nS=nF=0; fStart.resize(0); subjectID.resize(0); u.resize(0, 0); w.resize(0, 0); m.resize(0, 0); v.resize(0, 0); fv.resize(0); modelSize=-1; laplacian.resize(0, 0); } /** @brief Initialize missing skinning weights and/or bone transformations @details Depending on the status of #w and #m, this function will: - Both #w and #m are already set: do nothing - Only one in #w or #m is missing (zero size): initialize missing matrix, i.e. #w (or #m) - Both #w and #m are missing (zero size): initialize both with rigid skinning using approximately #nB bones, i.e. values of #w are 0 or 1. 
LBG-VQ clustering is peformed using mesh sequence #v, rest pose geometries #u and topology #fv. @b Note: as the initialization does not use exactly #nB bones, the value of #nB could be changed when both #w and #m are missing. This function is called at the begining of every compute update functions as a safeguard. */ void init() { if (modelSize<0) modelSize=sqrt((u-(u.rowwise().sum()/nV).replicate(1, nV)).squaredNorm()/nV/nS); if (laplacian.cols()!=nV) computeSmoothSolver(); if (((int)w.rows()!=nB)||((int)w.cols()!=nV)) { //No skinning weight if (((int)m.rows()!=nF*4)||((int)m.cols()!=nB*4)) { //No transformation int targetNB=nB; //LBG-VQ nB=1; label=Eigen::VectorXi::Zero(nV); computeTransFromLabel(); bool cont=true; while (cont) { cbInitSplitBegin(); split(targetNB, nnz); cont=(nB<targetNB); for (int rep=0; rep<nInitIters; rep++) { computeTransFromLabel(); computeLabel(); pruneBones(nnz); } cbInitSplitEnd(); } m.conservativeResize(nF*4, nB*4); labelToWeights(); } else initWeights(); //Has transformations } else { //Has skinning weights if (((int)m.rows()!=nF*4)||((int)m.cols()!=nB*4)) { //No transformation m=Matrix4::Identity().replicate(nF, nB); } } } /** @brief Update bone transformations by running #nTransIters iterations with #transAffine and #transAffineNorm regularizers @details Required input data: - Rest shapes: #u, #fv, #nV - Sequence: #v, #nF, #fStart, #subjectID, #nS - Number of bones: #nB Optional input data: - Skinning weights: #w - Bone transformations: #m Output: #m. Missing #w and/or #m (with zero size) will be initialized by init(). 
*/ void computeTranformations() { if (nTransIters==0) return; init(); cbTranformationsBegin(); compute_vuT(); compute_uuT(); for (_iterTransformations=0; _iterTransformations<nTransIters; _iterTransformations++) { cbTransformationsIterBegin(); #pragma omp parallel for for (int k=0; k<nF; k++) for (int j=0; j<nB; j++) { Matrix4 qpT=vuT.blk4(k, j); for (int it=uuT.outerIdx(j); it<uuT.outerIdx(j + 1); it++) if (uuT.innerIdx(it)!=j) qpT-=m.blk4(k, uuT.innerIdx(it))*uuT.val.blk4(subjectID(k), it); qpT2m(qpT, k, j); } cbTransformationsIterEnd(); } cbTransformationsEnd(); } /** @brief Update skinning weights by running #nWeightsIters iterations with #weightsSmooth and #weightsSmoothStep regularizers @details Required input data: - Rest shapes: #u, #fv, #nV - Sequence: #v, #nF, #fStart, #subjectID, #nS - Number of bones: #nB Optional input data: - Skinning weights: #w - Bone transformations: #m Output: #w. Missing #w and/or #m (with zero size) will be initialized by init(). */ void computeWeights() { if (nWeightsIters==0) return; init(); cbWeightsBegin(); compute_mTm(); aTb=MatrixX::Zero(nB, nV); wSolver.init(nnz); std::vector<Triplet, Eigen::aligned_allocator<Triplet>> trip; trip.reserve(nV*nnz); for (_iterWeights=0; _iterWeights<nWeightsIters; _iterWeights++) { cbWeightsIterBegin(); compute_ws(); compute_aTb(); double reg=pow(modelSize, 2)*nF*weightsSmooth; trip.clear(); #pragma omp parallel for for (int i=0; i<nV; i++) { MatrixX aTai; compute_aTa(i, aTai); aTai+=reg*MatrixX::Identity(nB, nB); VectorX aTbi=aTb.col(i)+reg*ws.col(i); VectorX x=ws.col(i); Eigen::ArrayXi idx=Eigen::ArrayXi::LinSpaced(nB, 0, nB-1); std::sort(idx.data(), idx.data()+nB, [&x](int i1, int i2) { return x(i1)>x(i2); }); int nnzi=std::min(nnz, nB); while (x(idx(nnzi-1))<weightEps) nnzi--; x=indexing_vector(w.col(i).toDense().cwiseMax(0.0), idx.head(nnzi)); _Scalar s=x.sum(); if (s>_Scalar(0.1)) x/=s; else x=VectorX::Constant(nnzi, _Scalar(1)/nnzi); wSolver.solve(indexing_row_col(aTai, 
idx.head(nnzi), idx.head(nnzi)), indexing_vector(aTbi, idx.head(nnzi)), x, true, true); #pragma omp critical for (int j=0; j<nnzi; j++) if (x(j)!=0) trip.push_back(Triplet(idx[j], i, x(j))); } w.resize(nB, nV); w.setFromTriplets(trip.begin(), trip.end()); cbWeightsIterEnd(); } cbWeightsEnd(); } /** @brief Skinning decomposition by #nIters iterations of alternative updating weights and bone transformations @details Required input data: - Rest shapes: #u, #fv, #nV - Sequence: #v, #nF, #fStart, #subjectID, #nS - Number of bones: #nB Optional input data: - Skinning weights: #w - Bone transformations: #m Output: #w, #m. Missing #w and/or #m (with zero size) will be initialized by init(). */ void compute() { init(); for (_iter=0; _iter<nIters; _iter++) { cbIterBegin(); computeTranformations(); computeWeights(); cbIterEnd(); } } //! @return Root mean squared reconstruction error _Scalar rmse() { _Scalar e=0; #pragma omp parallel for for (int i=0; i<nV; i++) { _Scalar ei = 0; Matrix4 mki; for (int k=0; k<nF; k++) { mki.setZero(); for (typename SparseMatrix::InnerIterator it(w, i); it; ++it) mki+=it.value()*m.blk4(k, it.row()); ei+=(mki.template topLeftCorner<3, 3>()*u.vec3(subjectID(k), i)+mki.template topRightCorner<3, 1>()-v.vec3(k, i).template cast<_Scalar>()).squaredNorm(); } #pragma omp atomic e+=ei; } return std::sqrt(e/nF/nV); } //! Callback function invoked before each spliting of bone clusters in initialization virtual void cbInitSplitBegin() {} //! Callback function invoked after each spliting of bone clusters in initialization virtual void cbInitSplitEnd() {} //! Callback function invoked before each global iteration update virtual void cbIterBegin() {} //! Callback function invoked after each global iteration update virtual void cbIterEnd() {} //! Callback function invoked before each skinning weights update virtual void cbWeightsBegin() {} //! Callback function invoked after each skinning weights update virtual void cbWeightsEnd() {} //! 
//! (continuation) Callback function invoked before each bone transformations update
virtual void cbTranformationsBegin() {}

//! Callback function invoked after each bone transformations update
virtual void cbTransformationsEnd() {}

//! Callback function invoked before each local bone transformations update iteration
virtual void cbTransformationsIterBegin() {}

//! Callback function invoked after each local bone transformations update iteration
virtual void cbTransformationsIterEnd() {}

//! Callback function invoked before each local weights update iteration
virtual void cbWeightsIterBegin() {}

//! Callback function invoked after each local weights update iteration
virtual void cbWeightsIterEnd() {}

private:
// Backing storage for the read-only iteration counters exposed via the
// const references #iter, #iterTransformations, #iterWeights.
int _iter, _iterTransformations, _iterWeights;

/** Best rigid transformation from covariance matrix
    @param _qpT is the 4*4 covariance matrix
    @param k is the frame number
    @param j is the bone index
    Writes the resulting rotation/translation into m.rotMat(k, j) and
    m.transVec(k, j); does nothing when the weight entry _qpT(3, 3) is zero.
*/
void qpT2m(const Matrix4& _qpT, int k, int j) {
  if (_qpT(3, 3)!=0) {
    // Normalize by the accumulated weight, then extract the rotation from
    // the centered cross-covariance via SVD (orthogonal Procrustes).
    Matrix4 qpT=_qpT/_qpT(3, 3);
    Eigen::JacobiSVD<Matrix3> svd(qpT.template topLeftCorner<3, 3>()-qpT.template topRightCorner<3, 1>()*qpT.template bottomLeftCorner<1, 3>(), Eigen::ComputeFullU|Eigen::ComputeFullV);
    // Flip the last singular direction if needed so the result is a proper
    // rotation (determinant +1), not a reflection.
    Matrix3 d=Matrix3::Identity();
    d(2, 2)=(svd.matrixU()*svd.matrixV().transpose()).determinant();
    m.rotMat(k, j)=svd.matrixU()*d*svd.matrixV().transpose();
    m.transVec(k, j)=qpT.template topRightCorner<3, 1>()-m.rotMat(k, j)*qpT.template bottomLeftCorner<1, 3>().transpose();
  }
}

/** Fitting error
    @param i is the vertex index
    @param j is the bone index
    @param par enables the OpenMP parallel loop over frames
    @return sum over all frames of the squared distance between vertex i
    rigidly transformed by bone j and its animated position.
*/
_Scalar errorVtxBone(int i, int j, bool par=true) {
  _Scalar e=0;
  #pragma omp parallel for if(par)
  for (int k=0; k<nF; k++)
    #pragma omp atomic
    e+=(m.rotMat(k, j)*u.vec3(subjectID(k), i)+m.transVec(k, j)-v.vec3(k, i).template cast<_Scalar>()).squaredNorm();
  return e;
}

//! label(i) is the index of the bone associated with vertex i
Eigen::VectorXi label;

//!
Comparator for heap with smallest values on top struct TripletLess { bool operator() (const Triplet& t1, const Triplet& t2) { return t1.value()>t2.value(); } }; /** Update labels of vertices */ void computeLabel() { VectorX ei(nV); Eigen::VectorXi seed=Eigen::VectorXi::Constant(nB, -1); VectorX gMin(nB); #pragma omp parallel for for (int i=0; i<nV; i++) { int j=label(i); if (j!=-1) { ei(i)=errorVtxBone(i, j, false); if ((seed(j)==-1)||(ei(i)<gMin(j))) { #pragma omp critical if ((seed(j)==-1)||(ei(i)<gMin(j))) { gMin(j)=ei(i); seed(j)=i; } } } } std::priority_queue<Triplet, std::vector<Triplet, Eigen::aligned_allocator<Triplet>>, TripletLess> heap; for (int j=0; j<nB; j++) if (seed(j)!=-1) heap.push(Triplet(j, seed(j), ei(seed(j)))); if (laplacian.cols()!=nV) computeSmoothSolver(); std::vector<bool> dirty(nV, true); while (!heap.empty()) { Triplet top=heap.top(); heap.pop(); int i=(int)top.col(); int j=(int)top.row(); if (dirty[i]) { label(i)=j; ei(i)=top.value(); dirty[i]=false; for (typename SparseMatrix::InnerIterator it(laplacian, i); it; ++it) { int i2=(int)it.row(); if (dirty[i2]) { double tmp=(label(i2)==j)?ei(i2):errorVtxBone(i2, j); heap.push(Triplet(j, i2, tmp)); } } } } #pragma omp parallel for for (int i=0; i<nV; i++) if (label(i)==-1) { _Scalar gMin; for (int j=0; j<nB; j++) { _Scalar ej=errorVtxBone(i, j, false); if ((label(i)==-1)||(gMin>ej)) { gMin=ej; label(i)=j; } } } } /** Update bone transformation from label */ void computeTransFromLabel() { m=Matrix4::Identity().replicate(nF, nB); #pragma omp parallel for for (int k=0; k<nF; k++) { MatrixX qpT=MatrixX::Zero(4, 4*nB); for (int i=0; i<nV; i++) if (label(i)!=-1) qpT.blk4(0, label(i))+=Vector4(v.vec3(k, i).template cast<_Scalar>().homogeneous())*u.vec3(subjectID(k), i).homogeneous().transpose(); for (int j=0; j<nB; j++) qpT2m(qpT.blk4(0, j), k, j); } } /** Set matrix w from label */ void labelToWeights() { std::vector<Triplet, Eigen::aligned_allocator<Triplet>> trip(nV); for (int i=0; i<nV; i++) 
trip[i]=Triplet(label(i), i, _Scalar(1)); w.resize(nB, nV); w.setFromTriplets(trip.begin(), trip.end()); } /** Split bone clusters @param maxB is the maximum number of bones @param threshold*2 is the minimum size of the bone cluster to be splited */ void split(int maxB, int threshold) { //Centroids MatrixX cu=MatrixX::Zero(3*nS, nB); Eigen::VectorXi s=Eigen::VectorXi::Zero(nB); for (int i=0; i<nV; i++) { cu.col(label(i))+=u.col(i); s(label(i))++; } for (int j=0; j<nB; j++) if (s(j)!=0) cu.col(j)/=_Scalar(s(j)); //Seed & cluster error Eigen::VectorXi seed=Eigen::VectorXi::Constant(nB, -1); VectorX gMax(nB); VectorX ce=VectorX::Zero(nB); #pragma omp parallel for for (int i=0; i<nV; i++) { int j=label(i); double e=errorVtxBone(i, j, false); #pragma omp atomic ce(j)+=e; double tmp=e*(u.col(i)-cu.col(j)).squaredNorm(); if ((seed(j)==-1)||(tmp>gMax(j))) { #pragma omp critical if ((seed(j)==-1)||(tmp>gMax(j))) { gMax(j)=tmp; seed(j)=i; } } } int countID=nB; _Scalar avgErr=ce.sum()/nB; for (int j=0; j<nB; j++) if ((countID<maxB)&&(s(j)>threshold*2)&&(ce(j)>avgErr/100)) { int newLabel=countID++; int i=seed(j); for (typename SparseMatrix::InnerIterator it(laplacian, i); it; ++it) label(it.row())=newLabel; } nB=countID; } /** Remove bones with small number of associated vertices @param threshold is the minimum number of vertices assigned to a bone */ void pruneBones(int threshold) { Eigen::VectorXi s=Eigen::VectorXi::Zero(nB); #pragma omp parallel for for (int i=0; i<nV; i++) { #pragma omp atomic s(label(i))++; } Eigen::VectorXi newID(nB); int countID=0; for (int j=0; j<nB; j++) if (s(j)<threshold) newID(j)=-1; else newID(j)=countID++; if (countID==nB) return; for (int j=0; j<nB; j++) if (newID(j)!=-1) m.template middleCols<4>(newID(j)*4)=m.template middleCols<4>(j*4); #pragma omp parallel for for (int i=0; i<nV; i++) label(i)=newID(label(i)); nB=countID; m.conservativeResize(nF*4, nB*4); computeLabel(); } /** Initialize skinning weights with rigid bind to the best bone */ 
void initWeights() { label=Eigen::VectorXi::Constant(nV, -1); #pragma omp parallel for for (int i=0; i<nV; i++) { _Scalar gMin; for (int j=0; j<nB; j++) { _Scalar ej=errorVtxBone(i, j, false); if ((label(i)==-1)||(gMin>ej)) { gMin=ej; label(i)=j; } } } computeLabel(); labelToWeights(); } //! vuT.blk4(k, j) = \sum_{i=0}^{nV-1} w(j, i)*v.vec3(k, i).homogeneous()*u.vec3(subjectID(k), i).homogeneous()^T MatrixX vuT; /** Pre-compute vuT with bone translations affinity soft constraint */ void compute_vuT() { vuT=MatrixX::Zero(nF*4, nB*4); #pragma omp parallel for for (int k=0; k<nF; k++) { MatrixX vuTp=MatrixX::Zero(4, nB*4); for (int i=0; i<nV; i++) for (typename SparseMatrix::InnerIterator it(w, i); it; ++it) { Matrix4 tmp=Vector4(v.vec3(k, i).template cast<_Scalar>().homogeneous())*u.vec3(subjectID(k), i).homogeneous().transpose(); vuT.blk4(k, it.row())+=it.value()*tmp; vuTp.blk4(0, it.row())+=pow(it.value(), transAffineNorm)*tmp; } for (int j=0; j<nB; j++) if (vuTp(3, j*4+3)!=0) vuT.blk4(k, j)+=(transAffine*vuT(k*4+3, j*4+3)/vuTp(3, j*4+3))*vuTp.blk4(0, j); } } //! 
uuT is a sparse block matrix, uuT(j, k).block<4, 4>(s*4, 0) = \sum{i=0}{nV-1} w(j, i)*w(k, i)*u.col(i).segment<3>(s*3).homogeneous().transpose()*u.col(i).segment<3>(s*3).homogeneous() struct SparseMatrixBlock { EIGEN_MAKE_ALIGNED_OPERATOR_NEW MatrixX val; Eigen::VectorXi innerIdx, outerIdx; } uuT; /** Pre-compute uuT for bone transformations update */ void compute_uuT() { Eigen::MatrixXi pos=Eigen::MatrixXi::Constant(nB, nB, -1); #pragma omp parallel for for (int i=0; i<nV; i++) for (typename SparseMatrix::InnerIterator it(w, i); it; ++it) for (typename SparseMatrix::InnerIterator jt(w, i); jt; ++jt) pos(it.row(), jt.row())=1; uuT.outerIdx.resize(nB+1); uuT.innerIdx.resize(nB*nB); int nnz=0; for (int j=0; j<nB; j++) { uuT.outerIdx(j)=nnz; for (int i=0; i<nB; i++) if (pos(i, j)!=-1) { uuT.innerIdx(nnz)=i; pos(i, j)=nnz++; } } uuT.outerIdx(nB)=nnz; uuT.innerIdx.conservativeResize(nnz); uuT.val=MatrixX::Zero(nS*4, nnz*4); #pragma omp parallel for for (int i=0; i<nV; i++) for (typename SparseMatrix::InnerIterator it(w, i); it; ++it) for (typename SparseMatrix::InnerIterator jt(w, i); jt; ++jt) if (it.row()>=jt.row()) { double _w=it.value()*jt.value(); MatrixX _uuT(4*nS, 4); Vector4 _u; for (int s=0; s<nS; s++) { _u=u.vec3(s, i).homogeneous(); _uuT.blk4(s, 0)=_w*_u*_u.transpose(); } int p=pos(it.row(), jt.row())*4; for (int c=0; c<4; c++) for (int r=0; r<4*nS; r++) #pragma omp atomic uuT.val(r, p+c)+=_uuT(r, c); } for (int i=0; i<nB; i++) for (int j=i+1; j<nB; j++) if (pos(i, j)!=-1) uuT.val.middleCols(pos(i, j)*4, 4)=uuT.val.middleCols(pos(j, i)*4, 4); } //! 
mTm.size = (4*nS*nB, 4*nB), where mTm.block<4, 4>(s*nB+i, j) = \sum_{k=fStart(s)}^{fStart(s+1)-1} m.block<3, 4>(k*4, i*4)^T*m.block<3, 4>(k*4, j*4) MatrixX mTm; /** Pre-compute mTm for weights update */ void compute_mTm() { Eigen::MatrixXi idx(2, nB*(nB+1)/2); int nPairs=0; for (int i=0; i<nB; i++) for (int j=i; j<nB; j++) { idx(0, nPairs)=i; idx(1, nPairs)=j; nPairs++; } mTm=MatrixX::Zero(nS*nB*4, nB*4); #pragma omp parallel for for (int p=0; p<nPairs; p++) { int i=idx(0, p); int j=idx(1, p); for (int k=0; k<nF; k++) mTm.blk4(subjectID(k)*nB+i, j)+=m.blk4(k, i).template topRows<3>().transpose()*m.blk4(k, j).template topRows<3>(); if (i!=j) for (int s=0; s<nS; s++) mTm.blk4(s*nB+j, i)=mTm.blk4(s*nB+i, j); } } //! aTb.col(i) is the A^Tb for vertex i, where A.size = (3*nF, nB), A.col(j).segment<3>(f*3) is the transformed position of vertex i by bone j at frame f, b = v.col(i). MatrixX aTb; /** Pre-compute aTb for weights update */ void compute_aTb() { #pragma omp parallel for for (int i=0; i<nV; i++) for (int j=0; j<nB; j++) if ((aTb(j, i)==0)&&(ws(j, i)>weightEps)) for (int k=0; k<nF; k++) aTb(j, i)+=v.vec3(k, i).template cast<_Scalar>().dot(m.blk4(k, j).template topRows<3>()*u.vec3(subjectID(k), i).homogeneous()); } //! Size of the model=RMS distance to centroid _Scalar modelSize; //! Laplacian matrix SparseMatrix laplacian; //! 
LU factorization of Laplacian Eigen::SparseLU<SparseMatrix> smoothSolver; /** Pre-compute Laplacian and LU factorization */ void computeSmoothSolver() { int nFV=(int)fv.size(); _Scalar epsDis=0; for (int f=0; f<nFV; f++) { int nf=(int)fv[f].size(); for (int g=0; g<nf; g++) { int i=fv[f][g]; int j=fv[f][(g+1)%nf]; epsDis+=(u.col(i)-u.col(j)).norm(); } } epsDis=epsDis*weightEps/(_Scalar)nS; std::vector<Triplet, Eigen::aligned_allocator<Triplet>> triplet; VectorX d=VectorX::Zero(nV); #pragma omp parallel for for (int f=0; f<nFV; f++) { int nf=(int)fv[f].size(); for (int g=0; g<nf; g++) { int i=fv[f][g]; int j=fv[f][(g+1)%nf]; if (i<j) { double val=0; for (int s=0; s<nS; s++) { double du=(u.vec3(s, i)-u.vec3(s, j)).norm(); for (int k=fStart(s); k<fStart(s+1); k++) val+=pow((v.vec3(k, i).template cast<_Scalar>()-v.vec3(k, j).template cast<_Scalar>()).norm()-du, 2); } val=1/(sqrt(val/nF)+epsDis); #pragma omp critical triplet.push_back(Triplet(i, j, -val)); #pragma omp atomic d(i)+=val; #pragma omp critical triplet.push_back(Triplet(j, i, -val)); #pragma omp atomic d(j)+=val; } } } for (int i=0; i<nV; i++) triplet.push_back(Triplet(i, i, d(i))); laplacian.resize(nV, nV); laplacian.setFromTriplets(triplet.begin(), triplet.end()); for (int i=0; i<nV; i++) if (d(i)!=0) laplacian.row(i)/=d(i); laplacian=weightsSmoothStep*laplacian+SparseMatrix((VectorX::Ones(nV)).asDiagonal()); smoothSolver.compute(laplacian); } //! Smoothed skinning weights MatrixX ws; /** Implicit skinning weights Laplacian smoothing */ void compute_ws() { ws=w.transpose(); #pragma omp parallel for for (int j=0; j<nB; j++) ws.col(j)=smoothSolver.solve(ws.col(j)); ws.transposeInPlace(); #pragma omp parallel for for (int i=0; i<nV; i++) { ws.col(i)=ws.col(i).cwiseMax(0.0); _Scalar si=ws.col(i).sum(); if (si<_Scalar(0.1)) ws.col(i)=VectorX::Constant(nB, _Scalar(1)/nB); else ws.col(i)/=si; } } //! 
//! (continuation) Per-vertex weights solver
ConvexLS<_Scalar> wSolver;

/** Pre-compute aTa for weights update on one vertex
    @param i is the vertex index.
    @param aTa is the by-reference output of A^TA for vertex i, where A.size = (3*nF, nB), A.col(j).segment<3>(f*3) is the transformed position of vertex i by bone j at frame f.
*/
void compute_aTa(int i, MatrixX& aTa) {
  // aTa is symmetric: fill the upper triangle using the precomputed mTm
  // blocks, then mirror it into the lower triangle.
  aTa=MatrixX::Zero(nB, nB);
  for (int j1=0; j1<nB; j1++)
    for (int j2=j1; j2<nB; j2++) {
      // Accumulate u_i~^T (m_j1^T m_j2) u_i~ over all subjects, where u_i~
      // is the homogeneous rest-pose position of vertex i.
      for (int s=0; s<nS; s++)
        aTa(j1, j2)+=u.vec3(s, i).homogeneous().dot(mTm.blk4(s*nB+j1, j2)*u.vec3(s, i).homogeneous());
      if (j1!=j2) aTa(j2, j1)=aTa(j1, j2);
    }
}
};  // class DemBones

}  // namespace Dem

// Undefine the MatBlocks convenience macros only if this header was the one
// that introduced them.
#ifdef DEM_BONES_DEM_BONES_MAT_BLOCKS_UNDEFINED
#undef blk4
#undef rotMat
#undef transVec
#undef vec3
#undef DEM_BONES_MAT_BLOCKS
#endif

#endif
damax.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/dzamax.c, normal z -> d, Fri Sep 28 17:38:01 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"

/******************************************************************************/
/**
 * Computes, for each column (PlasmaColumnwise) or row (PlasmaRowwise) of the
 * m-by-n matrix pA (leading dimension lda), the maximum absolute value, and
 * stores the results in values.
 *
 * @retval PlasmaSuccess on success, a negative argument index or a PLASMA
 *         error code otherwise.
 */
int plasma_damax(plasma_enum_t colrow,
                 int m, int n,
                 double *pA, int lda, double *values)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((colrow != PlasmaColumnwise) && (colrow != PlasmaRowwise)) {
        plasma_error("illegal value of colrow");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (imin(n, m) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Allocate workspace: one partial maximum per tile row (columnwise)
    // or per tile column (rowwise).
    double *work = NULL;
    switch (colrow) {
    case PlasmaColumnwise:
        work = (double*)malloc((size_t)A.mt*A.n*sizeof(double));
        break;
    case PlasmaRowwise:
        work = (double*)malloc((size_t)A.m*A.nt*sizeof(double));
        break;
    default:
        // Unreachable: colrow was validated above; keeps work initialized.
        break;
    }
    if (work == NULL) {
        plasma_error("malloc() failed");
        // Fix: release the tile descriptor; the original code leaked A on
        // this error path.
        plasma_desc_destroy(&A);
        return PlasmaErrorOutOfMemory;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        // Fix: the original code ignored this return value.
        free(work);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        // Fix: the original code ignored this return value.
        free(work);
        plasma_desc_destroy(&A);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_damax(colrow, A, work, values, &sequence, &request);
    }
    // implicit synchronization

    free(work);

    // Free matrix in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}

/******************************************************************************/
/**
 * Asynchronous tile-layout variant of plasma_damax().  Validates the
 * arguments, then dispatches the parallel kernel; errors are reported
 * through the sequence/request pair rather than the return value.
 */
void plasma_omp_damax(plasma_enum_t colrow, plasma_desc_t A,
                      double *work, double *values,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((colrow != PlasmaColumnwise) && (colrow != PlasmaRowwise)) {
        plasma_error("illegal value of colrow");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function.
    plasma_pdamax(colrow, A, work, values, sequence, request);
}
nodal_residualbased_elimination_builder_and_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi, Alessandro Franci // // #if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER) #define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* External includes */ // #define USE_GOOGLE_HASH #ifdef USE_GOOGLE_HASH #include "sparsehash/dense_hash_set" //included in external libraries #else #include <unordered_set> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "pfem_fluid_dynamics_application_variables.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class NodalResidualBasedEliminationBuilderAndSolver * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. 
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual * @author Riccardo Rossi */ template <class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class NodalResidualBasedEliminationBuilderAndSolver : public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolver); typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef Node<3> NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; typedef Vector VectorType; typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType; ///@} ///@name Life Cycle ///@{ /** Constructor. */ NodalResidualBasedEliminationBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver) { // KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolver") << "Using the standard builder and solver " << std::endl; } /** Destructor. 
     */
    ~NodalResidualBasedEliminationBuilderAndSolver() override
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Evaluates the nodal fluid material coefficients used by the assembly.
     * @param itNode iterator to the node being processed
     * @param density output: nodal DENSITY
     * @param deviatoricCoeff output: effective dynamic viscosity; for a yield-stress
     *        material (YIELD_SHEAR > 0) the Papanastasiou regularization is applied
     * @param volumetricCoeff output: time-scaled bulk modulus, further reduced by
     *        the nodal bulk-reduction factor when positive
     * @param timeInterval current time step size
     * @param nodalVolume volume associated to the node
     */
    void SetMaterialPropertiesToFluid(
        ModelPart::NodeIterator itNode,
        double &density,
        double &deviatoricCoeff,
        double &volumetricCoeff,
        double timeInterval,
        double nodalVolume)
    {
        density = itNode->FastGetSolutionStepValue(DENSITY);
        deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);

        double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
            double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }

        volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);

        if (volumetricCoeff > 0)
        {
            // NOTE(review): recomputed value equals the one above; the net effect is
            // volumetricCoeff = density * nodalVolume / timeInterval — confirm intended.
            volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
            double bulkReduction = density * nodalVolume / (timeInterval * volumetricCoeff);
            volumetricCoeff *= bulkReduction;
        }
    }

    /**
     * @brief Assembles LHS and RHS of the momentum equation node-by-node.
     * @details For every node with a valid neighbourhood (NODAL_SFD_NEIGHBOURS_ORDER
     * size > 1 and positive NODAL_VOLUME) a local system is built from dynamic,
     * external (volume acceleration) and internal (Cauchy stress) force terms,
     * using nodal shape-function-derivative (SFD) data, and assembled into A and b.
     * Handles both 2D and 3D cases.
     * @param pScheme The integration scheme (only checked for non-null here)
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param b The RHS vector
     */
    void BuildFluidNodally(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &b)
    {
        KRATOS_TRY

        KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

        /* std::cout<<"Building LHS and RHS of Momentum Equation Nodally"<<std::endl; */

        //contributions to the system
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        //vector containing the localization in the system of the different terms
        Element::EquationIdVectorType EquationId;
        const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
        const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
        const double timeInterval = CurrentProcessInfo[DELTA_TIME];
        const double FourThirds = 4.0 / 3.0;
        const double nTwoThirds = -2.0 / 3.0;
        // theta-scheme weight: 0.5 blends current and previous step quantities
        double theta = 0.5;
        array_1d<double, 3> Acc(3, 0.0);
        // array_1d<double,6> Sigma(6,0.0);
        double pressure = 0;
        double dNdXi = 0;
        double dNdYi = 0;
        double dNdZi = 0;
        double dNdXj = 0;
        double dNdYj = 0;
        double dNdZj = 0;
        unsigned int firstRow = 0;
        unsigned int firstCol = 0;
        double density = 0;
        double deviatoricCoeff = 0;
        double volumetricCoeff = 0;

        /* #pragma omp parallel */
        //       {
        ModelPart::NodeIterator NodesBegin;
        ModelPart::NodeIterator NodesEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

        for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
        {
            NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
            Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
            // const unsigned int neighSize = neighb_nodes.size()+1;
            const unsigned int neighSize = nodalSFDneighboursId.size();
            const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);

            if (neighSize > 1 && nodalVolume > 0)
            {
                // local system size = number of SFD entries (dimension * (node + neighbours))
                const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size();

                if (LHS_Contribution.size1() != localSize)
                    LHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!!

                if (RHS_Contribution.size() != localSize)
                    RHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!!

                if (EquationId.size() != localSize)
                    EquationId.resize(localSize, false);

                noalias(LHS_Contribution) = ZeroMatrix(localSize, localSize);
                noalias(RHS_Contribution) = ZeroVector(localSize);

                this->SetMaterialPropertiesToFluid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume);

                firstRow = 0;
                firstCol = 0;

                if (dimension == 2)
                {
                    //////////////////////////// LHS TERMS //////////////////////////////
                    // lumped-mass dynamic term (2/dt from the time integration scheme)
                    LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
                    LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;

                    //////////////////////////// RHS TERMS //////////////////////////////
                    //-------- DYNAMIC FORCES TERM -------//
                    Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval -
                          itNode->FastGetSolutionStepValue(ACCELERATION, 0);

                    RHS_Contribution[0] += -nodalVolume * density * Acc[0];
                    RHS_Contribution[1] += -nodalVolume * density * Acc[1];

                    //-------- EXTERNAL FORCES TERM -------//
                    array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);

                    // double posX= itNode->X();
                    // double posY= itNode->Y();

                    // double coeffX =(12.0-24.0*posY)*pow(posX,4);
                    // coeffX += (-24.0+48.0*posY)*pow(posX,3);
                    // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2);
                    // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX;
                    // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3);

                    // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3);
                    // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2);
                    // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX;
                    // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4);

                    // RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX;
                    // RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY;

                    RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
                    RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];

                    //-------- INTERNAL FORCES TERM -------//
                    // Voigt notation in 2D: [sigma_xx, sigma_yy, sigma_xy]
                    array_1d<double, 3> Sigma(3, 0.0);
                    Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);

                    pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta);

                    Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure;
                    Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure;

                    const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
                    EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
                    EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();

                    for (unsigned int i = 0; i < neighSize; i++)
                    {
                        dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
                        dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1];

                        RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]);
                        RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]);

                        for (unsigned int j = 0; j < neighSize; j++)
                        {
                            dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow];
                            dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1];

                            LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;

                            LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta;

                            firstRow += 2;
                        }

                        firstRow = 0;
                        firstCol += 2;

                        if (i < neighb_nodes.size())
                        {
                            EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
                            EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                        }
                    }
                    /* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */
                }
                else if (dimension == 3)
                {
                    //////////////////////////// LHS TERMS //////////////////////////////
                    LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
                    LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;
                    LHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval;

                    //////////////////////////// RHS TERMS //////////////////////////////
                    //-------- DYNAMIC FORCES TERM -------//
                    Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval -
                          itNode->FastGetSolutionStepValue(ACCELERATION, 0);

                    RHS_Contribution[0] += -nodalVolume * density * Acc[0];
                    RHS_Contribution[1] += -nodalVolume * density * Acc[1];
                    RHS_Contribution[2] += -nodalVolume * density * Acc[2];

                    //-------- EXTERNAL FORCES TERM -------//
                    array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);

                    RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
                    RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];
                    RHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2];

                    //-------- INTERNAL FORCES TERM -------//
                    // Voigt notation in 3D: [s_xx, s_yy, s_zz, s_xy, s_xz, s_yz]
                    array_1d<double, 6> Sigma(6, 0.0);
                    Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);

                    pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta);

                    Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure;
                    Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure;
                    Sigma[2] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2] + pressure;

                    const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
                    EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
                    EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                    EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();

                    for (unsigned int i = 0; i < neighSize; i++)
                    {
                        dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
                        dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1];
                        dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2];

                        RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]);
                        RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]);
                        RHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]);

                        for (unsigned int j = 0; j < neighSize; j++)
                        {
                            dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow];
                            dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1];
                            dNdZj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 2];

                            LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta;

                            LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta;

                            LHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta;

                            firstRow += 3;
                        }

                        firstRow = 0;
                        firstCol += 3;

                        if (i < neighb_nodes.size())
                        {
                            EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
                            EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                            EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
                        }
                    }
                }

#ifdef _OPENMP
                Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
                Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
            }
        }
        //       }

        KRATOS_CATCH("")
    }

    /**
     * @brief This is a call to the linear system solver
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     */
    void SystemSolve(
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b) override
    {
        KRATOS_TRY

        double norm_b;
        if (TSparseSpace::Size(b) != 0)
            norm_b = TSparseSpace::TwoNorm(b);
        else
            norm_b = 0.00;

        if (norm_b != 0.00)
        {
            //do solve
            BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
        }
        else
            // zero RHS: the solution increment is trivially zero
            TSparseSpace::SetToZero(Dx);

        // Prints informations about the current time
        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

        KRATOS_CATCH("")
    }

    /**
     *@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     * @param rModelPart The model part of the problem to solve
     */
    void SystemSolveWithPhysics(
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b,
        ModelPart &rModelPart)
    {
        KRATOS_TRY

        double norm_b;
        if (TSparseSpace::Size(b) != 0)
            norm_b = TSparseSpace::TwoNorm(b);
        else
            norm_b = 0.00;

        if (norm_b != 0.00)
        {
            //provide physical data as needed
            if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
                BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);

            //do solve
            BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
        }
        else
        {
            TSparseSpace::SetToZero(Dx);
            KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
        }

        // Prints informations about the current time
        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief Function to perform the building and solving phase at the same time.
     * @details It is ideally the fastest and safer function to use when it is possible to solve
     * just after building
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     */
    void BuildAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b) override
    {
        KRATOS_TRY

        Timer::Start("Build");

        // boost::timer m_build_time;

        // nodal (not elemental) assembly of the momentum system
        BuildFluidNodally(pScheme, rModelPart, A, b);

        // std::cout << "MOMENTUM EQ: build_time : " << m_build_time.elapsed() << std::endl;

        Timer::Stop("Build");

        //         ApplyPointLoads(pScheme,rModelPart,b);

        // Does nothing...dirichlet conditions are naturally dealt with in defining the residual
        ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system"
                                                                                         << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

        //         const double start_solve = OpenMPUtils::GetCurrentTime();
        //         Timer::Start("Solve");

        /* boost::timer m_solve_time; */
        SystemSolveWithPhysics(A, Dx, b, rModelPart);
        /* std::cout << "MOMENTUM EQ: solve_time : " << m_solve_time.elapsed() << std::endl; */

        //         Timer::Stop("Solve");
        //         const double stop_solve = OpenMPUtils::GetCurrentTime();
        //         KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system"
                                                                                         << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief Builds the list of the DofSets involved in the problem by "asking" to each element
     * and condition its Dofs.
     * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
     * way the matrix and RHS are built
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart) override
    {
        KRATOS_TRY;

        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;

        //Gets the array of elements from the modeler
        ElementsArrayType &pElements = rModelPart.Elements();
        const int nelements = static_cast<int>(pElements.size());

        Element::DofsVectorType ElementalDofList;

        const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();

        unsigned int nthreads = ParallelUtilities::GetNumThreads();

        //         typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
        //         typedef std::unordered_set < NodeType::DofType::Pointer,
        //                 DofPointerHasher,
        //                 DofPointerComparor,
        //                 allocator_type >  set_type;

#ifdef USE_GOOGLE_HASH
        typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
        typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
        //

        // one DOF set per thread, merged in a tree reduction further below
        std::vector<set_type> dofs_aux_list(nthreads);
        //         std::vector<allocator_type> allocators(nthreads);

        for (int i = 0; i < static_cast<int>(nthreads); i++)
        {
#ifdef USE_GOOGLE_HASH
            dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
            //             dofs_aux_list[i] = set_type( allocators[i]);
            dofs_aux_list[i].reserve(nelements);
#endif
        }

        for (int i = 0; i < static_cast<int>(nelements); ++i)
        {
            auto it_elem = pElements.begin() + i;
            const IndexType this_thread_id = OpenMPUtils::ThisThread();

            // Gets list of Dof involved on every element
            pScheme->GetDofList(*it_elem, ElementalDofList, CurrentProcessInfo);
            dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
        }

        ConditionsArrayType &pConditions = rModelPart.Conditions();
        const int nconditions = static_cast<int>(pConditions.size());
#pragma omp parallel for firstprivate(nconditions, ElementalDofList)
        for (int i = 0; i < nconditions; ++i)
        {
            auto it_cond = pConditions.begin() + i;
            const IndexType this_thread_id = OpenMPUtils::ThisThread();

            // Gets list of Dof involved on every element
            pScheme->GetDofList(*it_cond, ElementalDofList, CurrentProcessInfo);
            dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
        }

        //here we do a reduction in a tree so to have everything on thread 0
        unsigned int old_max = nthreads;
        unsigned int new_max = ceil(0.5 * static_cast<double>(old_max));
        while (new_max >= 1 && new_max != old_max)
        {
            //          //just for debugging
            //          std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
            //          for (int i = 0; i < new_max; i++)
            //          {
            //             if (i + new_max < old_max)
            //             {
            //                std::cout << i << " - " << i + new_max << std::endl;
            //             }
            //          }
            //          std::cout << "********************" << std::endl;

#pragma omp parallel for
            for (int i = 0; i < static_cast<int>(new_max); i++)
            {
                if (i + new_max < old_max)
                {
                    dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
                    dofs_aux_list[i + new_max].clear();
                }
            }

            old_max = new_max;
            new_max = ceil(0.5 * static_cast<double>(old_max));
        }

        DofsArrayType Doftemp;
        BaseType::mDofSet = DofsArrayType();

        Doftemp.reserve(dofs_aux_list[0].size());
        for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
        {
            Doftemp.push_back((*it));
        }
        Doftemp.Sort();

        BaseType::mDofSet = Doftemp;

        // Throws an execption if there are no Degrees of freedom involved in the analysis
        KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;

        BaseType::mDofSetIsInitialized = true;

        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;

#ifdef _OPENMP
        // (re)build one OpenMP lock per equation row for the assembly phase
        if (mlock_array.size() != 0)
        {
            for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
                omp_destroy_lock(&mlock_array[i]);
        }

        mlock_array.resize(BaseType::mDofSet.size());

        for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
            omp_init_lock(&mlock_array[i]);
#endif

        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is tobe done only in debug mode
#ifdef KRATOS_DEBUG
        if (BaseType::GetCalculateReactionsFlag())
        {
            for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
                                                                 << "Node : " << dof_iterator->Id() << std::endl
                                                                 << "Dof : " << (*dof_iterator) << std::endl
                                                                 << "Not possible to calculate reactions." << std::endl;
            }
        }
#endif

        KRATOS_CATCH("");
    }

    /**
     * @brief Organises the dofset in order to speed up the building phase
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpSystem(
        ModelPart &rModelPart) override
    {
        // Set equation id for degrees of freedom
        // the free degrees of freedom are positioned at the beginning of the system,
        // while the fixed one are at the end (in opposite order).
        //
        // that means that if the EquationId is greater than "mEquationSystemSize"
        // the pointed degree of freedom is restrained
        //
        int free_id = 0;
        int fix_id = BaseType::mDofSet.size();

        for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            if (dof_iterator->IsFixed())
                dof_iterator->SetEquationId(--fix_id);
            else
                dof_iterator->SetEquationId(free_id++);

        // after the loop fix_id equals the number of free DOFs
        BaseType::mEquationSystemSize = fix_id;
    }

    //**************************************************************************
    //**************************************************************************

    /**
     * @brief Resizes (and allocates on first use) the system matrix, the unknowns
     * vector, the RHS vector and, if requested, the reactions vector, rebuilding
     * the sparse matrix graph when needed.
     */
    void ResizeAndInitializeVectors(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixPointerType &pA,
        TSystemVectorPointerType &pDx,
        TSystemVectorPointerType &pb,
        ModelPart &rModelPart) override
    {
        KRATOS_TRY

        // boost::timer m_contruct_matrix;

        if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
        {
            TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
            pA.swap(pNewA);
        }
        if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
        {
            TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
            pDx.swap(pNewDx);
        }
        if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
        {
            TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
            pb.swap(pNewb);
        }
        if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
        {
            TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
            BaseType::mpReactionsVector.swap(pNewReactionsVector);
        }

        TSystemMatrixType &A = *pA;
        TSystemVectorType &Dx = *pDx;
        TSystemVectorType &b = *pb;

        //resizing the system vectors and matrix
        if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
        {
            A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
            ConstructMatrixStructure(pScheme, A, rModelPart);
        }
        else
        {
            if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
            {
                // NOTE(review): the resize below is unreachable — KRATOS_ERROR throws first.
                KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
                KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl;
                A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
                ConstructMatrixStructure(pScheme, A, rModelPart);
            }
        }
        if (Dx.size() != BaseType::mEquationSystemSize)
            Dx.resize(BaseType::mEquationSystemSize, false);
        if (b.size() != BaseType::mEquationSystemSize)
            b.resize(BaseType::mEquationSystemSize, false);

        //if needed resize the vector for the calculation of reactions
        if (BaseType::mCalculateReactionsFlag == true)
        {
            unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
            if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
                BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
        }

        // std::cout << "MOMENTUM EQ: contruct_matrix : " << m_contruct_matrix.elapsed() << std::endl;

        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************

    /**
     * @brief Applies the dirichlet conditions. This operation may be very heavy or completely
     * unexpensive depending on the implementation choosen and on how the System Matrix is built.
     * @details For explanation of how it works for a particular implementation the user
     * should refer to the particular Builder And Solver choosen
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     * @note Intentionally empty: with the elimination approach the restrained DOFs
     * never enter the assembled system, so nothing has to be imposed here.
     */
    void ApplyDirichletConditions(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b) override
    {
    }

    /**
     * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
     */
    void Clear() override
    {
        this->mDofSet = DofsArrayType();

        if (this->mpReactionsVector != NULL)
            TSparseSpace::Clear((this->mpReactionsVector));
        //             this->mReactionsVector = TSystemVectorType();

        this->mpLinearSystemSolver->Clear();

        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed
     * on the input provided. Checks can be "expensive" as the function is designed
     * to catch user's errors.
     * @param rModelPart The model part of the problem to solve
     * @return 0 all ok
     */
    int Check(ModelPart &rModelPart) override
    {
        KRATOS_TRY

        return 0;
        KRATOS_CATCH("");
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief Assembles one local contribution into the global system.
     * @details Rows whose equation id is >= mEquationSystemSize (restrained DOFs)
     * are skipped; under OpenMP each written row is protected by its per-row lock.
     */
    void Assemble(
        TSystemMatrixType &A,
        TSystemVectorType &b,
        const LocalSystemMatrixType &LHS_Contribution,
        const LocalSystemVectorType &RHS_Contribution,
        const Element::EquationIdVectorType &EquationId
#ifdef _OPENMP
        ,
        std::vector<omp_lock_t> &lock_array
#endif
    )
    {
        unsigned int local_size = LHS_Contribution.size1();

        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            unsigned int i_global = EquationId[i_local];

            if (i_global < BaseType::mEquationSystemSize)
            {
#ifdef _OPENMP
                omp_set_lock(&lock_array[i_global]);
#endif
                b[i_global] += RHS_Contribution(i_local);
                for (unsigned int j_local = 0; j_local < local_size; j_local++)
                {
                    unsigned int j_global = EquationId[j_local];
                    if (j_global < BaseType::mEquationSystemSize)
                    {
                        A(i_global, j_global) += LHS_Contribution(i_local, j_local);
                    }
                }
#ifdef _OPENMP
                omp_unset_lock(&lock_array[i_global]);
#endif
            }
            //note that assembly on fixed rows is not performed here
        }
    }

    //**************************************************************************

    virtual void ConstructMatrixStructure(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType &A,
        ModelPart &rModelPart)
    {
        //filling with zero the matrix (creating the structure)
        Timer::Start("MatrixStructure");

        ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();

        // Getting the array of the conditions
        const int nconditions = static_cast<int>(rModelPart.Conditions().size());
        ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();

        const std::size_t equation_size = BaseType::mEquationSystemSize;
#ifdef USE_GOOGLE_HASH std::vector<google::dense_hash_set<std::size_t>> indices(equation_size); const std::size_t empty_key = 2 * equation_size + 10; #else std::vector<std::unordered_set<std::size_t>> indices(equation_size); #endif #pragma omp parallel for firstprivate(equation_size) for (int iii = 0; iii < static_cast<int>(equation_size); iii++) { #ifdef USE_GOOGLE_HASH indices[iii].set_empty_key(empty_key); #else indices[iii].reserve(40); #endif } Element::EquationIdVectorType EquationId; ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); if (EquationId.size() != localSize) EquationId.resize(localSize, false); unsigned int firstCol = 0; const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { firstCol += dimension; EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) { EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } for (std::size_t i = 0; i < EquationId.size(); i++) { if (EquationId[i] < BaseType::mEquationSystemSize) { #ifdef _OPENMP 
omp_set_lock(&mlock_array[EquationId[i]]); #endif auto &row_indices = indices[EquationId[i]]; for (auto it = EquationId.begin(); it != EquationId.end(); it++) { if (*it < BaseType::mEquationSystemSize) row_indices.insert(*it); } #ifdef _OPENMP omp_unset_lock(&mlock_array[EquationId[i]]); #endif } } } Element::EquationIdVectorType ids(3, 0); #pragma omp parallel for firstprivate(nconditions, ids) for (int iii = 0; iii < nconditions; iii++) { typename ConditionsArrayType::iterator i_condition = cond_begin + iii; pScheme->EquationId(*i_condition, ids, CurrentProcessInfo); for (std::size_t i = 0; i < ids.size(); i++) { if (ids[i] < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&mlock_array[ids[i]]); #endif auto &row_indices = indices[ids[i]]; for (auto it = ids.begin(); it != ids.end(); it++) { if (*it < BaseType::mEquationSystemSize) row_indices.insert(*it); } #ifdef _OPENMP omp_unset_lock(&mlock_array[ids[i]]); #endif } } } //count the row sizes unsigned int nnz = 0; for (unsigned int i = 0; i < indices.size(); i++) nnz += indices[i].size(); A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); double *Avalues = A.value_data().begin(); std::size_t *Arow_indices = A.index1_data().begin(); std::size_t *Acol_indices = A.index2_data().begin(); //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! 
Arow_indices[0] = 0; for (int i = 0; i < static_cast<int>(A.size1()); i++) Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(A.size1()); i++) { const unsigned int row_begin = Arow_indices[i]; const unsigned int row_end = Arow_indices[i + 1]; unsigned int k = row_begin; for (auto it = indices[i].begin(); it != indices[i].end(); it++) { Acol_indices[k] = *it; Avalues[k] = 0.0; k++; } std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); } A.set_filled(indices.size() + 1, nnz); Timer::Stop("MatrixStructure"); } void AssembleLHS( TSystemMatrixType &A, LocalSystemMatrixType &LHS_Contribution, Element::EquationIdVectorType &EquationId) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ #ifdef _OPENMP std::vector<omp_lock_t> mlock_array; #endif ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while (i != endit && (*i) != candidate) { i++; } if (i == endit) { v.push_back(candidate); } } void AssembleRHS( TSystemVectorType &b, const LocalSystemVectorType &RHS_Contribution, const Element::EquationIdVectorType &EquationId) { unsigned int local_size = RHS_Contribution.size(); if 
(BaseType::mCalculateReactionsFlag == false) { for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double &b_value = b[i_global]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } else { TSystemVectorType &ReactionsVector = *BaseType::mpReactionsVector; for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double &b_value = b[i_global]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } else //fixed dof { double &b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } } //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType &A, LocalSystemMatrixType &LHS_Contribution, Element::EquationIdVectorType &EquationId) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { int j_global = EquationId[j_local]; A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class NodalResidualBasedEliminationBuilderAndSolver */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/memory_.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resample.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImage() returns the second image composited onto the first % at the specified offset, using the specified composite method. 
%
%  The format of the CompositeImage method is:
%
%      MagickBooleanType CompositeImage(Image *image,
%        const Image *source_image,const CompositeOperator compose,
%        const MagickBooleanType clip_to_self,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the canvas image, modified by the composition
%
%    o source_image: the source image.
%
%    o compose: This operator affects how the composite is applied to
%      the image.  The operators and how they are utilized are listed here
%      http://www.w3.org/TR/SVG12/#compositing.
%
%    o clip_to_self: set to MagickTrue to limit composition to area composed.
%
%    o x_offset: the column offset of the composited image.
%
%    o y_offset: the row offset of the composited image.
%
%  Extra Controls from Image meta-data in 'image' (artifacts)
%
%    o "compose:args"
%        A string containing extra numerical arguments for specific compose
%        methods, generally expressed as a 'geometry' or a comma separated list
%        of numbers.
%
%        Compose methods needing such arguments include "BlendCompositeOp" and
%        "DisplaceCompositeOp".
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
   Composition based on the SVG specification:

   A Composition is defined by...
      Color Function :  f(Sc,Dc)  where Sc and Dc are the normalized colors
      Blending areas :  X = 1     for area of overlap, ie: f(Sc,Dc)
                        Y = 1     for source preserved
                        Z = 1     for canvas preserved

   Conversion to transparency (then optimized)
      Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
      Da'  = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)

   Where...
      Sca = Sc*Sa     normalized Source color multiplied by Source alpha
      Dca = Dc*Da     normalized Dest color multiplied by Dest alpha
      Dc' = Dca'/Da'  the desired color value for this channel.

   Da' appears in the formulas below as 'gamma', the resulting alpha value.

   Most functions use a blending mode of over (X=1,Y=1,Z=1); this results in
   the following optimizations...
gamma = Sa+Da-Sa*Da; gamma = 1 - QuantiumScale*alpha * QuantiumScale*beta; opacity = QuantiumScale*alpha*beta; // over blend, optimized 1-Gamma The above SVG definitions also definate that Mathematical Composition methods should use a 'Over' blending mode for Alpha Channel. It however was not applied for composition modes of 'Plus', 'Minus', the modulus versions of 'Add' and 'Subtract'. Mathematical operator changes to be applied from IM v6.7... 1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed 'ModulusAdd' and 'ModulusSubtract' for clarity. 2) All mathematical compositions work as per the SVG specification with regard to blending. This now includes 'ModulusAdd' and 'ModulusSubtract'. 3) When the special channel flag 'sync' (syncronize channel updates) is turned off (enabled by default) then mathematical compositions are only performed on the channels specified, and are applied independantally of each other. In other words the mathematics is performed as 'pure' mathematical operations, rather than as image operations. */ static void HCLComposite(const MagickRealType hue,const MagickRealType chroma, const MagickRealType luma,MagickRealType *red,MagickRealType *green, MagickRealType *blue) { MagickRealType b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma, MagickRealType *luma) { MagickRealType b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (MagickRealType *) NULL); assert(chroma != (MagickRealType *) NULL); assert(luma != (MagickRealType *) NULL); r=red; g=green; b=blue; max=MagickMax(r,MagickMax(g,b)); c=max-(MagickRealType) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == max) h=fmod((g-b)/c+6.0,6.0); else if (green == max) h=((b-r)/c)+2.0; else if (blue == max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static MagickBooleanType CompositeOverImage(Image *image, const Image *source_image,const MagickBooleanType clip_to_self, const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *image_view, *source_view; const char *value; MagickBooleanType clamp, status; MagickOffsetType progress; ssize_t y; /* Composite image. 
*/ status=MagickTrue; progress=0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. */ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, Sa, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. */ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); alpha=Sa+Da-Sa*Da; if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((source_traits == UndefinedPixelTrait) && (channel != AlphaPixelChannel)) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ pixel=QuantumRange*alpha; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=Sc; continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. 
*/ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; gamma=PerceptibleReciprocal(alpha); pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } MagickExport MagickBooleanType CompositeImage(Image *image, const Image *composite,const CompositeOperator compose, const MagickBooleanType clip_to_self,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *source_view, *image_view; const char *value; GeometryInfo geometry_info; Image *canvas_image, *source_image; MagickBooleanType clamp, status; MagickOffsetType progress; MagickRealType amount, canvas_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); if 
(IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); (void) SetImageColorspace(source_image,image->colorspace,exception); if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp)) { status=CompositeOverImage(image,source_image,clip_to_self,x_offset, y_offset,exception); source_image=DestroyImage(source_image); return(status); } amount=0.5; canvas_image=(Image *) NULL; canvas_dissolve=1.0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); SetGeometryInfo(&geometry_info); percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = 
GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if (traits == UndefinedPixelTrait) continue; if (source_traits != UndefinedPixelTrait) SetPixelChannel(image,channel,p[i],q); else if (channel == AlphaPixelChannel) SetPixelChannel(image,channel,OpaqueAlpha,q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case IntensityCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for 
(x=0; x < (ssize_t) source_image->columns; x++) { if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } SetPixelAlpha(image,clamp != MagickFalse ? ClampPixel(GetPixelIntensity(source_image,p)) : ClampToQuantum(GetPixelIntensity(source_image,p)),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case BlurCompositeOp: { CacheView *canvas_view; MagickRealType angle_range, angle_start, height, width; PixelInfo pixel; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. 
*/ flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (const char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "InvalidSetting","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. */ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* Default the unrotated ellipse width and height axis vectors. */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter); /* do the variable blurring of each pixel in image */ GetPixelInfo(image,&pixel); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } if (fabs((double) angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(source_image,p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { (void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1, blur.x2,blur.y1, blur.y2); (void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale* GetPixelRed(p),QuantumScale*GetPixelGreen(p)); #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(source_image,p), blur.y1*QuantumScale*GetPixelGreen(source_image,p), blur.x2*QuantumScale*GetPixelRed(source_image,p), blur.y2*QuantumScale*GetPixelGreen(source_image,p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel,exception); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if 
(sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view; MagickRealType horizontal_scale, vertical_scale; PixelInfo pixel; PointInfo center, offset; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=(MagickRealType) ((image->columns-1)/2.0); else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) != 0) center.x=geometry_info.xi; else center.x=(MagickRealType) (x_offset+geometry_info.xi); if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=(MagickRealType) ((image->rows-1)/2.0); else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } /* Displace the offset. */ offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0); offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); (void) InterpolatePixelInfo(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)* (QuantumScale*GetPixelAlpha(source_image,p)); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. 
Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, chroma, green, hue, luma, red; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, Sa, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) pixel=canvas_dissolve*GetPixelAlpha(source_image,source); else pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(source_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DarkenCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case LightenCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case ModulusAddCompositeOp: case ModulusSubtractCompositeOp: case MultiplyCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ScreenCompositeOp: case SoftLightCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+ canvas_dissolve*Da; break; } case DstOverCompositeOp: case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa+Da-Sa*Da; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } default: { alpha=1.0; break; } } if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case ModulateCompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(source_image,p,&source_pixel); GetPixelInfoPixel(image,q,&canvas_pixel); break; } default: break; } for (i=0; i < 
(ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((source_traits == UndefinedPixelTrait) && (channel != AlphaPixelChannel)) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { MagickBooleanType equivalent; if (Da < 0.5) { pixel=(MagickRealType) TransparentAlpha; break; } equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q); if (equivalent != MagickFalse) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) OpaqueAlpha; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { if (source_image->alpha_trait == UndefinedPixelTrait) pixel=GetPixelIntensity(source_image,p); else pixel=QuantumRange*Sa; break; } case CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { 
pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case ModulateCompositeOp: { pixel=QuantumRange*Da; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=Sc; continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=PerceptibleReciprocal(1.0-alpha); break; } default: { gamma=PerceptibleReciprocal(alpha); break; } } pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa)); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc); break; } case BlurCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sca; break; } case DisplaceCompositeOp: case DistortCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { if ((Sca == 0.0) && (Dca == Da)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca == 0.0) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } 
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-Dca/Da)*Sa/ Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); else pixel=QuantumRange*gamma*(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca* (1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) (QuantumRange- GetPixelBlack(source_image,p)); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(source_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(source_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if ((Sca*Da) < (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case DifferenceCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa)); break; } case DissolveCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa* canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc); break; } case DivideDstCompositeOp: { if ((fabs((double) Sca) < MagickEpsilon) && (fabs((double) Dca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (fabs((double) Dca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case DivideSrcCompositeOp: { if ((fabs((double) Dca) < MagickEpsilon) && (fabs((double) Sca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } if (fabs((double) Sca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa/Sca+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } case DstAtopCompositeOp: { pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da)); break; } case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Dca; break; } case DstInCompositeOp: { pixel=QuantumRange*(Dca*Sa); break; } case DstOutCompositeOp: { pixel=QuantumRange*(Dca*(1.0-Sa)); break; } case DstOverCompositeOp: { pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da)); break; } case ExclusionCompositeOp: { pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0- Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardMixCompositeOp: { pixel=gamma*(((Sca+Dca) < 1.0) ? 
0.0 : QuantumRange); break; } case HueCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&sans,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case InCompositeOp: case SrcInCompositeOp: { pixel=QuantumRange*(Sca*Da); break; } case LinearBurnCompositeOp: { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da); break; } case LinearDodgeCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case LinearLightCompositeOp: { /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca); break; } case LightenCompositeOp: { if ((Sca*Da) > (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case LightenIntensityCompositeOp: { /* Lighten is equivalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case LuminizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&sans,&luma); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case MathematicsCompositeOp: { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+ geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+ geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case MinusDstCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa); break; } case MinusSrcCompositeOp: { /* Minus source from canvas. 
f(Sc,Dc) = Sc - Dc */ pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da); break; } case ModulateCompositeOp: { ssize_t offset; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint); if (offset == 0) { pixel=Dc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ModulusAddCompositeOp: { pixel=Sc+Dc; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa)); break; } case ModulusSubtractCompositeOp: { pixel=Sc-Dc; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa)); break; } case MultiplyCompositeOp: { pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case OutCompositeOp: case SrcOutCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)); break; } case OverCompositeOp: case SrcOverCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); break; } case OverlayCompositeOp: { if ((2.0*Dca) < Da) { pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0- Da)); break; } pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+ Sca*(1.0-Da)); break; } case PegtopLightCompositeOp: { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. 
*/ if (fabs((double) Da) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sca); break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=QuantumRange*(Sca+Dca); break; } case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-(Dca/Da)))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*(Dca/Da)* (4.0*(Dca/Da)+1.0)*((Dca/Da)-1.0)+7.0*(Dca/Da))+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow((Dca/Da),0.5)- (Dca/Da))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) 
fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs((double) Sa) < MagickEpsilon) || (fabs((double) (Sca-Sa)) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if ((2.0*Sca) <= Sa) { pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca* (1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca* (1.0-Sa)); break; } case XorCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } default: { pixel=Sc; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. 
%
%  The format of the TextureImage method is:
%
%      MagickBooleanType TextureImage(Image *image,const Image *texture,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o texture_image: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Clone the texture so the caller's texture is never modified, convert it
    to the canvas colorspace, and enable tiled virtual pixels so reads wrap
    around the texture edges.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  /*
    Two strategies: when the compose method actually blends (anything other
    than a plain Copy, or an Over that involves alpha), tile by calling
    CompositeImage() per tile; otherwise take the optimized direct
    channel-copy path below.
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        Tile texture onto the image background.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
             texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized).
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(texture_image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *p,
      *pixels;

    register ssize_t
      x;

    register Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch one tiled texture row (tile offset applied, wrapped modulo the
      texture height) and the corresponding canvas row.
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
         texture_image->columns)
    {
      register ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;  /* clip the last (partial) tile */
      for (j=0; j < (ssize_t) width; j++)
      {
        register ssize_t
          i;

        if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
          {
            /* write-protected pixel: skip, but keep both pointers in step */
            p+=GetPixelChannels(texture_image);
            q+=GetPixelChannels(image);
            continue;
          }
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    /*
      NOTE(review): unlike CompositeImage above, this progress callback is
      not wrapped in an OpenMP critical section — confirm the progress
      monitor is safe to invoke concurrently.
    */
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
dct_lee_cpu.h
/**
 * @file   dct_lee_cpu.h
 * @author Yibo Lin (DREAMPlace)
 * @date   Oct 2018
 *
 * CPU implementation of the fast Discrete Cosine Transform (DCT) and its
 * inverse (IDCT) using Lee's iterative algorithm.  All transform lengths
 * must be powers of 2.
 */
#ifndef DREAMPLACE_DCT_LEE_CPU_H
#define DREAMPLACE_DCT_LEE_CPU_H

#include <vector>
#include <cmath>
#include <stdexcept>
#include "utility/src/Msg.h"

DREAMPLACE_BEGIN_NAMESPACE

namespace lee {

// Cosine argument scale used by the precompute helpers below.
constexpr double PI = 3.14159265358979323846;

/// Return true if a number is power of 2.
/// Note: returns false for 0 (the leading `val &&` guard).
template <typename T = unsigned>
inline bool isPowerOf2(T val)
{
    return val && (val & (val - 1)) == 0;
}

/// Transpose a row-major matrix with M rows and N columns using block transpose method.
/// Tiling by `blockSize` keeps both the reads and the writes inside a small
/// cache-resident window.
/// @param in  input matrix, M x N, row-major; must not alias `out`
/// @param out output matrix, N x M, row-major
/// @param blockSize tile edge length (default 16)
// NOTE(review): std::min requires <algorithm>, which is not included here;
// it presumably arrives transitively via the headers above -- confirm.
template <typename TValue, typename TIndex = unsigned>
inline void transpose(const TValue *in, TValue *out, TIndex M, TIndex N, TIndex blockSize = 16)
{
    //#pragma omp parallel for collapse(2) schedule(static)
    for (TIndex j = 0; j < N; j += blockSize)
    {
        for (TIndex i = 0; i < M; i += blockSize)
        {
            // Transpose the block beginning at [i, j]
            TIndex xend = std::min(M, i + blockSize);
            TIndex yend = std::min(N, j + blockSize);
            for (TIndex y = j; y < yend; ++y)
            {
                for (TIndex x = i; x < xend; ++x)
                {
                    out[x + y * M] = in[y + x * N];
                }
            }
        }
    }
}

/// Negate values in odd position of a vector (in place).
/// @param vec length-N buffer; entries at indices 1, 3, 5, ... are negated
/// @param N   vector length
/// @param num_threads OpenMP thread count for the parallel loop
template <typename TValue, typename TIndex = unsigned>
inline void negateOddEntries(TValue *vec, TIndex N, int num_threads)
{
#pragma omp parallel for num_threads(num_threads)
    for (TIndex i = 1; i < N; i += 2)
    {
        vec[i] = -vec[i];
    }
}

/// Precompute cosine values needed for N-point dct.
/// The table is laid out level by level: halfLen = N/2 entries first, then
/// N/4, ..., down to 1, matching the order `dct` consumes them (cosOffset).
/// @param cos size N - 1 buffer, contains the result after function call
/// @param N the length of target dct, must be power of 2
/// @throw std::domain_error if N is not a power of 2
template <typename TValue, typename TIndex = unsigned>
void precompute_dct_cos(TValue *cos, TIndex N)
{
    // The input length must be power of 2
    if (! isPowerOf2<TIndex>(N))
    {
        throw std::domain_error("Input length is not power of 2.");
    }

    TIndex offset = 0;
    TIndex halfLen = N / 2;
    while (halfLen)
    {
        TValue phaseStep = 0.5 * PI / halfLen;
        TValue phase = 0.5 * phaseStep;
        for (TIndex i = 0; i < halfLen; ++i)
        {
            // 0.5 / cos(theta) is the butterfly scale factor in Lee's split.
            cos[offset + i] = 0.5 / std::cos(phase);
            phase += phaseStep;
        }
        offset += halfLen;
        halfLen /= 2;
    }
}

/// Precompute cosine values needed for N-point idct.
/// Same table values as `precompute_dct_cos`, but laid out bottom-up
/// (halfLen = 1 first, doubling), matching the order `idct` consumes them.
/// @param cos size N - 1 buffer, contains the result after function call
/// @param N the length of target idct, must be power of 2
/// @throw std::domain_error if N is not a power of 2
template <typename TValue, typename TIndex = unsigned>
void precompute_idct_cos(TValue *cos, TIndex N)
{
    // The input length must be power of 2
    if (! isPowerOf2<TIndex>(N))
    {
        throw std::domain_error("Input length is not power of 2.");
    }

    TIndex offset = 0;
    TIndex halfLen = 1;
    while (halfLen < N)
    {
        TValue phaseStep = 0.5 * PI / halfLen;
        TValue phase = 0.5 * phaseStep;
        for (TIndex i = 0; i < halfLen; ++i)
        {
            cos[offset + i] = 0.5 / std::cos(phase);
            phase += phaseStep;
        }
        offset += halfLen;
        halfLen *= 2;
    }
}

/// The implementation of fast Discrete Cosine Transform (DCT) algorithm and its inverse (IDCT) are Lee's algorithms
/// Algorithm reference: A New Algorithm to Compute the Discrete Cosine Transform, by Byeong Gi Lee, 1984
///
/// Lee's algorithm has a recursive structure in nature.
/// Here is a sample recursive implementation: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms
///
/// My implementation here is iterative, which is more efficient than the recursive version.
/// Here is a sample iterative implementation: https://www.codeproject.com/Articles/151043/Iterative-Fast-1D-Forvard-DCT

/// Compute y[k] = sum_n=0..N-1 (x[n] * cos((n + 0.5) * k * PI / N)), for k = 0..N-1
///
/// @param vec length N sequence to be transformed (not modified)
/// @param out length N buffer; receives the result (also used as one of the
///            two ping-pong work buffers during the computation)
/// @param buf length N scratch buffer (the second ping-pong buffer)
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_dct_cos'
/// @param N length of vec, must be power of 2
/// @throw std::domain_error if N is not a power of 2
template <typename TValue, typename TIndex = unsigned>
inline void dct(TValue *vec, TValue *out, TValue *buf, const TValue *cos, TIndex N)
{
    // The input length must be power of 2
    if (! isPowerOf2<TIndex>(N))
    {
        throw std::domain_error("Input length is not power of 2.");
    }

    // Pointers point to the beginning indices of two adjacent iterations;
    // they are swapped after every pass (ping-pong between out and buf).
    TValue *curr = out;
    TValue *next = buf;

    // Copy 'vec' into the current working buffer.
    std::copy(vec, vec + N, curr);

    // Current butterfly length and half length
    TIndex len = N;
    TIndex halfLen = len / 2;

    // Top-down pass: iteratively bi-partition sequences into sub-sequences.
    TIndex cosOffset = 0;
    while (halfLen)
    {
        TIndex offset = 0;
        TIndex steps = N / len;
        for (TIndex k = 0; k < steps; ++k)
        {
            for (TIndex i = 0; i < halfLen; ++i)
            {
                // Lee's split: sums in the first half, scaled differences
                // in the second half of each sub-sequence.
                next[offset + i] = curr[offset + i] + curr[offset + len - i - 1];
                next[offset + halfLen + i] = (curr[offset + i] - curr[offset + len - i - 1]) * cos[cosOffset + i];
            }
            offset += len;
        }
        std::swap(curr, next);
        cosOffset += halfLen;
        len = halfLen;
        halfLen /= 2;
    }

    // Bottom-up pass: form the final DCT solution by interleaving.
    // Note that the case len = 2 will do nothing, so we start from len = 4.
    len = 4;
    halfLen = 2;
    while (halfLen < N)
    {
        TIndex offset = 0;
        TIndex steps = N / len;
        for (TIndex k = 0; k < steps; ++k)
        {
            for (TIndex i = 0; i < halfLen - 1; ++i)
            {
                next[offset + i * 2] = curr[offset + i];
                next[offset + i * 2 + 1] = curr[offset + halfLen + i] + curr[offset + halfLen + i + 1];
            }
            // Last even/odd pair has no i+1 neighbor to add.
            next[offset + len - 2] = curr[offset + halfLen - 1];
            next[offset + len - 1] = curr[offset + len - 1];
            offset += len;
        }
        std::swap(curr, next);
        halfLen = len;
        len *= 2;
    }

    // Populate the final results into 'out' (the last swap may have left
    // the result in 'buf').
    if (curr != out)
    {
        std::copy(curr, curr + N, out);
    }
}

/// Compute y[k] = 0.5 * x[0] + sum_n=1..N-1 (x[n] * cos(n * (k + 0.5) * PI / N)), for k = 0..N-1
///
/// @param vec length N sequence to be transformed (not modified)
/// @param out length N buffer; receives the result (also one of the two
///            ping-pong work buffers)
/// @param buf length N scratch buffer (the second ping-pong buffer)
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_idct_cos'
/// @param N length of vec, must be power of 2
/// @throw std::domain_error if N is not a power of 2
template <typename TValue, typename TIndex = unsigned>
inline void idct(TValue *vec, TValue *out, TValue* buf, const TValue *cos, TIndex N)
{
    // The input length must be power of 2
    if (! isPowerOf2<TIndex>(N))
    {
        throw std::domain_error("Input length is not power of 2.");
    }

    // Pointers point to the beginning indices of two adjacent iterations
    TValue *curr = out;
    TValue *next = buf;

    // Copy 'vec' into the current working buffer; the DC term carries a
    // 0.5 weight in the IDCT definition, applied here once up front.
    std::copy(vec, vec + N, curr);
    curr[0] /= 2;

    // Current butterfly length and half length
    TIndex len = N;
    TIndex halfLen = len / 2;

    // Top-down pass: iteratively bi-partition sequences into sub-sequences
    // (de-interleave even/odd coefficient indices).
    while (halfLen)
    {
        TIndex offset = 0;
        TIndex steps = N / len;
        for (TIndex k = 0; k < steps; ++k)
        {
            next[offset] = curr[offset];
            next[offset + halfLen] = curr[offset + 1];
            for (TIndex i = 1; i < halfLen; ++i)
            {
                next[offset + i] = curr[offset + i * 2];
                next[offset + halfLen + i] = curr[offset + i * 2 - 1] + curr[offset + i * 2 + 1];
            }
            offset += len;
        }
        std::swap(curr, next);
        len = halfLen;
        halfLen /= 2;
    }

    // Bottom-up pass: form the final IDCT solution via butterflies.
    len = 2;
    halfLen = 1;
    TIndex cosOffset = 0;
    while (halfLen < N)
    {
        TIndex offset = 0;
        TIndex steps = N / len;
        for (TIndex k = 0; k < steps; ++k)
        {
            for (TIndex i = 0; i < halfLen; ++i)
            {
                TValue g = curr[offset + i];
                TValue h = curr[offset + halfLen + i] * cos[cosOffset + i];
                next[offset + i] = g + h;
                next[offset + len - 1 - i] = g - h;
            }
            offset += len;
        }
        std::swap(curr, next);
        cosOffset += halfLen;
        halfLen = len;
        len *= 2;
    }

    // Populate the final results into 'out'
    if (curr != out)
    {
        std::copy(curr, curr + N, out);
    }
}

/// Compute batch dct: one independent N-point DCT per matrix row.
/// @param mtx size M * N row-major matrix to be transformed
/// @param out size M * N row-major result matrix
/// @param buf size M * N scratch buffer (each row i uses buf + i*N)
/// @param cos length N - 1, cosine values precomputed by 'precompute_dct_cos' for N-point dct
/// @param M number of rows
/// @param N number of columns, must be power of 2
/// @param num_threads OpenMP thread count; rows are processed in parallel
template <typename TValue, typename TIndex = unsigned>
inline void dct(TValue *mtx, TValue *out, TValue* buf, const TValue *cos, TIndex M, TIndex N, int num_threads)
{
#pragma omp parallel for num_threads(num_threads) schedule(static)
    for (TIndex i = 0; i < M; ++i)
    {
        dct<TValue, TIndex>(mtx + i * N, out + i * N, buf + i*N, cos, N);
    }
}

/// Compute batch idct: one independent N-point IDCT per matrix row.
/// @param mtx size M * N row-major matrix to be transformed
/// @param out size M * N row-major result matrix
/// @param buf size M * N scratch buffer (each row i uses buf + i*N)
/// @param cos length N - 1, cosine values precomputed by 'precompute_idct_cos' for N-point idct
/// @param M number of rows
/// @param N number of columns, must be power of 2
/// @param num_threads OpenMP thread count; rows are processed in parallel
template <typename TValue, typename TIndex = unsigned>
inline void idct(TValue *mtx, TValue *out, TValue* buf, const TValue *cos, TIndex M, TIndex N, int num_threads)
{
#pragma omp parallel for num_threads(num_threads) schedule(static)
    for (TIndex i = 0; i < M; ++i)
    {
        idct<TValue, TIndex>(mtx + i * N, out + i * N, buf + i*N, cos, N);
    }
}

} // End of namespace lee

DREAMPLACE_END_NAMESPACE

#endif
GB_binop__isge_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isge_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__isge_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__isge_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__isge_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isge_uint64)
// A*D function (colscale):         GB (_AxD__isge_uint64)
// D*A function (rowscale):         GB (_DxB__isge_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__isge_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__isge_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isge_uint64)
// C=scalar+B                       GB (_bind1st__isge_uint64)
// C=scalar+B'                      GB (_bind1st_tran__isge_uint64)
// C=A+scalar                       GB (_bind2nd__isge_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__isge_uint64)

// C type:   uint64_t
// A type:   uint64_t
// A pattern? 0
// B type:   uint64_t
// B pattern? 0

// BinaryOp: cij = (aij >= bij)

// ISGE is the "is greater than or equal" comparison: the result is 1 or 0,
// stored in the same type as the inputs (uint64_t), not as a GrB_BOOL.

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    uint64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGE is none of these, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isge_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; the generator emits both returns
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isge_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isge_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isge_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isge_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isge_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x >= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isge_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansions
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij >= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
arraybench.c
/****************************************************************************
 *                                                                          *
 *             OpenMP MicroBenchmark Suite - Version 3.1                    *
 *                                                                          *
 *                            produced by                                   *
 *                                                                          *
 *                Mark Bull, Fiona Reid and Nix Mc Donnell                  *
 *                                                                          *
 *                                at                                        *
 *                                                                          *
 *                Edinburgh Parallel Computing Centre                       *
 *                                                                          *
 *         email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk                *
 *                                                                          *
 *                                                                          *
 *      This version copyright (c) The University of Edinburgh, 2015.       *
 *                                                                          *
 *                                                                          *
 *  Licensed under the Apache License, Version 2.0 (the "License");         *
 *  you may not use this file except in compliance with the License.        *
 *  You may obtain a copy of the License at                                 *
 *                                                                          *
 *      http://www.apache.org/licenses/LICENSE-2.0                          *
 *                                                                          *
 *  Unless required by applicable law or agreed to in writing, software     *
 *  distributed under the License is distributed on an "AS IS" BASIS,       *
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.*
 *  See the License for the specific language governing permissions and     *
 *  limitations under the License.                                          *
 *                                                                          *
 ****************************************************************************/

/*
 * Measures the per-construct overhead of passing an IDA-element array
 * through the OpenMP data-sharing clauses private, firstprivate,
 * copyprivate, and copyin, relative to a no-clause reference loop.
 * init/benchmark/reference/finalise, delaylength, innerreps, and
 * array_delay come from common.h; IDA is set by arraybench.h.
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

#include "common.h"
#include "arraybench.h"

/* btest is threadprivate so the COPYIN test has per-thread state to
 * initialize; atest is an ordinary shared global captured by the
 * private/firstprivate clauses below. */
double btest[IDA];
double atest[IDA];
#pragma omp threadprivate (btest)

int main(int argc, char **argv)
{
    init(argc, argv);

    /* GENERATE REFERENCE TIME */
    reference("reference time 1", &refer);

    char testName[32];

    /* TEST PRIVATE */
    sprintf(testName, "PRIVATE %d", IDA);
    benchmark(testName, &testprivnew);

    /* TEST FIRSTPRIVATE */
    sprintf(testName, "FIRSTPRIVATE %d", IDA);
    benchmark(testName, &testfirstprivnew);

#ifdef OMPVER2
    /* TEST COPYPRIVATE */
    sprintf(testName, "COPYPRIVATE %d", IDA);
    benchmark(testName, &testcopyprivnew);
#endif

    /* TEST THREADPRIVATE - COPYIN */
    sprintf(testName, "COPYIN %d", IDA);
    benchmark(testName, &testthrprivnew);

    finalise();
    return EXIT_SUCCESS;
}

/* Reference kernel: the same delay loop with no parallel construct, so the
 * measured overhead of each test is (test time - reference time). */
void refer()
{
    int j;
    double a[1];
    for (j = 0; j < innerreps; j++)
    {
        array_delay(delaylength, a);
    }
}

/* Overhead of firstprivate: each parallel region copies atest in. */
void testfirstprivnew()
{
    int j;
    for (j = 0; j < innerreps; j++)
    {
#pragma omp parallel firstprivate(atest)
        {
            array_delay(delaylength, atest);
        }
    }
}

/* Overhead of private: each thread gets an uninitialized copy of atest. */
void testprivnew()
{
    int j;
    for (j = 0; j < innerreps; j++)
    {
#pragma omp parallel private(atest)
        {
            array_delay(delaylength, atest);
        }
    }
}

#ifdef OMPVER2
/* Overhead of copyprivate: one thread's private atest is broadcast to the
 * other threads at the end of the single region. */
void testcopyprivnew()
{
    int j;
    for (j=0; j<innerreps; j++)
    {
#pragma omp parallel private(atest)
        {
#pragma omp single copyprivate(atest)
            {
                array_delay(delaylength, atest);
            }
        }
    }
}
#endif

/* Overhead of copyin: the master's threadprivate btest is copied into every
 * thread's copy on region entry. */
void testthrprivnew()
{
    int j;
    for (j = 0; j < innerreps; j++)
    {
#pragma omp parallel copyin(btest)
        {
            array_delay(delaylength, btest);
        }
    }
}
primes.c
#include <stdio.h> #include <math.h> #include <sys/time.h> #include <omp.h> #include <offload.h> const int a = 1; const int b = 3000000; double wtime() { struct timeval t; gettimeofday(&t, NULL); return (double)t.tv_sec + (double)t.tv_usec * 1E-6; } /* * is_prime_number: Returns 1 if n is a prime number and 0 otherwise. * This function uses trial division primality test. */ __attribute__((target(mic))) int is_prime_number(int n) { int limit = sqrt(n) + 1; for (int i = 2; i <= limit; i++) { if (n % i == 0) return 0; } return (n > 1) ? 1 : 0; } int count_prime_numbers(int a, int b) { int nprimes = 0; /* Count '2' as a prime number */ if (a <= 2) { nprimes = 1; a = 2; } /* Shift 'a' to odd number */ if (a % 2 == 0) a++; /* Loop over odd numbers: a, a + 2, a + 4, ... , b */ for (int i = a; i <= b; i += 2) { if (is_prime_number(i)) nprimes++; } return nprimes; } double run_host_serial() { double t = wtime(); int n = count_prime_numbers(a, b); t = wtime() - t; printf("Result (host serial): %d\n", n); return t; } int count_prime_numbers_omp(int a, int b) { int nprimes = 0; /* Count '2' as a prime number */ if (a <= 2) { nprimes = 1; a = 2; } /* Shift 'a' to odd number */ if (a % 2 == 0) a++; #pragma omp parallel { /* Loop over odd numbers: a, a + 2, a + 4, ... , b */ #pragma omp for schedule(dynamic, 100) reduction(+:nprimes) for (int i = a; i <= b; i += 2) { if (is_prime_number(i)) nprimes++; } } return nprimes; } double run_host_parallel() { double t = wtime(); int n = count_prime_numbers_omp(a, b); t = wtime() - t; printf("Result (host parallel): %d\n", n); return t; } __attribute__((target(mic))) int count_prime_numbers_phi(int a, int b) { int nprimes = 0; /* Count '2' as a prime number */ if (a <= 2) { nprimes = 1; a = 2; } /* Shift 'a' to odd number */ if (a % 2 == 0) a++; /* Loop over odd numbers: a, a + 2, a + 4, ... 
, b */ for (int i = a; i <= b; i += 2) { if (is_prime_number(i)) nprimes++; } return nprimes; } double run_phi_serial() { #ifdef __INTEL_OFFLOAD printf("Intel Xeon Phi devices: %d\n", _Offload_number_of_devices()); #endif int n; double t = wtime(); #pragma offload target(mic) out(n) n = count_prime_numbers_phi(a, b); t = wtime() - t; printf("Result (phi serial): %d\n", n); return t; } __attribute__((target(mic))) int count_prime_numbers_phi_omp(int a, int b) { int nprimes = 0; /* Count '2' as a prime number */ if (a <= 2) { nprimes = 1; a = 2; } /* Shift 'a' to odd number */ if (a % 2 == 0) a++; /* Loop over odd numbers: a, a + 2, a + 4, ... , b */ //int nthreads; #pragma omp parallel { #pragma omp for schedule(dynamic, 100) reduction(+:nprimes) for (int i = a; i <= b; i += 2) { if (is_prime_number(i)) nprimes++; } //#pragma omp single //nthreads = omp_get_num_threads(); } //printf("MIC threads: %d\n", nthreads); return nprimes; } double run_phi_parallel() { #ifdef __INTEL_OFFLOAD printf("Intel Xeon Phi devices: %d\n", _Offload_number_of_devices()); #endif int n; double t = wtime(); #pragma offload target(mic) out(n) n = count_prime_numbers_phi_omp(a, b); t = wtime() - t; printf("Result (phi parallel): %d\n", n); return t; } int main(int argc, char **argv) { printf("Count prime numbers in [%d, %d]\n", a, b); double thost_serial = run_host_serial(); double thost_par = run_host_parallel(); double tphi_serial = run_phi_serial(); double tphi_par = run_phi_parallel(); printf("Execution time (host serial): %.6f\n", thost_serial); printf("Execution time (host parallel): %.6f\n", thost_par); printf("Execution time (phi serial): %.6f\n", tphi_serial); printf("Execution time (phi parallel): %.6f\n", tphi_par); printf("Ratio phi_serial/host_serial: %.2f\n", tphi_serial / thost_serial); printf("Speedup host_serial/host_omp: %.2f\n", thost_serial / thost_par); printf("Speedup host_omp/phi_omp: %.2f\n", thost_par / tphi_par); printf("Speedup host_serial/phi_omp: %.2f\n", 
thost_serial / tphi_par); printf("Speedup phi_serial/phi_omp: %.2f\n", tphi_serial / tphi_par); return 0; }
MatrixMXN.h
#pragma once #include "VectorND.h" #include <fstream> template<class T> class MatrixMN { public: int num_rows_; // m_ int num_cols_; // n_ T *values_; MatrixMN() : values_(nullptr), num_rows_(0), num_cols_(0) {} MatrixMN(const int& _m, const int& _n) : values_(nullptr), num_rows_(0), num_cols_(0) {} void initialize(const int& _m, const int& _n) { num_rows_ = _m; num_cols_ = _n; SAFE_DELETE_ARRAY(values_); const int num_all = num_rows_ * num_cols_; { values_ = new T[num_all]; for (int i = 0; i < num_all; i++) values_[i] = (T)0; } } void cout() { for (int row = 0; row < num_rows_; row++) { for (int col = 0; col < num_cols_; col++) { std::cout << getValue(row, col) << " "; } std::cout << std::endl; } } void writeTXT(std::ofstream& of) const { of << num_rows_ << " " << num_cols_ << std::endl; for (int i = 0; i < num_rows_ * num_cols_; i++) { of << values_[i]; if (i != num_rows_ * num_cols_ - 1) of << " "; } of << std::endl; } void multiply(const VectorND<T>& vector, VectorND<T>& result) const { #pragma omp parallel for for (int row = 0; row < num_rows_; row++) { result.values_[row] = (T)0; int ix = row*num_cols_; T temp; for (int col = 0; col < num_cols_; col++, ix++) { temp = values_[ix]; temp *= vector.values_[col]; result.values_[row] += temp; } } } };
GB_unaryop__identity_int64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int64_bool
// op(A') function:  GB_tran__identity_int64_bool

// C type:   int64_t
// A type:   bool
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

// IDENTITY with differing C and A types: the "operator" is a pure typecast
// from bool to int64_t (false -> 0, true -> 1).

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int64_bool
(
    int64_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise, embarrassingly parallel over the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body comes from the shared transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__lt_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__lt_fp32
// A.*B function (eWiseMult):       GB_AemultB__lt_fp32
// A*D function (colscale):         GB_AxD__lt_fp32
// D*A function (rowscale):         GB_DxB__lt_fp32
// C+=B function (dense accum):     GB_Cdense_accumB__lt_fp32
// C+=b function (dense accum):     GB_Cdense_accumb__lt_fp32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__lt_fp32
// C=scalar+B                       GB_bind1st__lt_fp32
// C=scalar+B'                      GB_bind1st_tran__lt_fp32
// C=A+scalar                       GB_bind2nd__lt_fp32
// C=A'+scalar                      GB_bind2nd_tran__lt_fp32

// C type:   bool
// A type:   float
// B,b type: float

// BinaryOp: cij = (aij < bij)

// NOTE(review): every function below is a thin, operator-specialized wrapper;
// the numeric work lives in the #include'd template files (not visible here).
// The macros in this section configure those templates for LT on fp32.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: boolean result of the fp32 comparison
#define GB_BINOP(z, x, y, i, j) \
    z = (x < y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LT || GxB_NO_FP32 || GxB_NO_LT_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compiled out (#if 0): LT has no dense-accum ewise3 variant, matching the
// "(none)" entry in the function list above.

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// NOTE(review): the template include is under #if 0, so this variant is a
// stub that only returns GrB_SUCCESS (LT cannot be used as an accumulator
// into its own C type, since C is bool and B is float).

GrB_Info GB_Cdense_accumB__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// NOTE(review): also a stub for LT (template under #if 0), as above.

GrB_Info GB_Cdense_accumb__lt_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes through Cx; C->x holds bool entries for LT
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// free the ek_slice workspaces allocated by the add/emult templates
#undef GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__lt_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces: allocated (if needed) inside the template,
    // released by GB_FREE_ALL below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lt_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x < Bx [p]) for every entry present in the bitmap Bb.

GrB_Info GB_bind1st__lt_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries absent from the bitmap
        float bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lt_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__lt_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__lt_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
setbv.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB LU code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "applu.incl" //--------------------------------------------------------------------- // set the boundary values of dependent variables //--------------------------------------------------------------------- void setbv() { //--------------------------------------------------------------------- // local variables //--------------------------------------------------------------------- int i, j, k, m; double temp1[5], temp2[5]; //--------------------------------------------------------------------- // set the dependent variable values along the top and bottom 
faces //--------------------------------------------------------------------- #pragma omp parallel default(shared) private(i,j,k,m,temp1,temp2) \ shared(nx,ny,nz) { #pragma omp for schedule(static) for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { exact( i, j, 0, temp1 ); exact( i, j, nz-1, temp2 ); for (m = 0; m < 5; m++) { u[0][j][i][m] = temp1[m]; u[nz-1][j][i][m] = temp2[m]; } } } //--------------------------------------------------------------------- // set the dependent variable values along north and south faces //--------------------------------------------------------------------- #pragma omp for schedule(static) nowait for (k = 0; k < nz; k++) { for (i = 0; i < nx; i++) { exact( i, 0, k, temp1 ); exact( i, ny-1, k, temp2 ); for (m = 0; m < 5; m++) { u[k][0][i][m] = temp1[m]; u[k][ny-1][i][m] = temp2[m]; } } } //--------------------------------------------------------------------- // set the dependent variable values along east and west faces //--------------------------------------------------------------------- #pragma omp for schedule(static) nowait for (k = 0; k < nz; k++) { for (j = 0; j < ny; j++) { exact( 0, j, k, temp1 ); exact( nx-1, j, k, temp2 ); for (m = 0; m < 5; m++) { u[k][j][0][m] = temp1[m]; u[k][j][nx-1][m] = temp2[m]; } } } } //end parallel }
GB_binop__rminus_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_fc32) // A.*B function (eWiseMult): GB (_AemultB_01__rminus_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_fc32) // A.*B function (eWiseMult): GB (_AemultB_03__rminus_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fc32) // A*D function (colscale): GB (_AxD__rminus_fc32) // D*A function (rowscale): GB (_DxB__rminus_fc32) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_fc32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fc32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fc32) // C=scalar+B GB (_bind1st__rminus_fc32) // C=scalar+B' GB (_bind1st_tran__rminus_fc32) // C=A+scalar GB (_bind2nd__rminus_fc32) // C=A'+scalar GB (_bind2nd_tran__rminus_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = GB_FC32_minus (bij, aij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical 
#define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC32_minus (y, x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_FC32 || GxB_NO_RMINUS_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE(review): as in all Generated2/ kernel files, every function below is a
// thin wrapper that casts its inputs and delegates the numeric work to an
// #include'd template (the templates are not visible in this file).

void GB (_Cdense_ewise3_accum__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // ek_slice workspaces used by the template; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__rminus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for rminus, so this branch is the one compiled.)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = Bx [p] - x (rminus with x bound as first input)

GrB_Info GB (_bind1st__rminus_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries absent from the bitmap
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_minus (bij, x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = y - Ax [p] (rminus with y bound as second input)

GrB_Info GB (_bind2nd__rminus_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries absent from the bitmap
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_minus (y, aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_minus (aij, x) ;          \
}

GrB_Info GB (_bind1st_tran__rminus_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_minus (y, aij) ;          \
}

GrB_Info GB (_bind2nd_tran__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4));t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4));t3++) { for (t4=max(max(ceild(t1-508,512),ceild(4*t2-Nz-1011,1024)),ceild(4*t3-Ny-1011,1024));t4<=min(min(min(floord(4*Nt+Nx-9,1024),floord(2*t1+Nx-3,1024)),floord(4*t2+Nx-9,1024)),floord(4*t3+Nx-9,1024));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
resource_manager.h
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & Newcastle University for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_RESOURCE_MANAGER_H_ #define CORE_RESOURCE_MANAGER_H_ #include <omp.h> #include <sched.h> #include <algorithm> #include <cmath> #include <limits> #include <memory> #include <ostream> #include <set> #include <string> #include <unordered_map> #include <utility> #include <vector> #if defined(USE_OPENCL) && !defined(__ROOTCLING__) #ifdef __APPLE__ #define CL_HPP_ENABLE_EXCEPTIONS #define CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY #define CL_HPP_MINIMUM_OPENCL_VERSION 120 #define CL_HPP_TARGET_OPENCL_VERSION 120 #include "cl2.hpp" #else #define __CL_ENABLE_EXCEPTIONS #include <CL/cl2.hpp> #endif #endif #include "core/agent/agent.h" #include "core/agent/agent_handle.h" #include "core/agent/agent_uid.h" #include "core/agent/agent_uid_generator.h" #include "core/container/agent_uid_map.h" #include "core/diffusion/diffusion_grid.h" #include "core/operation/operation.h" #include "core/simulation.h" #include "core/type_index.h" #include "core/util/numa.h" #include "core/util/root.h" #include "core/util/thread_info.h" #include "core/util/type.h" namespace bdm { /// ResourceManager stores agents and diffusion grids and provides /// methods to add, remove, and access them. Agents are uniquely identified /// by their AgentUid, and AgentHandle. An AgentHandle might change during the /// simulation. 
class ResourceManager { public: explicit ResourceManager(TRootIOCtor* r) {} ResourceManager(); virtual ~ResourceManager() { for (auto& el : diffusion_grids_) { delete el.second; } for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { delete agent; } } if (type_index_) { delete type_index_; } } ResourceManager& operator=(ResourceManager&& other) { if (agents_.size() != other.agents_.size()) { Log::Fatal( "Restored ResourceManager has different number of NUMA nodes."); } for (auto& el : diffusion_grids_) { delete el.second; } for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { delete agent; } } agents_ = std::move(other.agents_); diffusion_grids_ = std::move(other.diffusion_grids_); RebuildAgentUidMap(); // restore type_index_ if (type_index_) { for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { type_index_->Add(agent); } } } return *this; } void RebuildAgentUidMap() { // rebuild uid_ah_map_ uid_ah_map_.clear(); auto* agent_uid_generator = Simulation::GetActive()->GetAgentUidGenerator(); uid_ah_map_.resize(agent_uid_generator->GetHighestIndex() + 1); for (unsigned n = 0; n < agents_.size(); ++n) { for (unsigned i = 0; i < agents_[n].size(); ++i) { auto* agent = agents_[n][i]; this->uid_ah_map_.Insert(agent->GetUid(), AgentHandle(n, i)); } } } Agent* GetAgent(const AgentUid& uid) { if (!uid_ah_map_.Contains(uid)) { return nullptr; } auto& ah = uid_ah_map_[uid]; return agents_[ah.GetNumaNode()][ah.GetElementIdx()]; } Agent* GetAgent(AgentHandle ah) { return agents_[ah.GetNumaNode()][ah.GetElementIdx()]; } AgentHandle GetAgentHandle(const AgentUid& uid) { return uid_ah_map_[uid]; } void AddDiffusionGrid(DiffusionGrid* dgrid) { uint64_t substance_id = dgrid->GetSubstanceId(); auto search = diffusion_grids_.find(substance_id); if (search != diffusion_grids_.end()) { Log::Fatal("ResourceManager::AddDiffusionGrid", "You tried to add a diffusion grid with an already existing " "substance id. 
Please choose a different substance id."); } else { diffusion_grids_[substance_id] = dgrid; } } void RemoveDiffusionGrid(size_t substance_id) { auto search = diffusion_grids_.find(substance_id); if (search != diffusion_grids_.end()) { delete search->second; diffusion_grids_.erase(search); } else { Log::Error("ResourceManager::RemoveDiffusionGrid", "You tried to remove a diffusion grid that does not exist."); } } /// Return the diffusion grid which holds the substance of specified id DiffusionGrid* GetDiffusionGrid(size_t substance_id) const { if(substance_id >= diffusion_grids_.size()) { Log::Error("DiffusionGrid::GetDiffusionGrid", "You tried to request diffusion grid '", substance_id, "', but it does not exist! Make sure that it's the correct id " "correctly and that the diffusion grid is registered."); return nullptr; } return diffusion_grids_.at(substance_id); } /// Return the diffusion grid which holds the substance of specified name /// Caution: using this function in a tight loop will result in a slow /// simulation. Use `GetDiffusionGrid(size_t)` in those cases. DiffusionGrid* GetDiffusionGrid(std::string substance_name) const { for (auto& el : diffusion_grids_) { auto& dg = el.second; if (dg->GetSubstanceName() == substance_name) { return dg; } } Log::Error("DiffusionGrid::GetDiffusionGrid", "You tried to request a diffusion grid named '", substance_name, "', but it does not exist! Make sure that it's spelled " "correctly and that the diffusion grid is registered."); return nullptr; } /// Execute the given functor for all diffusion grids /// rm->ForEachDiffusionGrid([](DiffusionGrid* dgrid) { /// ... 
/// }); template <typename TFunctor> void ForEachDiffusionGrid(TFunctor&& f) const { for (auto& el : diffusion_grids_) { f(el.second); } } /// Returns the total number of agents if numa_node == -1 /// Otherwise the number of agents in the specific numa node size_t GetNumAgents(int numa_node = -1) const { if (numa_node == -1) { size_t num_agents = 0; for (auto& numa_agents : agents_) { num_agents += numa_agents.size(); } return num_agents; } else { return agents_[numa_node].size(); } } /// Apply a function on all elements in every container /// @param function that will be called with each container as a parameter /// /// rm->ForEachAgent([](Agent* element) { /// std::cout << *element << std::endl; /// }); virtual void ForEachAgent(const std::function<void(Agent*)>& function) { for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { function(agent); } } } virtual void ForEachAgent( const std::function<void(Agent*, AgentHandle)>& function) { for (uint64_t n = 0; n < agents_.size(); ++n) { auto& numa_agents = agents_[n]; for (uint64_t i = 0; i < numa_agents.size(); ++i) { function(numa_agents[i], AgentHandle(n, i)); } } } /// Apply a function on all elements.\n /// Function invocations are parallelized.\n /// Uses static scheduling. /// \see ForEachAgent virtual void ForEachAgentParallel(Functor<void, Agent*>& function); /// Apply an operation on all elements.\n /// Function invocations are parallelized.\n /// Uses static scheduling. /// \see ForEachAgent virtual void ForEachAgentParallel(Operation& op); virtual void ForEachAgentParallel( Functor<void, Agent*, AgentHandle>& function); /// Apply a function on all elements.\n /// Function invocations are parallelized.\n /// Uses dynamic scheduling and work stealing. Batch size controlled by /// `chunk`. 
/// \param chunk number of agents that are assigned to a thread (batch /// size) /// \see ForEachAgent virtual void ForEachAgentParallel( uint64_t chunk, Functor<void, Agent*, AgentHandle>& function); /// Reserves enough memory to hold `capacity` number of agents for /// each numa domain. void Reserve(size_t capacity) { for (auto& numa_agents : agents_) { numa_agents.reserve(capacity); } if (type_index_) { type_index_->Reserve(capacity); } } /// Resize `agents_[numa_node]` such that it holds `current + additional` /// elements after this call. /// Returns the size after uint64_t GrowAgentContainer(size_t additional, size_t numa_node) { if (additional == 0) { return agents_[numa_node].size(); } auto current = agents_[numa_node].size(); if (current + additional < agents_[numa_node].size()) { agents_[numa_node].reserve((current + additional) * 1.5); } agents_[numa_node].resize(current + additional); return current; } /// Returns true if an agent with the given uid is stored in this /// ResourceManager. bool ContainsAgent(const AgentUid& uid) const { return uid_ah_map_.Contains(uid); } /// Remove all agents /// NB: This method is not thread-safe! This function invalidates /// agent references pointing into the ResourceManager. AgentPointer are /// not affected. void ClearAgents() { uid_ah_map_.clear(); for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { delete agent; } numa_agents.clear(); } if (type_index_) { type_index_->Clear(); } } /// Reorder agents such that, agents are distributed to NUMA /// nodes. Nearby agents will be moved to the same NUMA node. virtual void LoadBalance(); void DebugNuma() const; /// NB: This method is not thread-safe! This function might invalidate /// agent references pointing into the ResourceManager. AgentPointer are /// not affected. 
void AddAgent(Agent* agent, // NOLINT typename AgentHandle::NumaNode_t numa_node = 0) { auto uid = agent->GetUid(); if (uid.GetIndex() >= uid_ah_map_.size()) { uid_ah_map_.resize(uid.GetIndex() + 1); } agents_[numa_node].push_back(agent); uid_ah_map_.Insert(uid, AgentHandle(numa_node, agents_[numa_node].size() - 1)); if (type_index_) { type_index_->Add(agent); } } void ResizeAgentUidMap() { auto* agent_uid_generator = Simulation::GetActive()->GetAgentUidGenerator(); auto highest_idx = agent_uid_generator->GetHighestIndex(); auto new_size = highest_idx * 1.5 + 1; if (highest_idx >= uid_ah_map_.size()) { uid_ah_map_.resize(new_size); } if (type_index_) { type_index_->Reserve(new_size); } } void EndOfIteration() { // Check if SoUiD defragmentation should be turned on or off double utilization = static_cast<double>(GetNumAgents()) / static_cast<double>(uid_ah_map_.size()); auto* sim = Simulation::GetActive(); auto* param = sim->GetParam(); if (utilization < param->agent_uid_defragmentation_low_watermark) { sim->GetAgentUidGenerator()->EnableDefragmentation(&uid_ah_map_); } else if (utilization > param->agent_uid_defragmentation_high_watermark) { sim->GetAgentUidGenerator()->DisableDefragmentation(); } } /// Adds `new_agents` to `agents_[numa_node]`. `offset` specifies /// the index at which the first element is inserted. Agents are inserted /// consecutively. This methos is thread safe only if insertion intervals do /// not overlap! virtual void AddAgents(typename AgentHandle::NumaNode_t numa_node, uint64_t offset, const std::vector<Agent*>& new_agents) { uint64_t i = 0; for (auto* agent : new_agents) { auto uid = agent->GetUid(); uid_ah_map_.Insert(uid, AgentHandle(numa_node, offset + i)); agents_[numa_node][offset + i] = agent; i++; } if (type_index_) { #pragma omp critical for (auto* agent : new_agents) { type_index_->Add(agent); } } } /// Removes the agent with the given uid.\n /// NB: This method is not thread-safe! 
This function invalidates /// agent references pointing into the ResourceManager. AgentPointer are /// not affected. void RemoveAgent(const AgentUid& uid) { // remove from map if (uid_ah_map_.Contains(uid)) { auto ah = uid_ah_map_[uid]; uid_ah_map_.Remove(uid); // remove from vector auto& numa_agents = agents_[ah.GetNumaNode()]; Agent* agent = nullptr; if (ah.GetElementIdx() == numa_agents.size() - 1) { agent = numa_agents.back(); numa_agents.pop_back(); } else { // swap agent = numa_agents[ah.GetElementIdx()]; auto* reordered = numa_agents.back(); numa_agents[ah.GetElementIdx()] = reordered; numa_agents.pop_back(); uid_ah_map_.Insert(reordered->GetUid(), ah); } if (type_index_) { type_index_->Remove(agent); } delete agent; } } const TypeIndex* GetTypeIndex() const { return type_index_; } protected: /// Maps an AgentUid to its storage location in `agents_` \n AgentUidMap<AgentHandle> uid_ah_map_ = AgentUidMap<AgentHandle>(100u); //! /// Pointer container for all agents std::vector<std::vector<Agent*>> agents_; /// Maps a diffusion grid ID to the pointer to the diffusion grid std::unordered_map<uint64_t, DiffusionGrid*> diffusion_grids_; ThreadInfo* thread_info_ = ThreadInfo::GetInstance(); //! TypeIndex* type_index_ = nullptr; friend class SimulationBackup; friend std::ostream& operator<<(std::ostream& os, const ResourceManager& rm); BDM_CLASS_DEF_NV(ResourceManager, 1); }; inline std::ostream& operator<<(std::ostream& os, const ResourceManager& rm) { os << "\033[1mAgents per numa node\033[0m" << std::endl; uint64_t cnt = 0; for (auto& numa_agents : rm.agents_) { os << "numa node " << cnt++ << " -> size: " << numa_agents.size() << std::endl; } return os; } } // namespace bdm #endif // CORE_RESOURCE_MANAGER_H_
fmm.h
#pragma once /****************************************************************************** * * mfmm * A high-performance fast multipole method library using C++. * * A fork of ExaFMM (BSD-3-Clause lisence). * Originally copyright Wang, Yokota and Barba. * * Modifications copyright HJA Bird. * ******************************************************************************/ #ifndef INCLUDE_MFMM_FMM_H_ #define INCLUDE_MFMM_FMM_H_ #include <fftw3.h> #include <Eigen/Dense> #include <Eigen/SVD> #include <algorithm> // std::fill #include <fstream> #include <numeric> #include "geometry.h" #include "mfmm.h" #include "p2p_methods.h" #include "timer.h" namespace mfmm { //! Base FMM class template <class FmmKernel> class Fmm : public p2p_methods<FmmKernel> { public: using potential_t = typename FmmKernel::potential_t; protected: using pt = potential_traits<potential_t>; public: using real_t = typename pt::real_t; using complex_t = typename pt::complex_t; using fmm_kernel_funcs_arg_t = typename FmmKernel::kernel_args_t; template <int Rows = dynamic, int Cols = dynamic, int RowOrder = row_major> using potential_matrix_t = typename pt::template potential_matrix_t<Rows, Cols, RowOrder>; template <int Rows = dynamic> using potential_vector_t = typename pt::template potential_vector_t<Rows>; template <int Rows = dynamic, int Cols = dynamic, int RowOrder = row_major> using real_matrix_t = typename pt::template real_matrix_t<Rows, Cols, RowOrder>; template <int Rows = dynamic> using real_vector_t = typename pt::template real_vector_t<Rows>; template <int Rows = dynamic, int Cols = dynamic, int RowOrder = row_major> using complex_matrix_t = typename pt::template complex_matrix_t<Rows, Cols, RowOrder>; template <int Rows = dynamic> using complex_vector_t = typename pt::template complex_vector_t<Rows>; using coord_t = typename pt::coord_t; template <int Rows = dynamic, int RowOrder = row_major> using coord_matrix_t = typename pt::template coord_matrix_t<Rows, RowOrder>; using node_t = 
Node<potential_t>; using nodevec_t = std::vector<node_t>; using nodeptrvec_t = std::vector<node_t*>; int m_p; //!< Order of expansion int m_numSurf; //!< Number of points on equivalent / check surface int m_numConvPoints; //!< Number of points on convolution grid int m_numFreq; //!< Number of coefficients in DFT (depending on whether T is //!< real_t) int m_numCrit; //!< Max number of bodies per leaf int m_depth; //!< Depth of the tree real_t m_r0; //!< Half of the side length of the bounding box coord_t m_x0; //!< Coordinates of the center of root box Fmm() = delete; Fmm(int p, int nCrit, fmm_kernel_funcs_arg_t kernelArguments = fmm_kernel_funcs_arg_t{}) : p2p_methods<FmmKernel>{kernelArguments}, m_p{p}, m_numCrit{nCrit}, m_numSurf{6 * (p - 1) * (p - 1) + 2}, m_numConvPoints{8 * p * p * p}, m_numFreq{0} { m_numFreq = potential_traits<potential_t>::isComplexPotential ? m_numConvPoints : 4 * p * p * (p + 1); } ~Fmm() = default; protected: // Matrices for upwards check surface (& potentials) to upwards equivalent // surface (& densities). std::vector<potential_matrix_t<dynamic, dynamic>> m_matUC2E; // Matrices for downwards check surface (& potentials) to downards equivalent // surface (& densities). std::vector<potential_matrix_t<dynamic, dynamic>> m_matDC2E; std::vector< std::array<potential_matrix_t<dynamic, dynamic>, REL_COORD_M2M.size()>> m_matM2M; std::vector< std::array<potential_matrix_t<dynamic, dynamic>, REL_COORD_L2L.size()>> m_matL2L; std::vector<std::array<std::vector<complex_t>, REL_COORD_M2L.size()>> m_matM2L; // Data required for moment to local interaction. m_m2lData[octreeLevel] has // M2L data including offsets and interaction counts at the required level in // the octree. std::vector<M2LData<real_t>> m_m2lData; public: /** Compute the kernel matrix of a given kernel. * * The kernel matrix defines the interaction between the sources and the * targets: targetVal = kernelMatrix * sourceStrength. 
* This function evaluates the interaction kernel using unit source strength * to obtain each value in the matrix. * * @param sourceCoords Vector of source coordinates. * @param targetCoords Vector of target coordinates. * @return matrix Kernel matrix. */ template <int NumSources = dynamic, int NumTargets = dynamic, int SourceRowOrder = row_major, int TargetRowOrder = row_major> auto kernel_matrix( const coord_matrix_t<NumSources, SourceRowOrder>& sourceCoords, const coord_matrix_t<NumTargets, TargetRowOrder>& targetCoords) { const auto sourceValue = potential_vector_t<1>::Ones(); const size_t numSources = sourceCoords.rows(); const size_t numTargets = targetCoords.rows(); // Needs to be column major for 1 column. using return_t = Eigen::Matrix<potential_t, NumSources, NumTargets, column_major>; return_t kernelMatrix = return_t::Zero(numSources, numTargets); for (size_t i{0}; i < numSources; ++i) { for (size_t j{0}; j < numTargets; ++j) { kernelMatrix(i, j) = this->potential_P2P(sourceCoords.row(i), targetCoords.row(j)); } } return kernelMatrix; } /** Compute particle to particle interactions. * @param leafs A vector of leaf nodes. For each element in this vector, add * the interaction from the sources in the element's P2P list without using * any equivalent particles. **/ void operator_P2P(nodeptrvec_t& leafs) { nodeptrvec_t& targets = leafs; #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(targets.size()); i++) { node_t* target = targets[i]; nodeptrvec_t& sources = target->P2Plist(); for (int j = 0; j < static_cast<int>(sources.size()); j++) { node_t* source = sources[j]; target->target_potentials() += this->potential_P2P( source->source_coords(), source->source_strengths(), target->target_coords()); target->target_gradients() += this->gradient_P2P( source->source_coords(), source->source_strengths(), target->target_coords()); } } } /** Compute multiple to particle interactions. * @param leafs A vector of leaf nodes. 
For each element in this vector, add * the interaction from the sources in the element's M2P list. Uses source * equivalent particles with pre-computed source->up_equiv() potentials. **/ void operator_M2P(nodeptrvec_t& leafs) { nodeptrvec_t& targets = leafs; coord_t c = coord_t::Zero(3); std::vector<coord_matrix_t<>> upEquivSurf; upEquivSurf.resize(m_depth + 1); for (int level = 0; level <= m_depth; level++) { upEquivSurf[level].resize(m_numSurf, 3); upEquivSurf[level] = box_surface_coordinates<potential_t>(m_p, m_r0, level, c, 1.05); } #pragma omp parallel for for (int i = 0; i < static_cast<int>(targets.size()); i++) { node_t& target = *targets[i]; nodeptrvec_t& sources = target.M2Plist(); for (size_t j = 0; j < sources.size(); j++) { node_t& source = *sources[j]; int level = source.location().level(); // source node's equiv coord = relative equiv coord + node's center coord_matrix_t<> sourceEquivCoords{upEquivSurf[level]}; sourceEquivCoords.rowwise() += source.centre(); target.target_potentials() = this->potential_P2P( sourceEquivCoords, source.up_equiv(), target.target_coords()); target.target_gradients() = this->gradient_P2P( sourceEquivCoords, source.up_equiv(), target.target_coords()); } } } /** Particle to local operator. * @param nodes A vector of nodes to apply this operator to. 
**/ void operator_P2L(nodevec_t& nodes) { nodevec_t& targets = nodes; std::vector<coord_matrix_t<>> dn_check_surf; dn_check_surf.resize(m_depth + 1); for (int level = 0; level <= m_depth; level++) { dn_check_surf[level].resize(m_numSurf, 3); dn_check_surf[level] = box_surface_coordinates<potential_t>( m_p, m_r0, level, coord_t::Zero(3), 1.05); } #pragma omp parallel for for (int i = 0; i < static_cast<int>(targets.size()); i++) { node_t* target = &targets[i]; nodeptrvec_t& sources = {target->P2Llist()}; for (size_t j = 0; j < sources.size(); j++) { node_t* source = sources[j]; int level = target->location().level(); // target node's check coord = relative check coord + node's center coord_matrix_t<> targetCheckCoords(m_numSurf, 3); targetCheckCoords = dn_check_surf[level]; targetCheckCoords.rowwise() += target->centre(); target->down_equiv() = this->potential_P2P(source->source_coords(), source->source_strengths(), targetCheckCoords); } } } /** Evaluate upward equivalent charges for all nodes in a post-order * traversal. * @param nodes Vector of all nodes. * @param leafs Vector of pointers to leaf nodes. */ void upward_pass(nodevec_t& nodes, nodeptrvec_t& leafs, bool verbose = true) { start("P2M"); operator_P2M(leafs); stop("P2M", verbose); start("M2M"); #pragma omp parallel #pragma omp single nowait operator_M2M(nodes[0]); stop("M2M", verbose); } /** Evaluate potentials and gradients for all targets in a pre-order * traversal. * @param nodes Vector of all nodes. * @param leafs Vector of pointers to leaf nodes. 
*/ void downward_pass(nodevec_t& nodes, nodeptrvec_t& leafs, bool verbose = true) { start("P2L"); operator_P2L(nodes); stop("P2L", verbose); start("M2P"); operator_M2P(leafs); stop("M2P", verbose); start("P2P"); operator_P2P(leafs); stop("P2P", verbose); start("M2L"); operator_M2L(nodes); stop("M2L", verbose); start("L2L"); operator_L2L(nodes[0]); stop("L2L", verbose); start("L2P"); operator_L2P(leafs); stop("L2P", verbose); } /** Check FMM accuracy by comparison to directly evaluated (N^2) solution. * @param leafs Vector of leaves. * @param sample Sample only some values, reducing computational cost. * @return The relative error of potential and gradient in L2 norm. */ std::vector<real_t> verify(nodeptrvec_t& leafs, bool sample = false) { nodevec_t targets; // vector of target nodes if (sample) { int nSamples = 10; size_t stride = leafs.size() / nSamples; for (size_t i = 0; i < nSamples; i++) { targets.push_back(*(leafs[i * stride])); } } else { // compute all values directly without sampling for (size_t i = 0; i < leafs.size(); i++) { targets.push_back(*leafs[i]); } } nodevec_t targets2 = targets; // target2 is used for direct summation #pragma omp parallel for for (int i = 0; i < static_cast<int>(targets2.size()); i++) { node_t* target = &targets2[i]; target->zero_target_values(); for (size_t j = 0; j < leafs.size(); j++) { target->target_potentials() += this->potential_P2P( leafs[j]->source_coords(), leafs[j]->source_strengths(), target->target_coords()); target->target_gradients() += this->gradient_P2P( leafs[j]->source_coords(), leafs[j]->source_strengths(), target->target_coords()); } } // relative error in L2 norm double potentialDiff{0}, potentialNorm{0}; double gradientDiff{0}, gradientNorm{0}; for (size_t i = 0; i < targets.size(); i++) { potentialNorm += targets2[i].target_potentials().squaredNorm(); potentialDiff += (targets2[i].target_potentials() - targets[i].target_potentials()) .squaredNorm(); gradientNorm += 
targets2[i].target_gradients().squaredNorm(); gradientDiff += (targets2[i].target_gradients() - targets[i].target_gradients()) .squaredNorm(); } std::vector<real_t> err(2); err[0] = sqrt(potentialDiff / potentialNorm); err[1] = sqrt(gradientDiff / gradientNorm); return err; } /// Allocate memory for precomputed matrices. void initialize_matrix() { const int nSurf = m_numSurf; int depth = m_depth; m_matUC2E.resize(depth + 1, potential_matrix_t<>(nSurf, nSurf)); m_matDC2E.resize(depth + 1, potential_matrix_t<>(nSurf, nSurf)); m_matM2M.resize(depth + 1); m_matL2L.resize(depth + 1); for (int level = 0; level <= depth; ++level) { std::fill(m_matM2M[level].begin(), m_matM2M[level].end(), potential_matrix_t<>(nSurf, nSurf)); std::fill(m_matL2L[level].begin(), m_matL2L[level].end(), potential_matrix_t<>(nSurf, nSurf)); } } /** Precompute M2M and L2L matrices. * @note Requires that the matrices for computing equivalent source densities * from check potentials are precomputed. (matrices UC2E and DC2E). **/ void precompute_M2M_L2L() { for (int level = 0; level <= m_depth; level++) { auto parent_up_check_surf = box_surface_coordinates<potential_t>( m_p, m_r0, level, {0, 0, 0}, 2.95); real_t s = m_r0 * std::pow(0.5, level + 1); int nPos = static_cast<int>(REL_COORD_M2M.size()); #pragma omp parallel for for (int i = 0; i < nPos; i++) { ivec3& coord = REL_COORD_M2M[i]; coord_t childCoord(coord.cast<real_t>() * s); auto child_up_equiv_surf = box_surface_coordinates<potential_t>( m_p, m_r0, level + 1, childCoord, 1.05); // Parent upwards check surface to child upwards equivalent surface. // Downwards check to downwards equivalent is transpose of this. potential_matrix_t<> matrix_pc2ce = kernel_matrix(parent_up_check_surf, child_up_equiv_surf); m_matM2M[level][i] = m_matUC2E[level] * matrix_pc2ce; m_matL2L[level][i] = m_matDC2E[level] * matrix_pc2ce.transpose(); } } } /// Precompute operator matrices. 
void precompute() { initialize_matrix(); precompute_check2equiv(); precompute_M2M_L2L(); precompute_M2L(); } /** Particle to multiple operator. Computes the equivalent source strengths of * a octree cell from the particles contained within it. * @param leafs A collection of leaf nodes to apply this operator to. **/ void operator_P2M(nodeptrvec_t& leafs) { std::vector<coord_matrix_t<>> upCheckSurf(m_depth + 1, coord_matrix_t<>(m_numSurf, 3)); for (int level = 0; level <= m_depth; level++) { upCheckSurf[level] = box_surface_coordinates<potential_t>( m_p, m_r0, level, {0, 0, 0}, 2.95); } #pragma omp parallel for for (int i = 0; i < static_cast<int>(leafs.size()); i++) { node_t* leaf = leafs[i]; int level = leaf->location().level(); // calculate upward check potential induced by sources' charges coord_matrix_t<> check_coord{upCheckSurf[level]}; check_coord.rowwise() += leaf->centre(); leaf->up_equiv() = this->potential_P2P( leaf->source_coords(), leaf->source_strengths(), check_coord); Eigen::Matrix<potential_t, Eigen::Dynamic, 1> equiv = m_matUC2E[level] * leaf->up_equiv(); for (int k = 0; k < m_numSurf; k++) { leaf->up_equiv()[k] = equiv[k]; } } } /** Local to target operator. * @param leafs A collection of leaf nodes to apply this operator to. 
**/ void operator_L2P(nodeptrvec_t& leafs) { std::vector<coord_matrix_t<>> downEquivSurf(m_depth + 1, coord_matrix_t<>(m_numSurf, 3)); for (int level = 0; level <= m_depth; level++) { downEquivSurf[level] = box_surface_coordinates<potential_t>( m_p, m_r0, level, {0, 0, 0}, 2.95); } #pragma omp parallel for for (int i = 0; i < static_cast<int>(leafs.size()); i++) { node_t* leaf = leafs[i]; int level = leaf->location().level(); // down check surface potential -> equivalent surface charge potential_vector_t<> equiv = m_matDC2E[level] * leaf->down_equiv(); leaf->down_equiv() = equiv; // equivalent surface charge -> target potential coord_matrix_t<> equiv_coord(downEquivSurf[level]); equiv_coord.rowwise() += leaf->centre(); leaf->target_potentials() += this->potential_P2P( equiv_coord, leaf->down_equiv(), leaf->target_coords()); leaf->target_gradients() += this->gradient_P2P( equiv_coord, leaf->down_equiv(), leaf->target_coords()); } } /** Multiple to multiple operator. * @param baseNode The top node in the octree to operate on. **/ void operator_M2M(node_t& baseNode) { const int nSurf = m_numSurf; if (baseNode.is_leaf()) { return; } #pragma omp parallel for schedule(dynamic) for (int octant = 0; octant < NCHILD; octant++) { if (baseNode.has_child(octant)) { operator_M2M(baseNode.child(octant)); } } for (int octant = 0; octant < NCHILD; octant++) { if (baseNode.has_child(octant)) { int level = baseNode.location().level(); potential_vector_t<> buffer = m_matM2M[level][octant] * baseNode.down_equiv(); baseNode.up_equiv() += buffer; } } } /** Local to local operator. * @param baseNode The top node in the octree to operate on. 
**/ void operator_L2L(node_t& baseNode) { const int nSurf = m_numSurf; if (baseNode.is_leaf()) { return; } for (int octant = 0; octant < NCHILD; octant++) { if (baseNode.has_child(octant)) { node_t& child = baseNode.child(octant); int level = baseNode.location().level(); potential_vector_t<> buffer = m_matL2L[level][octant] * baseNode.down_equiv(); child.down_equiv() += buffer; } } #pragma omp parallel for schedule(dynamic) for (int octant = 0; octant < NCHILD; octant++) { if (baseNode.has_child(octant)) { operator_L2L(baseNode.child(octant)); } } } /** Precomputations for moment to local operator. * Sets m_m2lData. * @param nonleafs A vector of pointers to the non-leafs nodes. **/ void setup_M2L(nodeptrvec_t& nonleafs) { const int depth = m_depth; int nPos = static_cast<int>(REL_COORD_M2L.size()); m_m2lData.resize(depth); // Collect all of the non-leaf nodes on a per-level basis. std::vector<nodeptrvec_t> targetNodes(depth); for (auto& leafPtr : nonleafs) { targetNodes[leafPtr->location().level()].push_back(leafPtr); } // prepare for m2lData for each level for (int l = 0; l < depth; l++) { m_m2lData[l] = setup_M2L(targetNodes[l], l); } } /** Compute moment to local operators for a given level. * @param levelNodes The non-leaf nodes at this level. * @param level The level in the octree. * @return An M2LData for this level. **/ M2LData<real_t> setup_M2L(nodeptrvec_t& levelNodes, int level) { const int nPos = static_cast<int>(REL_COORD_M2L.size()); const size_t fftSize = NCHILD * m_numFreq; nodeptrvec_t sourceNodes; { // Add every m2l interaction from levelNodes to the sourceNodeSet. std::set<node_t*> sourceNodeSet; for (auto& node : levelNodes) { nodeptrvec_t& m2lList = node->M2Llist(); for (int k = 0; k < nPos; k++) { if (m2lList[k] != nullptr) { sourceNodeSet.insert(m2lList[k]); } } } // Now turn that into a vector. 
for (auto it = sourceNodeSet.begin(); it != sourceNodeSet.end(); it++) { sourceNodes.push_back(*it); } } // prepare the indices of sourceNodes & levelNodes in all_up_equiv & // all_dn_equiv // displacement in all_up_equiv: std::vector<size_t> fftOffset(sourceNodes.size()); for (size_t i = 0; i < sourceNodes.size(); i++) { fftOffset[i] = sourceNodes[i]->child(0).index() * m_numSurf; } // displacement in all_dn_equiv: std::vector<size_t> ifftOffset(levelNodes.size()); for (size_t i = 0; i < levelNodes.size(); i++) { ifftOffset[i] = levelNodes[i]->child(0).index() * m_numSurf; } // calculate interaction_offset_f & interaction_count_offset std::vector<std::pair<size_t, size_t>> interactionOffsetF; std::array<size_t, nPos> interactionCountOffset; for (size_t i = 0; i < sourceNodes.size(); i++) { // node_id: node's index in sourceNodes list sourceNodes[i]->indexM2L() = i; } size_t interactionCountOffsetVar = 0; for (int k = 0; k < nPos; k++) { for (size_t i{0}; i < levelNodes.size(); i++) { nodeptrvec_t& M2L_list = levelNodes[i]->M2Llist(); if (M2L_list[k] != nullptr) { // std::pair{source node's displacement in fftIn, target node's // displacement in fftOut}. 
interactionOffsetF.push_back( {M2L_list[k]->indexM2L() * fftSize, i * fftSize}); interactionCountOffsetVar++; } } interactionCountOffset[k] = interactionCountOffsetVar; } M2LData<real_t> returnData; returnData.m_fftOffset = fftOffset; returnData.m_ifftOffset = ifftOffset; returnData.m_interactionOffsetF = interactionOffsetF; returnData.m_interactionCountOffset = interactionCountOffset; return returnData; } std::vector<complex_t> hadamard_product( std::array<size_t, static_cast<int>(REL_COORD_M2L.size())>& interactionCountOffset, std::vector<std::pair<size_t, size_t>>& interactionOffsetF, std::vector<complex_t>& fftIn, std::vector<std::vector<complex_matrix_t<NCHILD, NCHILD, column_major>>>& matrixM2L, size_t fftOutSize) { const size_t fftSize = NCHILD * m_numFreq; std::vector<complex_t> fftOut(fftOutSize, 0); #pragma omp parallel for schedule(static) for (int k = 0; k < m_numFreq; k++) { for (size_t iPos = 0; iPos < interactionCountOffset.size(); iPos++) { // k-th freq's (row) offset in matrix_M2L: complex_matrix_t<NCHILD, NCHILD, column_major>& M = matrixM2L[iPos][k]; size_t interactionCountOffset0 = (iPos == 0 ? 0 : interactionCountOffset[iPos - 1]); size_t interactionCountOffset1 = interactionCountOffset[iPos]; // Matrix vector product {8} = [8,8] * {8} for all interactions: for (size_t j = interactionCountOffset0; j < interactionCountOffset1; j++) { using l_vector_t = Eigen::Matrix<complex_t, 8, 1>; using l_mapped_vector_t = Eigen::Map<l_vector_t>; auto in = l_mapped_vector_t(fftIn.data() + interactionOffsetF[j].first + k * NCHILD); auto out = l_mapped_vector_t( fftOut.data() + interactionOffsetF[j].second + k * NCHILD); out += M * in; } } } return fftOut; } void operator_M2L(nodevec_t& nodes) { const int nSurf = m_numSurf; size_t nNodes = nodes.size(); constexpr size_t nPos = REL_COORD_M2L.size(); std::vector<potential_t> allUpEquiv(nNodes * nSurf), allDnEquiv(nNodes * nSurf); // matrixM2L[nPos index][frequency index] -> 8*8 matrix. 
std::vector<std::vector<complex_matrix_t<NCHILD, NCHILD, column_major>>> matrixM2L( nPos, std::vector<complex_matrix_t<NCHILD, NCHILD, column_major>>( m_numFreq, complex_matrix_t<NCHILD, NCHILD, column_major>::Zero( NCHILD, NCHILD))); // collect all upward equivalent charges #pragma omp parallel for schedule(static) for (int i = 0; i < nNodes; ++i) { for (int j = 0; j < nSurf; ++j) { allUpEquiv[i * nSurf + j] = nodes[i].up_equiv()[j]; allDnEquiv[i * nSurf + j] = nodes[i].down_equiv()[j]; } } // FFT-accelerate M2L for (size_t l{0}; l < m_depth; ++l) { // load M2L matrix for current level for (size_t i{0}; i < nPos; ++i) { size_t mSize = NCHILD * NCHILD * m_numFreq * sizeof(complex_t); std::memcpy(matrixM2L[i].data(), m_matM2L[l][i].data(), mSize); } std::vector<complex_t> fftIn = fft_up_equiv(m_m2lData[l].m_fftOffset, allUpEquiv); size_t outputFftSize = m_m2lData[l].m_ifftOffset.size() * m_numFreq * NCHILD; std::vector<complex_t> fftOut = hadamard_product( m_m2lData[l].m_interactionCountOffset, m_m2lData[l].m_interactionOffsetF, fftIn, matrixM2L, outputFftSize); ifft_dn_check(m_m2lData[l].m_ifftOffset, fftOut, allDnEquiv); } // update all downward check potentials #pragma omp parallel for schedule(static) for (int i = 0; i < nNodes; ++i) { for (int j = 0; j < nSurf; ++j) { nodes[i].down_equiv()[j] = allDnEquiv[i * nSurf + j]; } } } /** Precompute upwards check to equiv (UC2E) and downwads check to equiv * (DC2E) matrices. * @Note See Ying et al. sec. 3.2.1. SVD is used here instead of of Tikhonov * regularization. **/ void precompute_check2equiv() { coord_t boxCentre = coord_t::Zero(3); //#pragma omp parallel for for (int level = 0; level <= m_depth; ++level) { // compute kernel matrix auto upCheckSurf = box_surface_coordinates<potential_t>(m_p, m_r0, level, boxCentre, 2.95); auto upEquivSurf = box_surface_coordinates<potential_t>(m_p, m_r0, level, boxCentre, 1.05); // Upwards check surface to upwards equiv surface matrix. 
The down check // surf to down equiv matrix is the transpose of this. potential_matrix_t<> matrix_c2e = kernel_matrix(upCheckSurf, upEquivSurf); Eigen::BDCSVD<potential_matrix_t<>> svd( matrix_c2e, Eigen::ComputeFullU | Eigen::ComputeFullV); auto singularDiag = svd.singularValues(); auto U = svd.matrixU(); auto V = svd.matrixV(); // Pseudo-inverse of singular values matrix, removing negligible terms. real_t max_S = std::reduce( singularDiag.data(), singularDiag.data() + singularDiag.size(), 0., [](auto a1, auto a2) { return std::max(a1, a2); }); for (int i = 0; i < m_numSurf; i++) { singularDiag(i) = singularDiag(i) > pt::epsilon * max_S * 4 ? 1.0 / singularDiag(i) : 0.0; } auto S_inv = singularDiag.asDiagonal(); // The psuedo-inverse of matrix_c2e. Upwards check to equivalent. m_matUC2E[level] = V * S_inv * U.adjoint(); // Downwards check to downwards equivalent. m_matDC2E[level] = U.conjugate() * S_inv * V.transpose(); } } /** Precompute M2L matrices. **/ void precompute_M2L() { int fftSize = m_numFreq * NCHILD * NCHILD; std::array<std::vector<complex_t>, REL_COORD_M2L_helper.size()> matrix_M2L_Helper; m_matM2L.resize(m_depth); std::fill(matrix_M2L_Helper.begin(), matrix_M2L_Helper.end(), std::vector<complex_t>(m_numFreq)); // create fft plan ivec3 dim = ivec3{m_p, m_p, m_p} * 2; fft<potential_t, fft_dir::forwards> fftPlan(3, dim.data()); for (int level = 0; level < m_depth; ++level) { // compute M2L kernel matrix, perform DFT std::fill(m_matM2L[level].begin(), m_matM2L[level].end(), std::vector<complex_t>(fftSize)); #pragma omp parallel for for (int i = 0; i < static_cast<int>(REL_COORD_M2L_helper.size()); ++i) { coord_t boxCentre; for (int d = 0; d < 3; d++) { boxCentre[d] = REL_COORD_M2L_helper[i][d] * m_r0 * std::pow(0.5, level); // relative coords } coord_matrix_t<dynamic> convolutionCoords = convolution_grid<potential_t>(m_p, m_r0, level + 1, boxCentre); // convolution grid // potentials on convolution grid auto convValue = 
kernel_matrix<dynamic>(convolutionCoords, coord_t{coord_t::Zero()}); fftPlan.execute(convValue.data(), matrix_M2L_Helper[i].data()); } // convert M2L_Helper to M2L and reorder data layout to improve locality #pragma omp parallel for for (int i{0}; i < static_cast<int>(REL_COORD_M2L.size()); ++i) { for (int j = 0; j < NCHILD * NCHILD; j++) { // loop over child's relative positions int childRelIdx = M2L_INDEX_MAP[i][j]; if (childRelIdx != 123456789) { for (int k = 0; k < m_numFreq; k++) { // loop over frequencies int new_idx = k * (NCHILD * NCHILD) + j; m_matM2L[level][i][new_idx] = matrix_M2L_Helper[childRelIdx][k] / complex_t(m_numConvPoints); } } } } } } std::vector<complex_t> fft_up_equiv(std::vector<size_t>& fftOffset, std::vector<potential_t>& allUpEquiv) { const int nConv = m_numConvPoints; auto map = generate_surf2conv_up<potential_t>(m_p); size_t fftSize = NCHILD * m_numFreq; std::vector<complex_t> fftIn(fftOffset.size() * fftSize); ivec3 dim = ivec3{m_p, m_p, m_p} * 2; fft<potential_t, fft_dir::forwards> fftPlan(3, dim.data(), NCHILD, nConv, m_numFreq); #pragma omp parallel for for (int node_idx = 0; node_idx < static_cast<int>(fftOffset.size()); node_idx++) { std::vector<complex_t> buffer(fftSize, 0); std::vector<potential_t> equiv_t(NCHILD * nConv, potential_t(0.)); for (int k = 0; k < m_numSurf; k++) { size_t idx = map[k]; for (int j = 0; j < NCHILD; j++) equiv_t[idx + j * nConv] = allUpEquiv[fftOffset[node_idx] + j * m_numSurf + k]; } fftPlan.execute(equiv_t.data(), buffer.data()); for (int k = 0; k < m_numFreq; k++) { for (int j = 0; j < NCHILD; j++) { fftIn[fftSize * node_idx + NCHILD * k + j] = buffer[m_numFreq * j + k]; } } } return fftIn; } void ifft_dn_check(std::vector<size_t>& ifftOffset, std::vector<complex_t>& fftOut, std::vector<potential_t>& allDownEquiv) { auto map = generate_surf2conv_dn<potential_t>(m_p); size_t fftSize = NCHILD * m_numFreq; ivec3 dim = ivec3{m_p, m_p, m_p} * 2; fft<potential_t, fft_dir::backwards> fftPlan(3, dim.data(), 
NCHILD, m_numFreq, m_numConvPoints); #pragma omp parallel for for (int node_idx = 0; node_idx < static_cast<int>(ifftOffset.size()); node_idx++) { std::vector<complex_t> fqDomainData(fftSize, 0); std::vector<potential_t> tmDomainData(NCHILD * m_numConvPoints, 0); potential_t* downEquiv = &allDownEquiv[ifftOffset[node_idx]]; for (int k = 0; k < m_numFreq; k++) { for (int j = 0; j < NCHILD; j++) { fqDomainData[m_numFreq * j + k] = fftOut[fftSize * node_idx + NCHILD * k + j]; } } fftPlan.execute(fqDomainData.data(), tmDomainData.data()); for (int k = 0; k < m_numSurf; k++) { size_t idx = map[k]; for (int j = 0; j < NCHILD; j++) downEquiv[m_numSurf * j + k] += tmDomainData[idx + j * m_numConvPoints]; } } } }; } // namespace mfmm #endif // INCLUDE_MFMM_FMM_H_
nested.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Demonstration of nested OpenMP parallelism: level1 forks a team whose
 * threads call level2, whose threads in turn call level3.  main() caps the
 * number of *active* (actually forking) nesting levels at 2, so the level3
 * region is serialized (one thread per region).
 */

/* Innermost region: asks for 2 threads, but nesting level 3 exceeds
   max_active_levels (2), so the region runs with a single thread. */
void level3(int parent) {
#pragma omp parallel num_threads(2)
  {
    int tid = omp_get_thread_num();
    int team = omp_get_num_threads();
    int active = omp_get_active_level();
    int total = omp_get_level();
#pragma omp critical
    printf("L3: parent %d, thread %d / %d, level %d (nested regions %d)\n",
           parent, tid, team, active, total);
  }
}

/* Second level: 3 threads per outer thread (at most 2 * 3 threads in
   flight), each of which descends into level3. */
void level2(int parent) {
#pragma omp parallel num_threads(3)
  {
    int tid = omp_get_thread_num();
    int team = omp_get_num_threads();
    int active = omp_get_active_level();
    int total = omp_get_level();
#pragma omp critical
    printf("L2: parent %d, thread %d / %d, level %d (nested regions %d)\n",
           parent, tid, team, active, total);
    level2_descend: ;
    level3(tid);
  }
}

/* Outermost level: a team of 2 threads, each entering level2. */
void level1() {
#pragma omp parallel num_threads(2)
  {
    int tid = omp_get_thread_num();
    int team = omp_get_num_threads();
    int active = omp_get_active_level();
    int total = omp_get_level();
#pragma omp critical
    printf("L1: thread %d / %d, level %d (nested regions %d)\n",
           tid, team, active, total);
    level2(tid);
  }
}

int main(int argc, char **argv) {
  /* Report the runtime's initial nesting-related settings before we
     change any of them. */
  printf("OMP_DYNAMIC: %d\n", omp_get_dynamic());
  printf("OMP_NESTED: %d\n", omp_get_nested());
  printf("OMP_THREAD_LIMIT: %d\n", omp_get_thread_limit());
  printf("OMP_MAX_ACTIVE_LEVELS: %d\n", omp_get_max_active_levels());
  printf("ActiveNestedParRegions: %d\n", omp_get_active_level());
  printf("NestedParRegions: %d\n", omp_get_level());

  /* Enable nested parallelism, but let only the two outermost levels
     actually fork new teams. */
  omp_set_nested(1);
  omp_set_max_active_levels(2);

  level1();

  return 0;
}
schedule-modifiers-1.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Compile-only DejaGnu test for the OpenMP schedule-clause modifiers
   (monotonic, nonmonotonic, simd).  The dg-error comments below are test
   directives: each marks a pragma that the compiler must reject with the
   quoted diagnostic on that same line.  Pragmas without a dg-error comment
   must be accepted silently.  */

/* All combinations here are valid and must compile cleanly.  */
void
foo (void)
{
  int i;
#pragma omp for simd schedule (simd, simd: static, 5)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for simd schedule (monotonic, simd: static)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for simd schedule (simd , monotonic : static, 6)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic, monotonic : static, 7)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic, nonmonotonic : dynamic)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for simd schedule (nonmonotonic , simd : dynamic, 3)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for simd schedule (nonmonotonic,simd:guided,4)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic: static, 2)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : static)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : dynamic)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : dynamic, 3)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : guided)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : guided, 7)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : runtime)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : auto)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : dynamic)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : dynamic, 3)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : guided)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : guided, 7)
  for (i = 0; i < 64; i++)
    ;
}

/* The nonmonotonic modifier is incompatible with the ordered clause and
   cannot be combined with monotonic; those pragmas must be diagnosed.  */
void
bar (void)
{
  int i;
#pragma omp for schedule (nonmonotonic: static, 2)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : static)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : runtime)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : auto)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : static) ordered /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
    #pragma omp ordered
      ;
#pragma omp for ordered schedule (nonmonotonic: static, 4) /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
    #pragma omp ordered
      ;
#pragma omp for schedule (nonmonotonic : dynamic) ordered /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
    #pragma omp ordered
      ;
#pragma omp for ordered schedule(nonmonotonic : dynamic, 5) /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
    #pragma omp ordered
      ;
#pragma omp for schedule (nonmonotonic : guided) ordered(1) /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend(sink: i - 1)
      #pragma omp ordered depend(source)
    }
#pragma omp for ordered(1) schedule(nonmonotonic : guided, 2) /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend(source)
      #pragma omp ordered depend(sink: i - 1)
    }
#pragma omp for schedule(nonmonotonic : runtime) ordered(1) /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend(source)
      #pragma omp ordered depend(sink: i - 1)
    }
#pragma omp for schedule (nonmonotonic , monotonic : dynamic) /* { dg-error "both .monotonic. and .nonmonotonic. modifiers specified" } */
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic,nonmonotonic:dynamic) /* { dg-error "both .monotonic. and .nonmonotonic. modifiers specified" } */
  for (i = 0; i < 64; i++)
    ;
}
join.c
/* Copyright 2013-2015. The Regents of the University of California. * Copyright 2015. Martin Uecker. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013, 2015 Martin Uecker <martin.uecker@med.uni-goettingen.de> * 2015 Jonathan Tamir <jtamir@eecs.berkeley.edu> */ #include <stdbool.h> #include <complex.h> #include <string.h> #include <unistd.h> #include "num/multind.h" #include "num/init.h" #include "misc/mmio.h" #include "misc/debug.h" #include "misc/misc.h" #include "misc/opts.h" #include "misc/io.h" #ifndef DIMS #define DIMS 16 #endif #ifndef CFL_SIZE #define CFL_SIZE sizeof(complex float) #endif static const char help_str[] = "Join input files along {dimensions}. All other dimensions must have the same size.\n" "\t Example 1: join 0 slice_001 slice_002 slice_003 full_data\n" "\t Example 2: join 0 `seq -f \"slice_%%03g\" 0 255` full_data"; int main_join(int argc, char* argv[argc]) { long count = 0; int dim = -1; const char** in_files = NULL; const char* out_file = NULL; struct arg_s args[] = { ARG_INT(true, &dim, "dimension"), ARG_TUPLE(true, &count, 1, OPT_INFILE, sizeof(char*), &in_files, "input"), ARG_INOUTFILE(true, &out_file, "output"), }; bool append = false; const struct opt_s opts[] = { OPT_SET('a', &append, "append - only works for cfl files!"), }; cmdline(&argc, argv, ARRAY_SIZE(args), args, help_str, ARRAY_SIZE(opts), opts); num_init(); int N = DIMS; assert(dim < N); if (append) { count += 1; assert(count > 1); int len = strlen(out_file); char buf[len + 5]; strcpy(buf, out_file); strcat(buf, ".cfl"); if (-1 == access(buf, F_OK)) { // make sure we do not have any other file format strcpy(buf, out_file); strcat(buf, ".coo"); assert(-1 == access(buf, F_OK)); strcpy(buf, out_file); strcat(buf, ".ra"); assert(-1 == access(buf, F_OK)); count--; append = false; } } long in_dims[count][N]; long offsets[count]; complex float* idata[count]; long sum = 0; // figure out 
size of output for (int l = 0, i = 0; i < count; i++) { const char* name = NULL; if (append && (i == 0)) { name = out_file; } else { name = in_files[l++]; } debug_printf(DP_DEBUG1, "loading %s\n", name); idata[i] = load_cfl(name, N, in_dims[i]); offsets[i] = sum; sum += in_dims[i][dim]; for (int j = 0; j < N; j++) assert((dim == j) || (in_dims[0][j] == in_dims[i][j])); if (append && (i == 0)) unmap_cfl(N, in_dims[i], idata[i]); } long out_dims[N]; for (int i = 0; i < N; i++) out_dims[i] = in_dims[0][i]; out_dims[dim] = sum; if (append) { // Here, we need to trick the IO subsystem into absolutely NOT // unlinking our input, as the same file is also an output here. io_close(out_file); } complex float* out_data = create_cfl(out_file, N, out_dims); long ostr[N]; md_calc_strides(N, ostr, out_dims, CFL_SIZE); #pragma omp parallel for for (int i = 0; i < count; i++) { if (!(append && (0 == i))) { long pos[N]; md_singleton_strides(N, pos); pos[dim] = offsets[i]; long istr[N]; md_calc_strides(N, istr, in_dims[i], CFL_SIZE); md_copy_block(N, pos, out_dims, out_data, in_dims[i], idata[i], CFL_SIZE); unmap_cfl(N, in_dims[i], idata[i]); debug_printf(DP_DEBUG1, "done copying file %d\n", i); } } unmap_cfl(N, out_dims, out_data); xfree(in_files); return 0; }
DRB099-targetparallelfor2-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* DataRaceBench DRB099: known race-free kernel.  The loops carry no
   cross-iteration dependences, so the parallelization is correct by
   construction.  The "#pragma cetus"/"#pragma loop name" lines are
   annotations emitted by the Cetus source-to-source parallelizer; only the
   "#pragma omp" lines affect OpenMP compilation.  */

#include <stdio.h>

/* use of omp target + map + array sections derived from pointers */
/* Computes b[i] = a[i] * i for i in [0, N); each iteration writes a
   distinct b[i], so the parallel loop is race-free.  */
void foo(double * a, double * b, int N)
{
  int i;
  #pragma cetus private(i)
  #pragma loop name foo#0
  #pragma cetus parallel
  #pragma omp parallel for private(i)
  for (i=0; i<N; i ++ )
  {
    b[i]=(a[i]*((double)i));
  }
  return ;
}

/* Driver: initializes a[i] = i/2, b[i] = 0, runs foo, and prints one
   element (b[50]) as a spot check.  */
int main(int argc, char * argv[])
{
  int i;
  int len = 1000;
  double a[len], b[len];
  int _ret_val_0;
  /* Initialization loop: disjoint writes per iteration, race-free.  */
  #pragma cetus private(i)
  #pragma loop name main#0
  #pragma cetus parallel
  #pragma omp parallel for private(i)
  for (i=0; i<len; i ++ )
  {
    a[i]=(((double)i)/2.0);
    b[i]=0.0;
  }
  foo(a, b, len);
  printf("b[50]=%f\n", b[50]);
  _ret_val_0=0;
  return _ret_val_0;
}
ast-dump-openmp-taskgroup.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test() { #pragma omp taskgroup ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-taskgroup.c:3:1, line:6:1> line:3:6 test 'void ()' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1> // CHECK-NEXT: `-OMPTaskgroupDirective {{.*}} <line:4:9, col:22> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: |-NullStmt {{.*}} <col:3> openmp_structured_block // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskgroup.c:4:9) *const restrict'
3D.h
/* sigma(i, j): base-pairing score between residues i and j of the global
   sequence RNA (1 if they can pair, per can_pair) -- TODO confirm scoring
   against can_pair's definition.  */
#define sigma(i, j) (can_pair(RNA, i, j))

/* Auto-generated (polyhedral/Pluto-style) tiled and skewed version of a
 * Nussinov-like dynamic-programming recurrence over the DP table S.
 * Depends on externally defined N, RNA, S, MAX, floord, max, min and
 * can_pair; do not hand-edit the loop bounds -- they encode the legality
 * of the schedule.  The c1 loop is parallelized: iterations at a fixed c0
 * are independent wavefront tiles.
 */
void n3D() {
  int n = N;
  /* NOTE(review): leftover declarations from the code generator; the loop
     counters below are re-declared inside each for statement, so these are
     unused and shadowed.  */
  int c0,c1,c2,c3,c5,c6,c7,c9,c11,c10,c4,c12;
  /* c0: sequential time/tile dimension of the schedule.  */
  for (int c0 = floord(-31 * n + 115, 3132) + 2; c0 <= floord(79 * n - 158, 2436) + 2; c0 += 1) {
#pragma omp parallel for
    /* c1: parallel tile dimension (independent at fixed c0).  */
    for (int c1 = max(-c0 - (n + 52) / 54 + 2, -((n + 114) / 116)); c1 <= min(min(-c0 + (n - 2) / 42 + 1, c0 + ((-4 * c0 + 3)/31) - 1), (-21 * c0 + 20)/79); c1 += 1) {
      for (int c2 = max(-c0 + c1 + floord(21 * c0 - 17 * c1 - 21, 48) + 1, -c0 - c1 - (n - 42 * c0 - 42 * c1 + 136) / 96 + 1); c2 <= min(min(-1, -c0 - c1), -((27 * c0 - 31 * c1 + 54) / 69) + 1); c2 += 1) {
        /* c5/c6/c7: point loops inside the tile; (c6, c7) indexes the DP
           cell S[c6][c7] and c5 the split position of the recurrence.  */
        for (int c5 = max(27 * c0 - 31 * c1 + 27 * c2 - 83, -42 * c2 - 41); c5 <= min(min(n + 54 * c0 + 54 * c1 + 54 * c2 - 1, -42 * c2), 54 * c0 - 62 * c1 + 54 * c2); c5 += 1) {
          for (int c6 = max(-54 * c0 - 54 * c1 - 54 * c2, -116 * c1 - 2 * c5 - 114); c6 <= min(min(-54 * c0 - 54 * c1 - 54 * c2 + 53, n - c5 - 1), -116 * c1 - c5); c6 += 1) {
            for (int c7 = max(-116 * c1 - 115, c5 + c6); c7 <= min(min(n - 1, -116 * c1), 2 * c5 + c6 - 1); c7 += 1) {
              /* Guarded split update: S[c6][c7] from a decomposition at
                 column -c5 + c7.  */
              if (2 * c5 + c6 >= c7 + 2) {
                S[c6][c7] = MAX(S[c6][-c5 + c7] + S[-c5 + c7 + 1][c7], S[c6][c7]);
                /* Diagonal case: pairing update for cell (c6, c5 + c6).  */
                if (c7 == c5 + c6) {
                  S[c6][c5 + c6] = MAX(S[c6][c5 + c6], S[c6 + 1][c5 + c6 - 1] + sigma(c6, c5 + c6));
                }
              }
              /* Unconditional split update at column c5 + c6 - 1.  */
              S[c6][c7] = MAX(S[c6][c5 + c6 - 1] + S[c5 + c6][c7], S[c6][c7]);
              if (c7 == c5 + c6) {
                S[c6][c5 + c6] = MAX(S[c6][c5 + c6], S[c6 + 1][c5 + c6 - 1] + sigma(c6, c5 + c6));
              }
            }
          }
        }
      }
    }
  }
}
reduction_issue_16.c
#include <stdio.h>

#define N 1000000ll
#define SUM (N * (N-1)/2)

/* Sums the integers 0..N-1 inside an "omp target" region using a
   parallel-for reduction, then checks the result against the closed
   form N*(N-1)/2 and reports success or failure. */
int main (void)
{
#pragma omp target
  {
    long long total = 0;

#pragma omp parallel for reduction(+:total)
    for (long long i = 0; i < N; i++)
      total += i;

    /* Verify the reduction against the analytic sum. */
    if (total == SUM)
      printf ("The result is correct = %lld!\n", total);
    else
      printf ("Incorrect result = %lld, expected = %lld!\n", total, SUM);
  }

  return 0;
}
_phonopy.c
/* Copyright (C) 2011 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <Python.h> #include <stdio.h> #include <stddef.h> #include <math.h> #include <float.h> #include <numpy/arrayobject.h> #include <dynmat.h> #include <derivative_dynmat.h> #include <kgrid.h> #include <tetrahedron_method.h> #define KB 8.6173382568083159E-05 /* PHPYCONST is defined in dynmat.h */ /* Build dynamical matrix */ static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args); static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args); static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args); static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args); static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args); static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args); static PyObject * py_get_dipole_dipole(PyObject *self, PyObject *args); static PyObject * py_get_dipole_dipole_q0(PyObject *self, PyObject *args); static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args); static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args); static PyObject * py_distribute_fc2(PyObject *self, PyObject *args); static PyObject * py_compute_permutation(PyObject *self, PyObject *args); static PyObject * py_gsv_copy_smallest_vectors(PyObject *self, PyObject *args); static PyObject * py_gsv_set_smallest_vectors(PyObject *self, PyObject *args); static PyObject * py_thm_neighboring_grid_points(PyObject *self, PyObject *args); static PyObject * py_thm_relative_grid_address(PyObject *self, PyObject *args); static PyObject * py_thm_all_relative_grid_address(PyObject *self, PyObject *args); static PyObject * py_thm_integration_weight(PyObject *self, PyObject *args); static PyObject * py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args); static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args); static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args); static void 
distribute_fc2(double (*fc2)[3][3], const int * atom_list, const int len_atom_list, PHPYCONST double (*r_carts)[3][3], const int * permutations, const int * map_atoms, const int * map_syms, const int num_rot, const int num_pos); static int compute_permutation(int * rot_atom, PHPYCONST double lat[3][3], PHPYCONST double (*pos)[3], PHPYCONST double (*rot_pos)[3], const int num_pos, const double symprec); static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3], int * multiplicity, PHPYCONST double (*vector_lists)[27][3], PHPYCONST double (*length_lists)[27], const int num_lists, const double symprec); static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3], int *multiplicity, PHPYCONST double (*pos_to)[3], const int num_pos_to, PHPYCONST double (*pos_from)[3], const int num_pos_from, PHPYCONST int (*lattice_points)[3], const int num_lattice_points, PHPYCONST double reduced_basis[3][3], PHPYCONST int trans_mat[3][3], const double symprec); static double get_free_energy(const double temperature, const double f); static double get_entropy(const double temperature, const double f); static double get_heat_capacity(const double temperature, const double f); static void set_index_permutation_symmetry_fc(double * fc, const int natom); static void set_translational_symmetry_fc(double * fc, const int natom); static void set_index_permutation_symmetry_compact_fc(double * fc, const int p2s[], const int s2pp[], const int nsym_list[], const int perms[], const int n_satom, const int n_patom, const int is_transpose); static void set_translational_symmetry_compact_fc(double * fc, const int p2s[], const int n_satom, const int n_patom); /* static double get_energy(double temperature, double f); */ static int nint(const double a); struct module_state { PyObject *error; }; #if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) #else #define GETSTATE(m) (&_state) static struct module_state _state; #endif static PyObject * 
error_out(PyObject *m) { struct module_state *st = GETSTATE(m); PyErr_SetString(st->error, "something bad happened"); return NULL; } static PyMethodDef _phonopy_methods[] = { {"error_out", (PyCFunction)error_out, METH_NOARGS, NULL}, {"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS, "Transform a set of dynmat to force constants"}, {"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS, "Enforce permutation and translational symmetry of force constants"}, {"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc, METH_VARARGS, "Enforce permutation and translational symmetry of compact force constants"}, {"transpose_compact_fc", py_transpose_compact_fc, METH_VARARGS, "Transpose compact force constants"}, {"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS, "Dynamical matrix"}, {"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS, "NAC dynamical matrix"}, {"dipole_dipole", py_get_dipole_dipole, METH_VARARGS, "Dipole-dipole interaction"}, {"dipole_dipole_q0", py_get_dipole_dipole_q0, METH_VARARGS, "q=0 terms of Dipole-dipole interaction"}, {"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS, "Q derivative of dynamical matrix"}, {"thermal_properties", py_get_thermal_properties, METH_VARARGS, "Thermal properties"}, {"distribute_fc2", py_distribute_fc2, METH_VARARGS, "Distribute force constants for all atoms in atom_list using precomputed symmetry mappings."}, {"compute_permutation", py_compute_permutation, METH_VARARGS, "Compute indices of original points in a set of rotated points."}, {"gsv_copy_smallest_vectors", py_gsv_copy_smallest_vectors, METH_VARARGS, "Implementation detail of get_smallest_vectors."}, {"gsv_set_smallest_vectors", py_gsv_set_smallest_vectors, METH_VARARGS, "Set candidate vectors."}, {"neighboring_grid_points", py_thm_neighboring_grid_points, METH_VARARGS, "Neighboring grid points by relative grid addresses"}, {"tetrahedra_relative_grid_address", 
py_thm_relative_grid_address, METH_VARARGS, "Relative grid addresses of vertices of 24 tetrahedra"}, {"all_tetrahedra_relative_grid_address", py_thm_all_relative_grid_address, METH_VARARGS, "4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"}, {"tetrahedra_integration_weight", py_thm_integration_weight, METH_VARARGS, "Integration weight for tetrahedron method"}, {"tetrahedra_integration_weight_at_omegas", py_thm_integration_weight_at_omegas, METH_VARARGS, "Integration weight for tetrahedron method at omegas"}, {"get_tetrahedra_frequencies", py_get_tetrahedra_frequenies, METH_VARARGS, "Run tetrahedron method"}, {"tetrahedron_method_dos", py_tetrahedron_method_dos, METH_VARARGS, "Run tetrahedron method"}, {NULL, NULL, 0, NULL} }; #if PY_MAJOR_VERSION >= 3 static int _phonopy_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->error); return 0; } static int _phonopy_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->error); return 0; } static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_phonopy", NULL, sizeof(struct module_state), _phonopy_methods, NULL, _phonopy_traverse, _phonopy_clear, NULL }; #define INITERROR return NULL PyObject * PyInit__phonopy(void) #else #define INITERROR return void init_phonopy(void) #endif { #if PY_MAJOR_VERSION >= 3 PyObject *module = PyModule_Create(&moduledef); #else PyObject *module = Py_InitModule("_phonopy", _phonopy_methods); #endif struct module_state *st; if (module == NULL) INITERROR; st = GETSTATE(module); st->error = PyErr_NewException("_phonopy.Error", NULL, NULL); if (st->error == NULL) { Py_DECREF(module); INITERROR; } #if PY_MAJOR_VERSION >= 3 return module; #endif } static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args) { PyArrayObject* py_force_constants; PyArrayObject* py_dynamical_matrices; PyArrayObject* py_commensurate_points; PyArrayObject* py_shortest_vectors; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2pp_map; 
PyArrayObject* py_fc_index_map; double* fc; double* dm; double (*comm_points)[3]; double (*shortest_vectors)[27][3]; double* masses; int* multiplicities; int* s2pp_map; int* fc_index_map; int num_patom; int num_satom; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_force_constants, &py_dynamical_matrices, &py_commensurate_points, &py_shortest_vectors, &py_multiplicities, &py_masses, &py_s2pp_map, &py_fc_index_map)) { return NULL; } fc = (double*)PyArray_DATA(py_force_constants); dm = (double*)PyArray_DATA(py_dynamical_matrices); comm_points = (double(*)[3])PyArray_DATA(py_commensurate_points); shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); masses = (double*)PyArray_DATA(py_masses); multiplicities = (int*)PyArray_DATA(py_multiplicities); s2pp_map = (int*)PyArray_DATA(py_s2pp_map); fc_index_map = (int*)PyArray_DATA(py_fc_index_map); num_patom = PyArray_DIMS(py_multiplicities)[1]; num_satom = PyArray_DIMS(py_multiplicities)[0]; dym_transform_dynmat_to_fc(fc, dm, comm_points, shortest_vectors, multiplicities, masses, s2pp_map, fc_index_map, num_patom, num_satom); Py_RETURN_NONE; } static PyObject * py_compute_permutation(PyObject *self, PyObject *args) { PyArrayObject* permutation; PyArrayObject* lattice; PyArrayObject* positions; PyArrayObject* permuted_positions; double symprec; int* rot_atoms; double (*lat)[3]; double (*pos)[3]; double (*rot_pos)[3]; int num_pos; int is_found; if (!PyArg_ParseTuple(args, "OOOOd", &permutation, &lattice, &positions, &permuted_positions, &symprec)) { return NULL; } rot_atoms = (int*)PyArray_DATA(permutation); lat = (double(*)[3])PyArray_DATA(lattice); pos = (double(*)[3])PyArray_DATA(positions); rot_pos = (double(*)[3])PyArray_DATA(permuted_positions); num_pos = PyArray_DIMS(positions)[0]; is_found = compute_permutation(rot_atoms, lat, pos, rot_pos, num_pos, symprec); if (is_found) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } static PyObject * py_gsv_copy_smallest_vectors(PyObject *self, PyObject *args) { 
PyArrayObject* py_shortest_vectors; PyArrayObject* py_multiplicity; PyArrayObject* py_vectors; PyArrayObject* py_lengths; double symprec; double (*shortest_vectors)[27][3]; double (*vectors)[27][3]; double (*lengths)[27]; int * multiplicity; int size_super, size_prim; if (!PyArg_ParseTuple(args, "OOOOd", &py_shortest_vectors, &py_multiplicity, &py_vectors, &py_lengths, &symprec)) { return NULL; } shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); multiplicity = (int*)PyArray_DATA(py_multiplicity); vectors = (double(*)[27][3])PyArray_DATA(py_vectors); lengths = (double(*)[27])PyArray_DATA(py_lengths); size_super = PyArray_DIMS(py_vectors)[0]; size_prim = PyArray_DIMS(py_vectors)[1]; gsv_copy_smallest_vectors(shortest_vectors, multiplicity, vectors, lengths, size_super * size_prim, symprec); Py_RETURN_NONE; } static PyObject * py_gsv_set_smallest_vectors(PyObject *self, PyObject *args) { PyArrayObject* py_smallest_vectors; PyArrayObject* py_multiplicity; PyArrayObject* py_pos_to; PyArrayObject* py_pos_from; PyArrayObject* py_lattice_points; PyArrayObject* py_reduced_basis; PyArrayObject* py_trans_mat; double symprec; double (*smallest_vectors)[27][3]; int * multiplicity; double (*pos_to)[3]; double (*pos_from)[3]; int (*lattice_points)[3]; double (*reduced_basis)[3]; int (*trans_mat)[3]; int num_pos_to, num_pos_from, num_lattice_points; if (!PyArg_ParseTuple(args, "OOOOOOOd", &py_smallest_vectors, &py_multiplicity, &py_pos_to, &py_pos_from, &py_lattice_points, &py_reduced_basis, &py_trans_mat, &symprec)) { return NULL; } smallest_vectors = (double(*)[27][3])PyArray_DATA(py_smallest_vectors); multiplicity = (int*)PyArray_DATA(py_multiplicity); pos_to = (double(*)[3])PyArray_DATA(py_pos_to); pos_from = (double(*)[3])PyArray_DATA(py_pos_from); num_pos_to = PyArray_DIMS(py_pos_to)[0]; num_pos_from = PyArray_DIMS(py_pos_from)[0]; lattice_points = (int(*)[3])PyArray_DATA(py_lattice_points); num_lattice_points = PyArray_DIMS(py_lattice_points)[0]; 
reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis);
  trans_mat = (int(*)[3])PyArray_DATA(py_trans_mat);

  gsv_set_smallest_vectors(smallest_vectors,
                           multiplicity,
                           pos_to,
                           num_pos_to,
                           pos_from,
                           num_pos_from,
                           lattice_points,
                           num_lattice_points,
                           reduced_basis,
                           trans_mat,
                           symprec);

  Py_RETURN_NONE;
}

/* Enforce permutation and translational symmetry on a full fc2 array.
 * args: (force_constants ndarray [n_satom, n_satom, 3, 3] double, level int).
 * For `level` iterations: the mean of each (k,l) component is subtracted
 * along the first (column) and then the second (row) atom index, after
 * which index-permutation symmetry is imposed.  Finally the diagonal
 * blocks are rebuilt from the acoustic sum rule.  Operates in place. */
static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args)
{
  PyArrayObject* force_constants;
  double *fc;
  int level;

  int n_satom, i, j, k, l, iter;
  double sum;

  if (!PyArg_ParseTuple(args, "Oi", &force_constants, &level)) {
    return NULL;
  }

  fc = (double*)PyArray_DATA(force_constants);
  n_satom = PyArray_DIMS(force_constants)[0];

  for (iter=0; iter < level; iter++) {
    /* Subtract drift along column */
    for (j = 0; j < n_satom; j++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          sum = 0;
          for (i = 0; i < n_satom; i++) {
            sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
          }
          sum /= n_satom;
          for (i = 0; i < n_satom; i++) {
            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
          }
        }
      }
    }

    /* Subtract drift along row */
    for (i = 0; i < n_satom; i++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          sum = 0;
          for (j = 0; j < n_satom; j++) {
            sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
          }
          sum /= n_satom;
          for (j = 0; j < n_satom; j++) {
            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
          }
        }
      }
    }

    /* Symmetrize fc[i][j] against fc[j][i]^T (and 3x3 diagonal blocks). */
    set_index_permutation_symmetry_fc(fc, n_satom);
  }
  /* Rebuild self-terms so each row of blocks sums to zero. */
  set_translational_symmetry_fc(fc, n_satom);

  Py_RETURN_NONE;
}

/* Compact-fc2 variant of the symmetrization above.
 * args: (fc [n_patom, n_satom, 3, 3], permutations, s2pp_map, p2s_map,
 *        nsym_list, level).  The permutation step goes through the
 *        symmetry mappings because only primitive-cell rows are stored. */
static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args)
{
  PyArrayObject* py_fc;
  PyArrayObject* py_permutations;
  PyArrayObject* py_s2pp_map;
  PyArrayObject* py_p2s_map;
  PyArrayObject* py_nsym_list;
  int level;

  double *fc;
  int *perms;
  int *s2pp;
  int *p2s;
  int *nsym_list;
  int n_patom, n_satom, i, j, k, l, n, iter;
  double sum;

  if (!PyArg_ParseTuple(args, "OOOOOi",
                        &py_fc,
                        &py_permutations,
                        &py_s2pp_map,
                        &py_p2s_map,
                        &py_nsym_list,
                        &level)) {
    return NULL;
  }

  fc = (double*)PyArray_DATA(py_fc);
  perms = (int*)PyArray_DATA(py_permutations);
  s2pp =
(int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; for (iter=0; iter < level; iter++) { for (n = 0; n < 2; n++) { /* transpose only */ set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 1); for (i = 0; i < n_patom; i++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { sum = 0; for (j = 0; j < n_satom; j++) { sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l]; } sum /= n_satom; for (j = 0; j < n_satom; j++) { fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum; } } } } } set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 0); } set_translational_symmetry_compact_fc(fc, p2s, n_satom, n_patom); Py_RETURN_NONE; } static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args) { PyArrayObject* py_fc; PyArrayObject* py_permutations; PyArrayObject* py_s2pp_map; PyArrayObject* py_p2s_map; PyArrayObject* py_nsym_list; double *fc; int *s2pp; int *p2s; int *nsym_list; int *perms; int n_patom, n_satom; if (!PyArg_ParseTuple(args, "OOOOO", &py_fc, &py_permutations, &py_s2pp_map, &py_p2s_map, &py_nsym_list)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); perms = (int*)PyArray_DATA(py_permutations); s2pp = (int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 1); Py_RETURN_NONE; } static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args) { PyArrayObject* py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_shortest_vectors; PyArrayObject* py_q; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; double* dm; double* fc; double* q; double 
(*svecs)[27][3]; double* m; int* multi; int* s2p_map; int* p2s_map; int num_patom; int num_satom; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dynamical_matrix, &py_force_constants, &py_q, &py_shortest_vectors, &py_multiplicities, &py_masses, &py_s2p_map, &py_p2s_map)) { return NULL; } dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); m = (double*)PyArray_DATA(py_masses); multi = (int*)PyArray_DATA(py_multiplicities); s2p_map = (int*)PyArray_DATA(py_s2p_map); p2s_map = (int*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; dym_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, NULL, 1); Py_RETURN_NONE; } static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args) { PyArrayObject* py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_shortest_vectors; PyArrayObject* py_q_cart; PyArrayObject* py_q; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; double factor; double* dm; double* fc; double* q_cart; double* q; double (*svecs)[27][3]; double* m; double (*born)[3][3]; int* multi; int* s2p_map; int* p2s_map; int num_patom; int num_satom; int n; double (*charge_sum)[3][3]; if (!PyArg_ParseTuple(args, "OOOOOOOOOOd", &py_dynamical_matrix, &py_force_constants, &py_q, &py_shortest_vectors, &py_multiplicities, &py_masses, &py_s2p_map, &py_p2s_map, &py_q_cart, &py_born, &factor)) return NULL; dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q_cart = (double*)PyArray_DATA(py_q_cart); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); m = (double*)PyArray_DATA(py_masses); born = (double(*)[3][3])PyArray_DATA(py_born); multi = 
(int*)PyArray_DATA(py_multiplicities); s2p_map = (int*)PyArray_DATA(py_s2p_map); p2s_map = (int*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; charge_sum = (double(*)[3][3]) malloc(sizeof(double[3][3]) * num_patom * num_patom); n = num_satom / num_patom; dym_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born); dym_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, charge_sum, 1); free(charge_sum); Py_RETURN_NONE; } static PyObject * py_get_dipole_dipole(PyObject *self, PyObject *args) { PyArrayObject* py_dd; PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_q_cart; PyArrayObject* py_q_direction; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double factor; double lambda; double tolerance; double* dd; double* dd_q0; double (*G_list)[3]; double* q_vector; double* q_direction; double (*born)[3][3]; double (*dielectric)[3]; double (*pos)[3]; int num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOOOOddd", &py_dd, &py_dd_q0, &py_G_list, &py_q_cart, &py_q_direction, &py_born, &py_dielectric, &py_positions, &factor, &lambda, &tolerance)) return NULL; dd = (double*)PyArray_DATA(py_dd); dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); if ((PyObject*)py_q_direction == Py_None) { q_direction = NULL; } else { q_direction = (double*)PyArray_DATA(py_q_direction); } q_vector = (double*)PyArray_DATA(py_q_cart); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; dym_get_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */ dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, q_vector, q_direction, born, dielectric, pos, /* [natom, 3] */ factor, /* 
4pi/V*unit-conv */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject * py_get_dipole_dipole_q0(PyObject *self, PyObject *args) { PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double lambda; double tolerance; double* dd_q0; double (*G_list)[3]; double (*born)[3][3]; double (*dielectric)[3]; double (*pos)[3]; int num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOdd", &py_dd_q0, &py_G_list, &py_born, &py_dielectric, &py_positions, &lambda, &tolerance)) return NULL; dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; dym_get_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, born, dielectric, pos, /* [natom, 3] */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args) { PyArrayObject* derivative_dynmat; PyArrayObject* py_force_constants; PyArrayObject* r_vector; PyArrayObject* lattice; PyArrayObject* q_vector; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; PyArrayObject* dielectric; PyArrayObject* q_direction; double nac_factor; double* ddm; double* fc; double* q; double* lat; double* r; double* m; int* multi; int* s2p_map; int* p2s_map; int num_patom; int num_satom; double *z; double *epsilon; double *q_dir; if (!PyArg_ParseTuple(args, "OOOOOOOOOdOOO", &derivative_dynmat, &py_force_constants, &q_vector, &lattice, /* column vectors */ &r_vector, &py_multiplicities, &py_masses, &py_s2p_map, &py_p2s_map, &nac_factor, &py_born, &dielectric, &q_direction)) { return NULL; } ddm = 
(double*)PyArray_DATA(derivative_dynmat); fc = (double*)PyArray_DATA(py_force_constants); q = (double*)PyArray_DATA(q_vector); lat = (double*)PyArray_DATA(lattice); r = (double*)PyArray_DATA(r_vector); m = (double*)PyArray_DATA(py_masses); multi = (int*)PyArray_DATA(py_multiplicities); s2p_map = (int*)PyArray_DATA(py_s2p_map); p2s_map = (int*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; if ((PyObject*)py_born == Py_None) { z = NULL; } else { z = (double*)PyArray_DATA(py_born); } if ((PyObject*)dielectric == Py_None) { epsilon = NULL; } else { epsilon = (double*)PyArray_DATA(dielectric); } if ((PyObject*)q_direction == Py_None) { q_dir = NULL; } else { q_dir = (double*)PyArray_DATA(q_direction); } get_derivative_dynmat_at_q(ddm, num_patom, num_satom, fc, q, lat, r, multi, m, s2p_map, p2s_map, nac_factor, z, epsilon, q_dir); Py_RETURN_NONE; } /* Thermal properties */ static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args) { PyArrayObject* py_thermal_props; PyArrayObject* py_temperatures; PyArrayObject* py_frequencies; PyArrayObject* py_weights; double cutoff_frequency; double *temperatures; double* freqs; double *thermal_props; int* w; int num_qpoints; int num_bands; int num_temp; int i, j, k; double f; double *tp; if (!PyArg_ParseTuple(args, "OOOOd", &py_thermal_props, &py_temperatures, &py_frequencies, &py_weights, &cutoff_frequency)) { return NULL; } thermal_props = (double*)PyArray_DATA(py_thermal_props); temperatures = (double*)PyArray_DATA(py_temperatures); num_temp = PyArray_DIMS(py_temperatures)[0]; freqs = (double*)PyArray_DATA(py_frequencies); num_qpoints = PyArray_DIMS(py_frequencies)[0]; w = (int*)PyArray_DATA(py_weights); num_bands = PyArray_DIMS(py_frequencies)[1]; tp = (double*)malloc(sizeof(double) * num_qpoints * num_temp * 3); for (i = 0; i < num_qpoints * num_temp * 3; i++) { tp[i] = 0; } #pragma omp parallel for private(j, k, f) for (i = 0; i < num_qpoints; i++){ 
for (j = 0; j < num_temp; j++) { for (k = 0; k < num_bands; k++){ f = freqs[i * num_bands + k]; if (temperatures[j] > 0 && f > cutoff_frequency) { tp[i * num_temp * 3 + j * 3] += get_free_energy(temperatures[j], f) * w[i]; tp[i * num_temp * 3 + j * 3 + 1] += get_entropy(temperatures[j], f) * w[i]; tp[i * num_temp * 3 + j * 3 + 2] += get_heat_capacity(temperatures[j], f) * w[i]; } } } } for (i = 0; i < num_qpoints; i++) { for (j = 0; j < num_temp * 3; j++) { thermal_props[j] += tp[i * num_temp * 3 + j]; } } free(tp); tp = NULL; Py_RETURN_NONE; } static PyObject * py_distribute_fc2(PyObject *self, PyObject *args) { PyArrayObject* py_force_constants; PyArrayObject* py_permutations; PyArrayObject* py_map_atoms; PyArrayObject* py_map_syms; PyArrayObject* py_atom_list; PyArrayObject* py_rotations_cart; double (*r_carts)[3][3]; double (*fc2)[3][3]; int *permutations; int *map_atoms; int *map_syms; int *atom_list; npy_intp num_pos, num_rot, len_atom_list; if (!PyArg_ParseTuple(args, "OOOOOO", &py_force_constants, &py_atom_list, &py_rotations_cart, &py_permutations, &py_map_atoms, &py_map_syms)) { return NULL; } fc2 = (double(*)[3][3])PyArray_DATA(py_force_constants); atom_list = (int*)PyArray_DATA(py_atom_list); len_atom_list = PyArray_DIMS(py_atom_list)[0]; permutations = (int*)PyArray_DATA(py_permutations); map_atoms = (int*)PyArray_DATA(py_map_atoms); map_syms = (int*)PyArray_DATA(py_map_syms); r_carts = (double(*)[3][3])PyArray_DATA(py_rotations_cart); num_rot = PyArray_DIMS(py_permutations)[0]; num_pos = PyArray_DIMS(py_permutations)[1]; if (PyArray_NDIM(py_map_atoms) != 1 || PyArray_DIMS(py_map_atoms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms"); return NULL; } if (PyArray_NDIM(py_map_syms) != 1 || PyArray_DIMS(py_map_syms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms"); return NULL; } if (PyArray_DIMS(py_rotations_cart)[0] != num_rot) { PyErr_SetString(PyExc_ValueError, "permutations and rotations 
are different length"); return NULL; } distribute_fc2(fc2, atom_list, len_atom_list, r_carts, permutations, map_atoms, map_syms, num_rot, num_pos); Py_RETURN_NONE; } static PyObject *py_thm_neighboring_grid_points(PyObject *self, PyObject *args) { PyArrayObject* py_relative_grid_points; PyArrayObject* py_relative_grid_address; PyArrayObject* py_mesh; PyArrayObject* py_bz_grid_address; PyArrayObject* py_bz_map; long grid_point; int (*relative_grid_address)[3]; int num_relative_grid_address; int *mesh; int (*bz_grid_address)[3]; size_t *bz_map_size_t; size_t *relative_grid_points_size_t; if (!PyArg_ParseTuple(args, "OlOOOO", &py_relative_grid_points, &grid_point, &py_relative_grid_address, &py_mesh, &py_bz_grid_address, &py_bz_map)) { return NULL; } relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address); num_relative_grid_address = PyArray_DIMS(py_relative_grid_address)[0]; mesh = (int*)PyArray_DATA(py_mesh); bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address); bz_map_size_t = (size_t*)PyArray_DATA(py_bz_map); relative_grid_points_size_t = (size_t*)PyArray_DATA(py_relative_grid_points); thm_get_dense_neighboring_grid_points(relative_grid_points_size_t, grid_point, relative_grid_address, num_relative_grid_address, mesh, bz_grid_address, bz_map_size_t); Py_RETURN_NONE; } static PyObject * py_thm_relative_grid_address(PyObject *self, PyObject *args) { PyArrayObject* py_relative_grid_address; PyArrayObject* py_reciprocal_lattice_py; int (*relative_grid_address)[4][3]; double (*reciprocal_lattice)[3]; if (!PyArg_ParseTuple(args, "OO", &py_relative_grid_address, &py_reciprocal_lattice_py)) { return NULL; } relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address); reciprocal_lattice = (double(*)[3])PyArray_DATA(py_reciprocal_lattice_py); thm_get_relative_grid_address(relative_grid_address, reciprocal_lattice); Py_RETURN_NONE; } static PyObject * py_thm_all_relative_grid_address(PyObject *self, PyObject *args) { PyArrayObject* 
py_relative_grid_address;
  int (*relative_grid_address)[24][4][3];

  if (!PyArg_ParseTuple(args, "O", &py_relative_grid_address)) {
    return NULL;
  }

  relative_grid_address = (int(*)[24][4][3])PyArray_DATA(py_relative_grid_address);

  /* Fill all 4 sets of 24 tetrahedra x 4 vertices relative addresses. */
  thm_get_all_relative_grid_address(relative_grid_address);

  Py_RETURN_NONE;
}

/* Tetrahedron-method integration weight at a single frequency.
 * args: (omega double, tetrahedra_omegas ndarray [24, 4] double,
 *        function str).  Only function[0] is used as the selector
 * character passed through to thm_get_integration_weight.
 * Returns the weight as a Python float. */
static PyObject * py_thm_integration_weight(PyObject *self, PyObject *args)
{
  double omega;
  PyArrayObject* py_tetrahedra_omegas;
  char* function;

  double (*tetrahedra_omegas)[4];
  double iw;

  if (!PyArg_ParseTuple(args, "dOs",
                        &omega,
                        &py_tetrahedra_omegas,
                        &function)) {
    return NULL;
  }

  tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas);
  iw = thm_get_integration_weight(omega,
                                  tetrahedra_omegas,
                                  function[0]);

  return PyFloat_FromDouble(iw);
}

/* Vectorized variant: integration weights for an array of frequencies.
 * args: (integration_weights out-array, omegas, tetrahedra_omegas,
 *        function str).  Writes one weight per omega in place. */
static PyObject * py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args)
{
  PyArrayObject* py_integration_weights;
  PyArrayObject* py_omegas;
  PyArrayObject* py_tetrahedra_omegas;
  char* function;

  double *omegas;
  double *iw;
  int num_omegas;
  double (*tetrahedra_omegas)[4];

  if (!PyArg_ParseTuple(args, "OOOs",
                        &py_integration_weights,
                        &py_omegas,
                        &py_tetrahedra_omegas,
                        &function)) {
    return NULL;
  }

  omegas = (double*)PyArray_DATA(py_omegas);
  iw = (double*)PyArray_DATA(py_integration_weights);
  num_omegas = (int)PyArray_DIMS(py_omegas)[0];
  tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas);

  thm_get_integration_weight_at_omegas(iw,
                                       num_omegas,
                                       omegas,
                                       tetrahedra_omegas,
                                       function[0]);

  Py_RETURN_NONE;
}

/* Gather band frequencies on the 96 tetrahedron vertex grid points
 * around each requested grid point (24 tetrahedra x 4 vertices). */
static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args)
{
  PyArrayObject* py_freq_tetras;
  PyArrayObject* py_grid_points;
  PyArrayObject* py_mesh;
  PyArrayObject* py_grid_address;
  PyArrayObject* py_gp_ir_index;
  PyArrayObject* py_relative_grid_address;
  PyArrayObject* py_frequencies;

  double* freq_tetras;
  size_t* grid_points;
  int* mesh;
  int (*grid_address)[3];
  size_t* gp_ir_index;
  int (*relative_grid_address)[3];
  double* frequencies;
  /* Gamma-centered mesh: no half-grid shift. */
  int is_shift[3] = {0, 0, 0};
  size_t i, j, k, gp, 
num_gp_in, num_band; int g_addr[3]; int address_double[3]; if (!PyArg_ParseTuple(args, "OOOOOOO", &py_freq_tetras, &py_grid_points, &py_mesh, &py_grid_address, &py_gp_ir_index, &py_relative_grid_address, &py_frequencies)) { return NULL; } freq_tetras = (double*)PyArray_DATA(py_freq_tetras); grid_points = (size_t*)PyArray_DATA(py_grid_points); num_gp_in = PyArray_DIMS(py_grid_points)[0]; mesh = (int*)PyArray_DATA(py_mesh); grid_address = (int(*)[3])PyArray_DATA(py_grid_address); gp_ir_index = (size_t*)PyArray_DATA(py_gp_ir_index); relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address); frequencies = (double*)PyArray_DATA(py_frequencies); num_band = PyArray_DIMS(py_frequencies)[1]; for (i = 0; i < num_gp_in; i++) { #pragma omp parallel for private(k, g_addr, gp, address_double) for (j = 0; j < num_band * 96; j++) { for (k = 0; k < 3; k++) { g_addr[k] = grid_address[grid_points[i]][k] + relative_grid_address[j % 96][k]; } kgd_get_grid_address_double_mesh(address_double, g_addr, mesh, is_shift); gp = kgd_get_dense_grid_point_double_mesh(address_double, mesh); freq_tetras[i * num_band * 96 + j] = frequencies[gp_ir_index[gp] * num_band + j / 96]; } } Py_RETURN_NONE; } static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args) { PyArrayObject* py_dos; PyArrayObject* py_mesh; PyArrayObject* py_freq_points; PyArrayObject* py_frequencies; PyArrayObject* py_coef; PyArrayObject* py_grid_address; PyArrayObject* py_grid_mapping_table; PyArrayObject* py_relative_grid_address; double *dos; int* mesh; double* freq_points; double* frequencies; double* coef; int (*grid_address)[3]; size_t num_gp, num_ir_gp, num_band, num_freq_points, num_coef; size_t *grid_mapping_table; int (*relative_grid_address)[4][3]; int is_shift[3] = {0, 0, 0}; size_t i, j, k, l, m, q, r, count; size_t ir_gps[24][4]; int g_addr[3]; double tetrahedra[24][4]; int address_double[3]; size_t *gp2ir, *ir_grid_points; int *weights; double iw; gp2ir = NULL; ir_grid_points = NULL; 
weights = NULL; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dos, &py_mesh, &py_freq_points, &py_frequencies, &py_coef, &py_grid_address, &py_grid_mapping_table, &py_relative_grid_address)) { return NULL; } /* dos[num_ir_gp][num_band][num_freq_points][num_coef] */ dos = (double*)PyArray_DATA(py_dos); mesh = (int*)PyArray_DATA(py_mesh); freq_points = (double*)PyArray_DATA(py_freq_points); num_freq_points = (size_t)PyArray_DIMS(py_freq_points)[0]; frequencies = (double*)PyArray_DATA(py_frequencies); num_ir_gp = (size_t)PyArray_DIMS(py_frequencies)[0]; num_band = (size_t)PyArray_DIMS(py_frequencies)[1]; coef = (double*)PyArray_DATA(py_coef); num_coef = (size_t)PyArray_DIMS(py_coef)[1]; grid_address = (int(*)[3])PyArray_DATA(py_grid_address); num_gp = (size_t)PyArray_DIMS(py_grid_address)[0]; grid_mapping_table = (size_t*)PyArray_DATA(py_grid_mapping_table); relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address); gp2ir = (size_t*)malloc(sizeof(size_t) * num_gp); ir_grid_points = (size_t*)malloc(sizeof(size_t) * num_ir_gp); weights = (int*)malloc(sizeof(int) * num_ir_gp); count = 0; for (i = 0; i < num_gp; i++) { if (grid_mapping_table[i] == i) { gp2ir[i] = count; ir_grid_points[count] = i; weights[count] = 1; count++; } else { gp2ir[i] = gp2ir[grid_mapping_table[i]]; weights[gp2ir[i]]++; } } if (num_ir_gp != count) { printf("Something is wrong!\n"); } #pragma omp parallel for private(j, k, l, m, q, r, iw, ir_gps, g_addr, tetrahedra, address_double) for (i = 0; i < num_ir_gp; i++) { /* set 24 tetrahedra */ for (l = 0; l < 24; l++) { for (q = 0; q < 4; q++) { for (r = 0; r < 3; r++) { g_addr[r] = grid_address[ir_grid_points[i]][r] + relative_grid_address[l][q][r]; } kgd_get_grid_address_double_mesh(address_double, g_addr, mesh, is_shift); ir_gps[l][q] = gp2ir[kgd_get_grid_point_double_mesh(address_double, mesh)]; } } for (k = 0; k < num_band; k++) { for (l = 0; l < 24; l++) { for (q = 0; q < 4; q++) { tetrahedra[l][q] = frequencies[ir_gps[l][q] * 
num_band + k];
        }
      }
      for (j = 0; j < num_freq_points; j++) {
        iw = thm_get_integration_weight(freq_points[j],
                                        tetrahedra, 'I') * weights[i];
        /* Accumulate weighted coefficients into the DOS array. */
        for (m = 0; m < num_coef; m++) {
          dos[i * num_band * num_freq_points * num_coef +
              k * num_coef * num_freq_points +
              j * num_coef + m] +=
            iw * coef[i * num_coef * num_band + m * num_band + k];
        }
      }
    }
  }

  free(gp2ir);
  gp2ir = NULL;
  free(ir_grid_points);
  ir_grid_points = NULL;
  free(weights);
  weights = NULL;

  Py_RETURN_NONE;
}

/* Harmonic phonon free energy of a single mode. */
static double get_free_energy(const double temperature, const double f)
{
  /* temperature is defined by T (K) */
  /* 'f' must be given in eV. */
  return KB * temperature * log(1 - exp(- f / (KB * temperature)));
}

/* Harmonic phonon entropy of a single mode. */
static double get_entropy(const double temperature, const double f)
{
  /* temperature is defined by T (K) */
  /* 'f' must be given in eV. */
  double val;

  val = f / (2 * KB * temperature);
  return 1 / (2 * temperature) * f * cosh(val) / sinh(val) - KB * log(2 * sinh(val));
}

/* Harmonic phonon heat capacity of a single mode. */
static double get_heat_capacity(const double temperature, const double f)
{
  /* temperature is defined by T (K) */
  /* 'f' must be given in eV. */
  /* NOTE(review): a previous comment claimed a series expansion is used
   * when val is small, but none is implemented here.  As f -> 0,
   * val1 -> 1 and (val1 - 1) -> 0; callers are expected to filter with
   * cutoff_frequency (see py_get_thermal_properties) -- confirm. */
  double val, val1, val2;

  val = f / (KB * temperature);
  val1 = exp(val);
  val2 = (val) / (val1 - 1);
  return KB * val1 * val2 * val2;
}

/* static double get_energy(double temperature, double f){ */
/* /\* temperature is defined by T (K) *\/ */
/* /\* 'f' must be given in eV. *\/ */
/* return f / (exp(f / (KB * temperature)) - 1); */
/* } */

/* Find the permutation mapping rotated positions onto the original
 * positions under periodic boundary conditions: on success rot_atom[j]
 * holds the index i of pos matching rot_pos[j] within symprec (using
 * lattice `lat` to measure Cartesian distances).  Returns 1 on success,
 * 0 if any atom could not be matched. */
static int compute_permutation(int * rot_atom,
                               PHPYCONST double lat[3][3],
                               PHPYCONST double (*pos)[3],
                               PHPYCONST double (*rot_pos)[3],
                               const int num_pos,
                               const double symprec)
{
  int i,j,k,l;
  int search_start;
  double distance2, diff_cart;
  double diff[3];

  /* -1 marks "not yet assigned". */
  for (i = 0; i < num_pos; i++) {
    rot_atom[i] = -1;
  }

  /* optimization: Iterate primarily by pos instead of rot_pos. */
  /* (find where 0 belongs in rot_atom, then where 1 belongs, etc.) */
  /* Then track the first unassigned index. 
*/
  /* */
  /* This works best if the permutation is close to the identity. */
  /* (more specifically, if the max value of 'rot_atom[i] - i' is small) */
  search_start = 0;
  for (i = 0; i < num_pos; i++) {
    /* Advance past the leading run of already-assigned slots. */
    while (rot_atom[search_start] >= 0) {
      search_start++;
    }
    for (j = search_start; j < num_pos; j++) {
      if (rot_atom[j] >= 0) {
        continue;
      }
      /* Fractional-coordinate difference, wrapped to nearest image. */
      for (k = 0; k < 3; k++) {
        diff[k] = pos[i][k] - rot_pos[j][k];
        diff[k] -= nint(diff[k]);
      }
      /* Cartesian distance via the lattice matrix. */
      distance2 = 0;
      for (k = 0; k < 3; k++) {
        diff_cart = 0;
        for (l = 0; l < 3; l++) {
          diff_cart += lat[k][l] * diff[l];
        }
        distance2 += diff_cart * diff_cart;
      }

      if (sqrt(distance2) < symprec) {
        rot_atom[j] = i;
        break;
      }
    }
  }

  /* Fail if any rotated atom remained unmatched. */
  for (i = 0; i < num_pos; i++) {
    if (rot_atom[i] < 0) {
      return 0;
    }
  }
  return 1;
}

/* Implementation detail of get_smallest_vectors. */
/* Finds the smallest vectors within each list and copies them to the output. */
static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3],
                                      int * multiplicity,
                                      PHPYCONST double (*vector_lists)[27][3],
                                      PHPYCONST double (*length_lists)[27],
                                      const int num_lists,
                                      const double symprec)
{
  int i,j,k;
  int count;
  double minimum;
  double (*vectors)[3];
  double *lengths;

  for (i = 0; i < num_lists; i++) {
    /* Look at a single list of 27 vectors. */
    lengths = length_lists[i];
    vectors = vector_lists[i];

    /* Compute the minimum length. */
    minimum = DBL_MAX;
    for (j = 0; j < 27; j++) {
      if (lengths[j] < minimum) {
        minimum = lengths[j];
      }
    }

    /* Copy vectors whose length is within tolerance. 
*/ count = 0; for (j = 0; j < 27; j++) { if (lengths[j] - minimum <= symprec) { for (k = 0; k < 3; k++) { shortest_vectors[i][count][k] = vectors[j][k]; } count++; } } multiplicity[i] = count; } } static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3], int *multiplicity, PHPYCONST double (*pos_to)[3], const int num_pos_to, PHPYCONST double (*pos_from)[3], const int num_pos_from, PHPYCONST int (*lattice_points)[3], const int num_lattice_points, PHPYCONST double reduced_basis[3][3], PHPYCONST int trans_mat[3][3], const double symprec) { int i, j, k, l, count; double length_tmp, minimum, vec_xyz; double *length; double (*vec)[3]; length = (double*)malloc(sizeof(double) * num_lattice_points); vec = (double(*)[3])malloc(sizeof(double[3]) * num_lattice_points); for (i = 0; i < num_pos_to; i++) { for (j = 0; j < num_pos_from; j++) { for (k = 0; k < num_lattice_points; k++) { length[k] = 0; for (l = 0; l < 3; l++) { vec[k][l] = pos_to[i][l] - pos_from[j][l] + lattice_points[k][l]; } for (l = 0; l < 3; l++) { length_tmp = (reduced_basis[l][0] * vec[k][0] + reduced_basis[l][1] * vec[k][1] + reduced_basis[l][2] * vec[k][2]); length[k] += length_tmp * length_tmp; } length[k] = sqrt(length[k]); } minimum = DBL_MAX; for (k = 0; k < num_lattice_points; k++) { if (length[k] < minimum) { minimum = length[k]; } } count = 0; for (k = 0; k < num_lattice_points; k++) { if (length[k] - minimum < symprec) { for (l = 0; l < 3; l++) { /* Transform to supercell coordinates */ vec_xyz = (trans_mat[l][0] * vec[k][0] + trans_mat[l][1] * vec[k][1] + trans_mat[l][2] * vec[k][2]); smallest_vectors[i * num_pos_from + j][count][l] = vec_xyz; } count++; } } if (count > 27) { /* should not be greater than 27 */ printf("Warning (gsv_set_smallest_vectors): "); printf("number of shortest vectors is out of range,\n"); break; } else { multiplicity[i * num_pos_from + j] = count; } } } free(length); length = NULL; free(vec); vec = NULL; } static void distribute_fc2(double (*fc2)[3][3], /* 
                              shape[n_pos][n_pos] */
                           const int * atom_list,
                           const int len_atom_list,
                           PHPYCONST double (*r_carts)[3][3], /* shape[n_rot] */
                           const int * permutations, /* shape[n_rot][n_pos] */
                           const int * map_atoms, /* shape [n_pos] */
                           const int * map_syms, /* shape [n_pos] */
                           const int num_rot,
                           const int num_pos)
{
  int i, j, k, l, m;
  int atom_todo, atom_done, atom_other;
  int sym_index;
  int *atom_list_reverse;
  double (*fc2_done)[3];
  double (*fc2_todo)[3];
  double (*r_cart)[3];
  const int * permutation;

  atom_list_reverse = NULL;
  atom_list_reverse = (int*)malloc(sizeof(int) * num_pos);
  /* Reverse lookup: atom index -> position in atom_list, only filled for
   * "done" atoms (those that map to themselves under map_atoms). */
  /* atom_list_reverse[!atom_done] is undefined. */
  for (i = 0; i < len_atom_list; i++) {
    atom_done = map_atoms[atom_list[i]];
    if (atom_done == atom_list[i]) {
      atom_list_reverse[atom_done] = i;
    }
  }

  for (i = 0; i < len_atom_list; i++) {
    /* look up how this atom maps into the done list. */
    atom_todo = atom_list[i];
    atom_done = map_atoms[atom_todo];
    sym_index = map_syms[atom_todo];

    /* skip the atoms in the done list, */
    /* which are easily identified because they map to themselves. */
    if (atom_todo == atom_done) {
      continue;
    }

    /* look up information about the rotation */
    r_cart = r_carts[sym_index];
    permutation = &permutations[sym_index * num_pos]; /* shape[num_pos] */

    /* distribute terms from atom_done to atom_todo */
    for (atom_other = 0; atom_other < num_pos; atom_other++) {
      fc2_done = fc2[atom_list_reverse[atom_done] * num_pos + permutation[atom_other]];
      fc2_todo = fc2[i * num_pos + atom_other];
      for (j = 0; j < 3; j++) {
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            for (m = 0; m < 3; m++) {
              /* Rotate the 3x3 force-constant block: P' = R^-1 P R.
               * r_cart is orthogonal, so R^-1 is applied via the transpose
               * (note the swapped index order r_cart[l][j], r_cart[m][k]). */
              /* P' = R^-1 P R */
              fc2_todo[j][k] += r_cart[l][j] * r_cart[m][k] * fc2_done[l][m];
            }
          }
        }
      }
    }
  }

  free(atom_list_reverse);
  atom_list_reverse = NULL;
}

/* Enforce index-permutation symmetry on a full force-constant matrix:
 * fc[i][j] and fc[j][i] are replaced by the symmetrized average so that
 * fc[i*N+j][k][l] == fc[j*N+i][l][k] (fc is flattened as [N][N][3][3]). */
static void set_index_permutation_symmetry_fc(double * fc, const int natom)
{
  int i, j, k, l, m, n;

  for (i = 0; i < natom; i++) {
    /* non diagonal part: average fc[i][j][k][l] with fc[j][i][l][k] */
    for (j = i + 1; j < natom; j++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          m = i * natom * 9 + j * 9 + k * 3 + l;
          n = j * natom * 9 + i * 9 + l * 3 + k;
          fc[m] += fc[n];
          fc[m] /= 2;
          fc[n] = fc[m];
        }
      }
    }

    /* diagonal part: symmetrize the 3x3 block fc[i][i] itself */
    for (k = 0; k < 2; k++) {
      for (l = k + 1; l < 3; l++) {
        m = i * natom * 9 + i * 9 + k * 3 + l;
        n = i * natom * 9 + i * 9 + l * 3 + k;
        fc[m] += fc[n];
        fc[m] /= 2;
        fc[n] = fc[m];
      }
    }
  }
}

/* Enforce the acoustic sum rule: the self term fc[i][i] is set so each row
 * of force constants sums to zero (symmetrized over the 3x3 indices). */
static void set_translational_symmetry_fc(double * fc, const int natom)
{
  int i, j, k, l, m;
  double sums[3][3];

  for (i = 0; i < natom; i++) {
    /* Sum the (k,l) components over all other atoms j != i. */
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        sums[k][l] = 0;
        m = i * natom * 9 + k * 3 + l;
        for (j = 0; j < natom; j++) {
          if (i != j) {
            sums[k][l] += fc[m];
          }
          m += 9;
        }
      }
    }
    /* Replace the self term with minus the symmetrized off-site sum. */
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        fc[i * natom * 9 + i * 9 + k * 3 + l] = -(sums[k][l] + sums[l][k]) / 2;
      }
    }
  }
}

/* Index-permutation symmetrization for a compact (primitive x supercell)
 * force-constant matrix; when is_transpose is set, the pairs are swapped
 * instead of averaged. */
static void set_index_permutation_symmetry_compact_fc(double * fc,
                                                      const int p2s[],
                                                      const int s2pp[],
                                                      const int nsym_list[],
                                                      const int perms[],
                                                      const int n_satom,
                                                      const int n_patom,
                                                      const int is_transpose)
{
  int i, j, k, l, m, n, i_p, j_p, i_trans;
  double fc_elem;
  char *done;

  done = NULL;
  done = 
         (char*)malloc(sizeof(char) * n_satom * n_patom);
  /* done[i_p * n_satom + j] marks (i_p, j) pairs already symmetrized. */
  for (i = 0; i < n_satom * n_patom; i++) {
    done[i] = 0;
  }

  for (j = 0; j < n_satom; j++) {
    j_p = s2pp[j];
    for (i_p = 0; i_p < n_patom; i_p++) {
      i = p2s[i_p];
      if (i == j) { /* diagonal part: symmetrize the 3x3 self block */
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            if (l > k) {
              m = i_p * n_satom * 9 + i * 9 + k * 3 + l;
              n = i_p * n_satom * 9 + i * 9 + l * 3 + k;
              if (is_transpose) {
                fc_elem = fc[m];
                fc[m] = fc[n];
                fc[n] = fc_elem;
              } else {
                fc[m] = (fc[m] + fc[n]) / 2;
                fc[n] = fc[m];
              }
            }
          }
        }
      }
      if (!done[i_p * n_satom + j]) {
        /* (j, i) -- nsym_list[j] --> (j', i') */
        /* nsym_list[j] translates j to j' where j' is in */
        /* primitive cell. The same translation sends i to i' */
        /* where i' is not necessarily to be in primitive cell. */
        /* Thus, i' = perms[nsym_list[j] * n_satom + i] */
        i_trans = perms[nsym_list[j] * n_satom + i];
        done[i_p * n_satom + j] = 1;
        done[j_p * n_satom + i_trans] = 1;
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            /* Pair (i_p, j)[k][l] with its permutation image
             * (j_p, i_trans)[l][k]: swap or average. */
            m = i_p * n_satom * 9 + j * 9 + k * 3 + l;
            n = j_p * n_satom * 9 + i_trans * 9 + l * 3 + k;
            if (is_transpose) {
              fc_elem = fc[m];
              fc[m] = fc[n];
              fc[n] = fc_elem;
            } else {
              fc[m] = (fc[n] + fc[m]) / 2;
              fc[n] = fc[m];
            }
          }
        }
      }
    }
  }

  free(done);
  done = NULL;
}

/* Acoustic sum rule for the compact force-constant matrix: the self block
 * fc[i_p][p2s[i_p]] is set so each primitive-atom row sums to zero. */
static void set_translational_symmetry_compact_fc(double * fc,
                                                  const int p2s[],
                                                  const int n_satom,
                                                  const int n_patom)
{
  int j, k, l, m, i_p;
  double sums[3][3];

  for (i_p = 0; i_p < n_patom; i_p++) {
    /* Sum (k,l) components over all supercell atoms except the self term. */
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        sums[k][l] = 0;
        m = i_p * n_satom * 9 + k * 3 + l;
        for (j = 0; j < n_satom; j++) {
          if (p2s[i_p] != j) {
            sums[k][l] += fc[m];
          }
          m += 9;
        }
      }
    }
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        fc[i_p * n_satom * 9 + p2s[i_p] * 9 + k * 3 + l] =
          -(sums[k][l] + sums[l][k]) / 2;
      }
    }
  }
}

/* Round to the nearest integer, away from zero at .5 boundaries. */
static int nint(const double a)
{
  if (a < 0.0)
    return (int) (a - 0.5);
  else
    return (int) (a + 0.5);
}
exact_cover_omp_tasks.c
/** * Version OpenMP avec tâches * * Quentin Deschamps, 2021 */ #include <ctype.h> #include <stdio.h> #include <stdbool.h> #include <string.h> #include <stdlib.h> #include <err.h> #include <getopt.h> #include <sys/time.h> #include <omp.h> double start = 0.0; char *in_filename = NULL; // nom du fichier contenant la matrice bool print_solutions = false; // affiche chaque solution long long report_delta = 1e6; // affiche un rapport tous les ... noeuds long long next_report; // prochain rapport affiché au noeud... long long max_solutions = 0x7fffffffffffffff; // stop après ... solutions /* Variable contenant le nombre de solutions trouvées */ long long solutions = 0; struct instance_t { int n_items; int n_primary; int n_options; char **item_name; // potentiellement NULL, sinon de taille n_items int *options; // l'option i contient les objets options[ptr[i]:ptr[i+1]] int *ptr; // taille n_options + 1 }; struct sparse_array_t { int len; // nombre d'éléments stockés int capacity; // taille maximale int *p; // contenu de l'ensemble = p[0:len] int *q; // taille capacity (tout comme p) }; struct context_t { struct sparse_array_t *active_items; // objets actifs struct sparse_array_t **active_options; // options actives contenant l'objet i int *chosen_options; // options choisies à ce stade int *child_num; // numéro du fils exploré int *num_children; // nombre de fils à explorer int level; // nombre d'options choisies long long nodes; // nombre de noeuds explorés long long solutions; // nombre de solutions trouvées }; static const char DIGITS[62] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'}; double wtime() { struct timeval ts; gettimeofday(&ts, NULL); return (double) ts.tv_sec + ts.tv_usec / 1e6; } void usage(char 
**argv) { printf("%s --in FILENAME [OPTIONS]\n\n", argv[0]); printf("Options:\n"); printf("--progress-report N display a message every N nodes (0 to disable)\n"); printf("--print-solutions display solutions when they are found\n"); printf("--stop-after N stop the search once N solutions are found\n"); exit(0); } bool item_is_primary(const struct instance_t *instance, int item) { return item < instance->n_primary; } void print_option(const struct instance_t *instance, int option) { if (instance->item_name == NULL) errx(1, "tentative d'affichage sans noms d'objet"); for (int p = instance->ptr[option]; p < instance->ptr[option + 1]; p++) { int item = instance->options[p]; printf("%s ", instance->item_name[item]); } printf("\n"); } struct sparse_array_t * sparse_array_init(int n) { struct sparse_array_t *S = malloc(sizeof(*S)); if (S == NULL) err(1, "impossible d'allouer un tableau creux"); S->len = 0; S->capacity = n; S->p = malloc(n * sizeof(int)); S->q = malloc(n * sizeof(int)); if (S->p == NULL || S->q == NULL) err(1, "Impossible d'allouer p/q dans un tableau creux"); for (int i = 0; i < n; i++) S->q[i] = n; // initialement vide return S; } bool sparse_array_membership(const struct sparse_array_t *S, int x) { return (S->q[x] < S->len); } bool sparse_array_empty(const struct sparse_array_t *S) { return (S->len == 0); } void sparse_array_add(struct sparse_array_t *S, int x) { int i = S->len; S->p[i] = x; S->q[x] = i; S->len = i + 1; } void sparse_array_remove(struct sparse_array_t *S, int x) { int j = S->q[x]; int n = S->len - 1; // échange p[j] et p[n] int y = S->p[n]; S->p[n] = x; S->p[j] = y; // met q à jour S->q[x] = n; S->q[y] = j; S->len = n; } void sparse_array_unremove(struct sparse_array_t *S) { S->len++; } void sparse_array_unadd(struct sparse_array_t *S) { S->len--; } /** * Affiche un tableau creux. 
 *
 * @param S the sparse array to print
 */
void sparse_array_print(const struct sparse_array_t *S)
{
        printf("== Sparse array ==\n");
        printf("len: %d\n", S->len);
        printf("capacity: %d\n", S->capacity);
        printf("p:");
        for (int i = 0; i < S->capacity; i++)
                printf(" %d", S->p[i]);
        printf("\nq:");
        for (int i = 0; i < S->capacity; i++)
                printf(" %d", S->q[i]);
        printf("\n");
}

/* True when the item is still active (not yet covered). */
bool item_is_active(const struct context_t *ctx, int item)
{
        return sparse_array_membership(ctx->active_items, item);
}

/* Record a solution; optionally print the chosen options. */
void solution_found(const struct instance_t *instance, struct context_t *ctx)
{
        ctx->solutions++;
        if (!print_solutions)
                return;
        printf("Trouvé une nouvelle solution au niveau %d après %lld noeuds\n",
                        ctx->level, ctx->nodes);
        printf("Options : \n");
        for (int i = 0; i < ctx->level; i++) {
                int option = ctx->chosen_options[i];
                printf("+ %d : ", option);
                print_option(instance, option);
        }
        printf("\n");
        printf("----------------------------------------------------\n");
}

void cover(const struct instance_t *instance, struct context_t *ctx, int item);

/* Take option into the partial solution and cover all of its other items
 * (chosen_item is already being covered by the caller). */
void choose_option(const struct instance_t *instance, struct context_t *ctx,
                        int option, int chosen_item)
{
        ctx->chosen_options[ctx->level] = option;
        ctx->level++;
        for (int p = instance->ptr[option]; p < instance->ptr[option + 1]; p++) {
                int item = instance->options[p];
                if (item == chosen_item)
                        continue;
                cover(instance, ctx, item);
        }
}

void uncover(const struct instance_t *instance, struct context_t *ctx, int item);

/* Undo choose_option: uncover the items in exact reverse order, then pop
 * the option off the partial solution. */
void unchoose_option(const struct instance_t *instance, struct context_t *ctx,
                        int option, int chosen_item)
{
        for (int p = instance->ptr[option + 1] - 1; p >= instance->ptr[option]; p--) {
                int item = instance->options[p];
                if (item == chosen_item)
                        continue;
                uncover(instance, ctx, item);
        }
        ctx->level--;
}

/* MRV heuristic: pick the active item covered by the fewest active options. */
int choose_next_item(struct context_t *ctx)
{
        int best_item = -1;
        int best_options = 0x7fffffff;
        struct sparse_array_t *active_items = ctx->active_items;

        for (int i = 0; i < active_items->len; i++) {
                int item = active_items->p[i];
                struct 
                       sparse_array_t *active_options = ctx->active_options[item];
                int k = active_options->len;
                if (k < best_options) {
                        best_item = item;
                        best_options = k;
                }
        }
        return best_item;
}

/* Print a one-line progress summary (node count, solutions, elapsed time,
 * and a compact base-62 trace of the current search path). */
void progress_report(const struct context_t *ctx)
{
        double now = wtime();
        printf("Exploré %lld noeuds, trouvé %lld solutions, temps écoulé %.1fs. ",
                        ctx->nodes, ctx->solutions, now - start);
        int i = 0;
        for (int k = 0; k < ctx->level; k++) {
                if (i > 44)
                        break;
                int n = ctx->child_num[k];
                int m = ctx->num_children[k];
                if (m == 1)
                        continue;  /* forced moves carry no information */
                printf("%c%c ", (n < 62) ? DIGITS[n] : '*', (m < 62) ? DIGITS[m] : '*');
                i++;
        }
        printf("\n"), next_report += report_delta;
}

void deactivate(const struct instance_t *instance, struct context_t *ctx,
                        int option, int covered_item);

/* Cover an item: remove it from the active items (if primary) and deactivate
 * every option that contains it. */
void cover(const struct instance_t *instance, struct context_t *ctx, int item)
{
        if (item_is_primary(instance, item))
                sparse_array_remove(ctx->active_items, item);
        struct sparse_array_t *active_options = ctx->active_options[item];
        for (int i = 0; i < active_options->len; i++) {
                int option = active_options->p[i];
                deactivate(instance, ctx, option, item);
        }
}

/* Remove option from the active-option lists of all its items except the one
 * currently being covered. */
void deactivate(const struct instance_t *instance, struct context_t *ctx,
                        int option, int covered_item)
{
        for (int k = instance->ptr[option]; k < instance->ptr[option+1]; k++) {
                int item = instance->options[k];
                if (item == covered_item)
                        continue;
                sparse_array_remove(ctx->active_options[item], option);
        }
}

void reactivate(const struct instance_t *instance, struct context_t *ctx,
                        int option, int uncovered_item);

/* Undo cover: reactivate the options in exact reverse order, then restore
 * the item itself.  The reverse order is what makes the O(1) unremove of the
 * sparse arrays correct. */
void uncover(const struct instance_t *instance, struct context_t *ctx, int item)
{
        struct sparse_array_t *active_options = ctx->active_options[item];
        for (int i = active_options->len - 1; i >= 0; i--) {
                int option = active_options->p[i];
                reactivate(instance, ctx, option, item);
        }
        if (item_is_primary(instance, item))
                sparse_array_unremove(ctx->active_items);
}

/* Undo deactivate, walking the option's items in reverse order. */
void reactivate(const struct instance_t *instance, struct context_t *ctx,
                        int option, int uncovered_item)
{
        for (int k = 
             instance->ptr[option + 1] - 1; k >= instance->ptr[option]; k--) {
                int item = instance->options[k];
                if (item == uncovered_item)
                        continue;
                sparse_array_unremove(ctx->active_options[item]);
        }
}

/* Parse an exact-cover instance from a text file: a header line with the
 * item and option counts, a line of item names (a '|' separates primary
 * from secondary items), then one option per line.  Exits on any error. */
struct instance_t * load_matrix(const char *filename)
{
        struct instance_t *instance = malloc(sizeof(*instance));
        if (instance == NULL)
                err(1, "Impossible d'allouer l'instance");
        FILE *in = fopen(filename, "r");
        if (in == NULL)
                err(1, "Impossible d'ouvrir %s en lecture", filename);
        int n_it, n_op;
        if (fscanf(in, "%d %d\n", &n_it, &n_op) != 2)
                errx(1, "Erreur de lecture de la taille du problème\n");
        if (n_it == 0 || n_op == 0)
                errx(1, "Impossible d'avoir 0 objets ou 0 options");
        instance->n_items = n_it;
        instance->n_primary = 0;
        instance->n_options = n_op;
        instance->item_name = malloc(n_it * sizeof(char *));
        instance->ptr = malloc((n_op + 1) * sizeof(int));
        instance->options = malloc(n_it * n_op *sizeof(int));   // massive over-allocation
        if (instance->item_name == NULL || instance->ptr == NULL || instance->options == NULL)
                err(1, "Impossible d'allouer la mémoire pour stocker la matrice");

        /* Character-level state machine shared by both parsing passes. */
        enum state_t {START, ID, WHITESPACE, BAR, ENDLINE, ENDFILE};
        enum state_t state = START;

        char buffer[256];
        int i = 0;               // next available byte of the buffer
        int n = 0;               // last available byte of the buffer
        char id[65];
        id[64] = 0;              // sentinel at the end, no matter what
        int j = 0;               // length of the identifier being read
        int current_item = 0;

        /* First pass: read the item-names line. */
        while (state != ENDLINE) {
                enum state_t prev_state = state;
                if (i >= n) {
                        n = fread(buffer, 1, 256, in);
                        if (n == 0) {
                                if (feof(in)) {
                                        state = ENDFILE;
                                }
                                if (ferror(in))
                                        err(1, "erreur lors de la lecture de %s", in_filename);
                        }
                        i = 0;
                }
                if (state == ENDFILE) {
                        // don't examine buffer[i]
                } else if (buffer[i] == '\n') {
                        state = ENDLINE;
                } else if (buffer[i] == '|') {
                        state = BAR;
                } else if (isspace(buffer[i])) {
                        state = WHITESPACE;
                } else {
                        state = ID;
                }
                // process the character just read
                if (state == ID) {
                        if (j == 64)
                                errx(1, "nom d'objet trop long : %s", id);
                        id[j] = buffer[i];
                        j++;
                }
                // an identifier just ended: register the item name
                if (prev_state == ID && state != ID) {
                        id[j] = '\0';
                        if (current_item == instance->n_items)
                                errx(1, "Objet excedentaire : %s", id);
                        for (int k = 0; k < current_item; k++)
                                if (strcmp(id, instance->item_name[k]) == 0)
                                        errx(1, "Nom d'objets dupliqué : %s", id);
                        instance->item_name[current_item] = malloc(j+1);
                        strcpy(instance->item_name[current_item], id);
                        current_item++;
                        j = 0;
                }
                // '|' marks the end of the primary items
                if (state == BAR)
                        instance->n_primary = current_item;
                if (state == ENDFILE)
                        errx(1, "Fin de fichier prématurée");
                // move on to the next character
                i++;
        }
        if (current_item != instance->n_items)
                errx(1, "Incohérence : %d objets attendus mais seulement %d fournis\n",
                                instance->n_items, current_item);
        if (instance->n_primary == 0)
                instance->n_primary = instance->n_items;

        /* Second pass: read the options, one per line. */
        int current_option = 0;
        int p = 0;               // current write pointer into instance->options
        instance->ptr[0] = p;
        bool has_primary = false;
        while (state != ENDFILE) {
                enum state_t prev_state = state;
                if (i >= n) {
                        n = fread(buffer, 1, 256, in);
                        if (n == 0) {
                                if (feof(in)) {
                                        state = ENDFILE;
                                }
                                if (ferror(in))
                                        err(1, "erreur lors de la lecture de %s", in_filename);
                        }
                        i = 0;
                }
                if (state == ENDFILE) {
                        // don't examine buffer[i]
                } else if (buffer[i] == '\n') {
                        state = ENDLINE;
                } else if (buffer[i] == '|') {
                        state = BAR;
                } else if (isspace(buffer[i])) {
                        state = WHITESPACE;
                } else {
                        state = ID;
                }
                // process the character just read
                if (state == ID) {
                        if (j == 64)
                                errx(1, "nom d'objet trop long : %s", id);
                        id[j] = buffer[i];
                        j++;
                }
                if (prev_state == ID && state != ID) {
                        id[j] = '\0';
                        // identify the item in question by name
                        int item_number = -1;
                        for (int k = 0; k < instance->n_items; k++)
                                if (strcmp(id, instance->item_name[k]) == 0) {
                                        item_number = k;
                                        break;
                                }
                        if (item_number == -1)
                                errx(1, "Objet %s inconnu dans l'option #%d", id, current_option);
                        // detect repeated items within an option
                        for (int k = instance->ptr[current_option]; k < p; k++)
                                if (item_number == instance->options[k])
                                        errx(1, "Objet %s répété dans l'option %d\n",
                                                        instance->item_name[item_number], current_option);
                        instance->options[p] = item_number;
                        p++;
                        has_primary |= item_is_primary(instance, item_number);
                        j = 0;
                }
                if (state == BAR) {
                        errx(1, "Trouvé | dans une option.");
                }
                if ((state == ENDLINE || state == ENDFILE)) {
                        // skip empty lines
                        if (p > instance->ptr[current_option]) {
                                if (current_option == instance->n_options)
                                        errx(1, "Option excédentaire");
                                if (!has_primary)
                                        errx(1, "Option %d sans objet primaire\n", current_option);
                                current_option++;
                                instance->ptr[current_option] = p;
                                has_primary = false;
                        }
                }
                // move on to the next character
                i++;
        }
        if (current_option != instance->n_options)
                errx(1, "Incohérence : %d options attendues mais seulement %d fournies\n",
                                instance->n_options, current_option);
        fclose(in);
        fprintf(stderr, "Lu %d objets (%d principaux) et %d options\n",
                        instance->n_items, instance->n_primary, instance->n_options);
        return instance;
}

/* Allocate and initialize a fresh search context for the given instance:
 * all primary items active, every option active for each of its items. */
struct context_t * backtracking_setup(const struct instance_t *instance)
{
        struct context_t *ctx = malloc(sizeof(*ctx));
        if (ctx == NULL)
                err(1, "impossible d'allouer un contexte");
        ctx->level = 0;
        ctx->nodes = 0;
        ctx->solutions = 0;
        int n = instance->n_items;
        int m = instance->n_options;
        ctx->active_options = malloc(n * sizeof(*ctx->active_options));
        ctx->chosen_options = malloc(n * sizeof(*ctx->chosen_options));
        ctx->child_num = malloc(n * sizeof(*ctx->child_num));
        ctx->num_children = malloc(n * sizeof(*ctx->num_children));
        if (ctx->active_options == NULL || ctx->chosen_options == NULL
                || ctx->child_num == NULL || ctx->num_children == NULL)
                err(1, "impossible d'allouer le contexte");
        ctx->active_items = sparse_array_init(n);
        for (int item = 0; item < instance->n_primary; item++)
                sparse_array_add(ctx->active_items, item);
        ctx->active_options = ctx->active_options;
        for (int item = 0; item < n; item++)
                ctx->active_options[item] = sparse_array_init(m);
        for (int option = 0; option < m; option++)
                for (int k = instance->ptr[option]; k < instance->ptr[option + 1]; k++) {
                        int item = instance->options[k];
                        
                     sparse_array_add(ctx->active_options[item], option);
                }
        return ctx;
}

/**
 * Copies an array of integers.
 *
 * @param a array of integers
 * @param n size of the array
 * @return copy of a
 */
int *array_copy(const int *a, int n)
{
        int *A = malloc(n * sizeof(int));
        if (A == NULL)
                err(1, "impossible d'allouer un tableau");
        for (int i = 0; i < n; i++) {
                A[i] = a[i];
        }
        return A;
}

/**
 * Copies a sparse array (deep copy of both p and q).
 *
 * @param s sparse array
 * @return copy of s
 */
struct sparse_array_t *sparse_array_copy(const struct sparse_array_t *s)
{
        struct sparse_array_t *S = malloc(sizeof(*S));
        if (S == NULL)
                err(1, "impossible d'allouer un tableau creux");
        S->len = s->len;
        S->capacity = s->capacity;
        S->p = array_copy(s->p, s->capacity);
        S->q = array_copy(s->q, s->capacity);
        return S;
}

/**
 * Creates a deep copy of the given context, so that an OpenMP task can
 * explore its subtree independently of the parent's context.
 *
 * @param ctx context
 * @param n number of items
 * @return copy of ctx
 */
struct context_t * copy_ctx(const struct context_t *ctx, int n)
{
        struct context_t *ctx_copy = malloc(sizeof(*ctx_copy));
        if (ctx_copy == NULL)
                err(1, "impossible d'allouer un contexte");

        /* Copy of level, nodes and solutions */
        ctx_copy->level = ctx->level;
        ctx_copy->nodes = ctx->nodes;
        ctx_copy->solutions = ctx->solutions;

        /* Copy of chosen_options */
        ctx_copy->chosen_options = array_copy(ctx->chosen_options, n);

        /* Copy of child_num */
        ctx_copy->child_num = array_copy(ctx->child_num, n);

        /* Copy of num_children */
        ctx_copy->num_children = array_copy(ctx->num_children, n);

        /* Copy of active_items */
        ctx_copy->active_items = sparse_array_copy(ctx->active_items);

        /* Copy of active_options */
        ctx_copy->active_options = malloc(n * sizeof(*ctx_copy->active_options));
        for (int item = 0; item < n; item++)
                ctx_copy->active_options[item] = sparse_array_copy(ctx->active_options[item]);

        return ctx_copy;
}

/**
 * Frees the memory of a sparse array.
 *
 * @param S sparse array
 */
void sparse_array_free(struct sparse_array_t *S)
{
        free(S->p);
        free(S->q);
        free(S);
}

/**
 * Frees the memory of a context.
 *
 * @param ctx context
 * @param n number of items
 */
void free_ctx(struct context_t *ctx, int n)
{
        sparse_array_free(ctx->active_items);
        for (int item = 0; item < n; item++)
                sparse_array_free(ctx->active_options[item]);
        free(ctx->active_options);
        free(ctx->chosen_options);
        free(ctx->child_num);
        free(ctx->num_children);
        free(ctx);
}

/**
 * Frees the memory of an instance.
 *
 * @param instance instance
 */
void free_instance(struct instance_t *instance)
{
        if (instance->item_name != NULL) {
                for (int i = 0; i < instance->n_items; i++) {
                        free(instance->item_name[i]);
                }
                free(instance->item_name);
        }
        free(instance->options);
        free(instance->ptr);
        free(instance);
}

/* Sequential recursive exact-cover search (Knuth's Algorithm X), run inside
 * each OpenMP task on a private context. */
void solve(const struct instance_t *instance, struct context_t *ctx)
{
        ctx->nodes++;
        // if (ctx->nodes == next_report)
        //      progress_report(ctx);
        if (sparse_array_empty(ctx->active_items)) {
                solution_found(instance, ctx);
                return;                         /* success: no more active items */
        }
        int chosen_item = choose_next_item(ctx);
        struct sparse_array_t *active_options = ctx->active_options[chosen_item];
        if (sparse_array_empty(active_options))
                return;           /* failure: impossible to cover chosen_item */
        cover(instance, ctx, chosen_item);
        ctx->num_children[ctx->level] = active_options->len;
        for (int k = 0; k < active_options->len; k++) {
                int option = active_options->p[k];
                ctx->child_num[ctx->level] = k;
                choose_option(instance, ctx, option, chosen_item);
                solve(instance, ctx);
                if (ctx->solutions >= max_solutions)
                        return;
                unchoose_option(instance, ctx, option, chosen_item);
        }
        uncover(instance, ctx, chosen_item);                      /* backtrack */
}

/**
 * Creates the tasks that explore the search tree.
* * @param instance instance */ void solve_create_tasks(const struct instance_t *instance) { /* Création d'un contexte */ struct context_t * ctx = backtracking_setup(instance); /* Début de résolution */ ctx->nodes++; int chosen_item = choose_next_item(ctx); struct sparse_array_t *active_options = ctx->active_options[chosen_item]; cover(instance, ctx, chosen_item); ctx->num_children[ctx->level] = active_options->len; for (int k = 0; k < active_options->len; k++) { int option = active_options->p[k]; /* Création de la tâche */ #pragma omp task { /* Copie du contexte */ struct context_t *ctx_copy = copy_ctx(ctx, instance->n_items); /* Choix de l'option sur la copie */ ctx_copy->child_num[ctx_copy->level] = k; choose_option(instance, ctx_copy, option, chosen_item); /* Résolution avec la copie */ solve(instance, ctx_copy); /* Mise à jour des solutions */ #pragma omp atomic solutions += ctx_copy->solutions; /* Suppression de la copie */ free_ctx(ctx_copy, instance->n_items); } } /* Suppression du contexte */ #pragma omp taskwait free_ctx(ctx, instance->n_items); } int main(int argc, char **argv) { struct option longopts[5] = { {"in", required_argument, NULL, 'i'}, {"progress-report", required_argument, NULL, 'v'}, {"print-solutions", no_argument, NULL, 'p'}, {"stop-after", required_argument, NULL, 's'}, {NULL, 0, NULL, 0} }; char ch; while ((ch = getopt_long(argc, argv, "", longopts, NULL)) != -1) { switch (ch) { case 'i': in_filename = optarg; break; case 'p': print_solutions = true; break; case 's': max_solutions = atoll(optarg); break; case 'v': report_delta = atoll(optarg); break; default: errx(1, "Unknown option\n"); } } if (in_filename == NULL) usage(argv); next_report = report_delta; /* Load instance */ struct instance_t * instance = load_matrix(in_filename); start = wtime(); #pragma omp parallel #pragma omp single solve_create_tasks(instance); /* Free instance */ free_instance(instance); printf("FINI. 
Trouvé %lld solutions en %.1fs\n", solutions, wtime() - start); exit(EXIT_SUCCESS); }
NeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_NG_H_ #define _SPTAG_COMMON_NG_H_ #include "../VectorIndex.h" #include "CommonUtils.h" #include "Dataset.h" #include "FineGrainedLock.h" #include "QueryResultSet.h" #include <chrono> #if defined(GPU) #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <typeinfo> #include <cuda_fp16.h> #include "inc/Core/Common/cuda/KNN.hxx" #include "inc/Core/Common/cuda/params.h" #endif namespace SPTAG { namespace COMMON { class NeighborhoodGraph { public: NeighborhoodGraph(): m_iTPTNumber(32), m_iTPTLeafSize(2000), m_iSamples(1000), m_numTopDimensionTPTSplit(5), m_iNeighborhoodSize(32), m_iNeighborhoodScale(2), m_iCEFScale(2), m_iRefineIter(2), m_iCEF(1000), m_iAddCEF(500), m_iMaxCheckForRefineGraph(10000), m_iGPUGraphType(2), m_iGPURefineSteps(0), m_iGPURefineDepth(2), m_iGPULeafSize(500), m_iGPUBatches(1) {} ~NeighborhoodGraph() {} virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0; virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0; virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { DimensionType* correct = new DimensionType[samples]; #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < samples; i++) { SizeType x = COMMON::Utils::rand(m_iGraphSize); //int x = i; COMMON::QueryResultSet<void> query(nullptr, m_iCEF); for (SizeType y = 0; y < m_iGraphSize; y++) { if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue; float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y)); query.AddPoint(y, dist); } query.SortResult(); SizeType * exact_rng = new SizeType[m_iNeighborhoodSize]; RebuildNeighbors(index, x, exact_rng, 
query.GetResults(), m_iCEF); correct[i] = 0; for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) { if (exact_rng[j] == -1) { correct[i] += m_iNeighborhoodSize - j; break; } for (DimensionType k = 0; k < m_iNeighborhoodSize; k++) if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) { correct[i]++; break; } } delete[] exact_rng; } float acc = 0; for (SizeType i = 0; i < samples; i++) acc += float(correct[i]); acc = acc / samples / m_iNeighborhoodSize; delete[] correct; return acc; } #if defined(GPU) template <typename T> void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap) { SizeType initSize; SPTAG::Helper::Convert::ConvertStringTo(index->GetParameter("NumberOfInitialDynamicPivots").c_str(), initSize); // Build the entire RNG graph, both builds the KNN and refines it to RNG buildGraph<T>(index, m_iGraphSize, m_iNeighborhoodSize, m_iTPTNumber, (int*)m_pNeighborhoodGraph[0], m_iGPURefineSteps, m_iGPURefineDepth, m_iGPUGraphType, m_iGPULeafSize, initSize); if (idmap != nullptr) { std::unordered_map<SizeType, SizeType>::const_iterator iter; for (SizeType i = 0; i < m_iGraphSize; i++) { for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) { if ((iter = idmap->find(m_pNeighborhoodGraph[i][j])) != idmap->end()) m_pNeighborhoodGraph[i][j] = iter->second; } } } } #else template <typename T> void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last, std::vector<std::pair<SizeType, SizeType>> & leaves) { if (last - first <= m_iTPTLeafSize) { leaves.emplace_back(first, last); } else { std::vector<float> Mean(index->GetFeatureDim(), 0); int iIteration = 100; SizeType end = min(first + m_iSamples, last); SizeType count = end - first + 1; // calculate the mean of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)index->GetSample(indices[j]); for (DimensionType k = 0; k < index->GetFeatureDim(); k++) { Mean[k] += v[k]; } } for (DimensionType k = 0; 
k < index->GetFeatureDim(); k++) { Mean[k] /= count; } std::vector<BasicResult> Variance; Variance.reserve(index->GetFeatureDim()); for (DimensionType j = 0; j < index->GetFeatureDim(); j++) { Variance.emplace_back(j, 0.0f); } // calculate the variance of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)index->GetSample(indices[j]); for (DimensionType k = 0; k < index->GetFeatureDim(); k++) { float dist = v[k] - Mean[k]; Variance[k].Dist += dist*dist; } } std::sort(Variance.begin(), Variance.end(), COMMON::Compare); std::vector<SizeType> indexs(m_numTopDimensionTPTSplit); std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit); float bestvariance = Variance[index->GetFeatureDim() - 1].Dist; for (int i = 0; i < m_numTopDimensionTPTSplit; i++) { indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID; bestweight[i] = 0; } bestweight[0] = 1; float bestmean = Mean[indexs[0]]; std::vector<float> Val(count); for (int i = 0; i < iIteration; i++) { float sumweight = 0; for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { weight[j] = float(rand() % 10000) / 5000.0f - 1.0f; sumweight += weight[j] * weight[j]; } sumweight = sqrt(sumweight); for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { weight[j] /= sumweight; } float mean = 0; for (SizeType j = 0; j < count; j++) { Val[j] = 0; const T* v = (const T*)index->GetSample(indices[first + j]); for (int k = 0; k < m_numTopDimensionTPTSplit; k++) { Val[j] += weight[k] * v[indexs[k]]; } mean += Val[j]; } mean /= count; float var = 0; for (SizeType j = 0; j < count; j++) { float dist = Val[j] - mean; var += dist * dist; } if (var > bestvariance) { bestvariance = var; bestmean = mean; for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { bestweight[j] = weight[j]; } } } SizeType i = first; SizeType j = last; // decide which child one point belongs while (i <= j) { float val = 0; const T* v = (const T*)index->GetSample(indices[i]); for (int k = 0; k < 
m_numTopDimensionTPTSplit; k++) { val += bestweight[k] * v[indexs[k]]; } if (val < bestmean) { i++; } else { std::swap(indices[i], indices[j]); j--; } } // if all the points in the node are equal,equally split the node into 2 if ((i == first) || (i == last + 1)) { i = (first + last + 1) / 2; } Mean.clear(); Variance.clear(); Val.clear(); indexs.clear(); weight.clear(); bestweight.clear(); PartitionByTptree<T>(index, indices, first, i - 1, leaves); PartitionByTptree<T>(index, indices, i, last, leaves); } } template <typename T> void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap) { COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity); std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize)); std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>()); for (SizeType i = 0; i < m_iGraphSize; i++) for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) (NeighborhoodDists)[i][j] = MaxDist; auto t1 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition begin\n"); #pragma omp parallel for schedule(dynamic) for (int i = 0; i < m_iTPTNumber; i++) { Sleep(i * 100); std::srand(clock()); for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j; std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end()); PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]); LOG(Helper::LogLevel::LL_Info, "Finish Getting Leaves for Tree %d\n", i); } LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition done\n"); auto t2 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Build TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()); for (int i = 0; i < m_iTPTNumber; i++) { #pragma 
omp parallel for schedule(dynamic) for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++) { SizeType start_index = TptreeLeafNodes[i][j].first; SizeType end_index = TptreeLeafNodes[i][j].second; if ((j * 5) % TptreeLeafNodes[i].size() == 0) LOG(Helper::LogLevel::LL_Info, "Processing Tree %d %d%%\n", i, static_cast<int>(j * 1.0 / TptreeLeafNodes[i].size() * 100)); for (SizeType x = start_index; x < end_index; x++) { for (SizeType y = x + 1; y <= end_index; y++) { SizeType p1 = TptreeDataIndices[i][x]; SizeType p2 = TptreeDataIndices[i][y]; float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2)); if (idmap != nullptr) { p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1); p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2); } COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize); COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize); } } } TptreeDataIndices[i].clear(); TptreeLeafNodes[i].clear(); } TptreeDataIndices.clear(); TptreeLeafNodes.clear(); auto t3 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Process TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t2).count()); } #endif template <typename T> void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { LOG(Helper::LogLevel::LL_Info, "build RNG graph!\n"); m_iGraphSize = index->GetNumSamples(); m_iNeighborhoodSize = (DimensionType)(ceil(m_iNeighborhoodSize * m_iNeighborhoodScale)); m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity); if (m_iGraphSize < 1000) { RefineGraph<T>(index, idmap); LOG(Helper::LogLevel::LL_Info, "Build RNG Graph end!\n"); return; } auto t1 = std::chrono::high_resolution_clock::now(); BuildInitKNNGraph<T>(index, idmap); auto t2 = 
std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "BuildInitKNNGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()); RefineGraph<T>(index, idmap); if (idmap != nullptr) { for (auto iter = idmap->begin(); iter != idmap->end(); iter++) if (iter->first < 0) { m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second; } } auto t3 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "BuildGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t1).count()); } template <typename T> void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { for (int iter = 0; iter < m_iRefineIter - 1; iter++) { auto t1 = std::chrono::high_resolution_clock::now(); #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < m_iGraphSize; i++) { RefineNode<T>(index, i, false, false, (int)(m_iCEF * m_iCEFScale)); if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", iter, static_cast<int>(i * 1.0 / m_iGraphSize * 100)); } auto t2 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap)); } m_iNeighborhoodSize = (DimensionType)(m_iNeighborhoodSize / m_iNeighborhoodScale); if (m_iRefineIter > 0) { auto t1 = std::chrono::high_resolution_clock::now(); #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < m_iGraphSize; i++) { RefineNode<T>(index, i, false, false, m_iCEF); if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", m_iRefineIter - 1, static_cast<int>(i * 1.0 / m_iGraphSize * 100)); } auto t2 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 
- t1).count(), GraphAccuracyEstimation(index, 100, idmap)); } } template <typename T> ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices, std::shared_ptr<Helper::DiskPriorityIO> output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { std::shared_ptr<NeighborhoodGraph> tmp; if (newGraph == nullptr) { tmp = NeighborhoodGraph::CreateInstance(Type()); newGraph = tmp.get(); } SizeType R = (SizeType)indices.size(); newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity); newGraph->m_iGraphSize = R; newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize; #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < R; i++) { if ((i * 5) % R == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d%%\n", static_cast<int>(i * 1.0 / R * 100)); SizeType* outnodes = newGraph->m_pNeighborhoodGraph[i]; COMMON::QueryResultSet<T> query((const T*)index->GetSample(indices[i]), m_iCEF + 1); index->RefineSearchIndex(query, false); RebuildNeighbors(index, indices[i], outnodes, query.GetResults(), m_iCEF + 1); std::unordered_map<SizeType, SizeType>::const_iterator iter; for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) { if (outnodes[j] >= 0 && outnodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[outnodes[j]]; if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second; } if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end()) outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second; } if (output != nullptr) newGraph->SaveGraph(output); return ErrorCode::Success; } template <typename T> void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted, int CEF) { COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), CEF + 1); index->RefineSearchIndex(query, searchDeleted); RebuildNeighbors(index, node, 
m_pNeighborhoodGraph[node], query.GetResults(), CEF + 1); if (updateNeighbors) { // update neighbors for (int j = 0; j <= CEF; j++) { BasicResult* item = query.GetResult(j); if (item->VID < 0) break; if (item->VID == node) continue; InsertNeighbors(index, item->VID, node, item->Dist); } } } inline std::uint64_t BufferSize() const { return m_pNeighborhoodGraph.BufferSize(); } ErrorCode LoadGraph(std::shared_ptr<Helper::DiskPriorityIO> input, SizeType blockSize, SizeType capacity) { ErrorCode ret = ErrorCode::Success; if ((ret = m_pNeighborhoodGraph.Load(input, blockSize, capacity)) != ErrorCode::Success) return ret; m_iGraphSize = m_pNeighborhoodGraph.R(); m_iNeighborhoodSize = m_pNeighborhoodGraph.C(); return ret; } ErrorCode LoadGraph(std::string sGraphFilename, SizeType blockSize, SizeType capacity) { ErrorCode ret = ErrorCode::Success; if ((ret = m_pNeighborhoodGraph.Load(sGraphFilename, blockSize, capacity)) != ErrorCode::Success) return ret; m_iGraphSize = m_pNeighborhoodGraph.R(); m_iNeighborhoodSize = m_pNeighborhoodGraph.C(); return ret; } ErrorCode LoadGraph(char* pGraphMemFile, SizeType blockSize, SizeType capacity) { ErrorCode ret = ErrorCode::Success; if ((ret = m_pNeighborhoodGraph.Load(pGraphMemFile, blockSize, capacity)) != ErrorCode::Success) return ret; m_iGraphSize = m_pNeighborhoodGraph.R(); m_iNeighborhoodSize = m_pNeighborhoodGraph.C(); return ErrorCode::Success; } ErrorCode SaveGraph(std::string sGraphFilename) const { LOG(Helper::LogLevel::LL_Info, "Save %s To %s\n", m_pNeighborhoodGraph.Name().c_str(), sGraphFilename.c_str()); auto ptr = f_createIO(); if (ptr == nullptr || !ptr->Initialize(sGraphFilename.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile; return SaveGraph(ptr); } ErrorCode SaveGraph(std::shared_ptr<Helper::DiskPriorityIO> output) const { IOBINARY(output, WriteBinary, sizeof(SizeType), (char*)&m_iGraphSize); IOBINARY(output, WriteBinary, sizeof(DimensionType), (char*)&m_iNeighborhoodSize); for (int 
i = 0; i < m_iGraphSize; i++) IOBINARY(output, WriteBinary, sizeof(SizeType) * m_iNeighborhoodSize, (char*)m_pNeighborhoodGraph[i]); LOG(Helper::LogLevel::LL_Info, "Save %s (%d,%d) Finish!\n", m_pNeighborhoodGraph.Name().c_str(), m_iGraphSize, m_iNeighborhoodSize); return ErrorCode::Success; } inline ErrorCode AddBatch(SizeType num) { ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num); if (ret != ErrorCode::Success) return ret; m_iGraphSize += num; return ErrorCode::Success; } inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; } inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; } void Update(SizeType row, DimensionType col, SizeType val) { std::lock_guard<std::mutex> lock(m_dataUpdateLock[row]); m_pNeighborhoodGraph[row][col] = val; } inline void SetR(SizeType rows) { m_pNeighborhoodGraph.SetR(rows); m_iGraphSize = rows; } inline SizeType R() const { return m_iGraphSize; } inline std::string Type() const { return m_pNeighborhoodGraph.Name(); } static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type); protected: // Graph structure SizeType m_iGraphSize; COMMON::Dataset<SizeType> m_pNeighborhoodGraph; FineGrainedLock m_dataUpdateLock; public: int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit; DimensionType m_iNeighborhoodSize; float m_iNeighborhoodScale, m_iCEFScale; int m_iRefineIter, m_iCEF, m_iAddCEF, m_iMaxCheckForRefineGraph, m_iGPUGraphType, m_iGPURefineSteps, m_iGPURefineDepth, m_iGPULeafSize, m_iGPUBatches; }; } } #endif
GB_binop__le_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__le_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__le_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__le_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_fp64) // A*D function (colscale): GB (_AxD__le_fp64) // D*A function (rowscale): GB (_DxB__le_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__le_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__le_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_fp64) // C=scalar+B GB (_bind1st__le_fp64) // C=scalar+B' GB (_bind1st_tran__le_fp64) // C=A+scalar GB (_bind2nd__le_fp64) // C=A'+scalar GB (_bind2nd_tran__le_fp64) // C type: bool // A type: double // B,b type: double // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] 
#define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_FP64 || GxB_NO_LE_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__le_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_fp64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_fp64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__le_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, 
const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__le_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < 
bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ellipticBuildJacobi.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "elliptic.h" void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagTri3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A); void BuildLocalIpdgDiagQuad3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A); void BuildLocalIpdgDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A); void BuildLocalContinuousDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A); void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *A); void BuildLocalContinuousDiagQuad3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *A); void BuildLocalContinuousDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A); void BuildLocalContinuousDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A); void ellipticBuildJacobi(elliptic_t* elliptic, dfloat lambda, dfloat **invDiagA){ mesh_t *mesh = elliptic->mesh; setupAide options = elliptic->options; // surface mass matrices MS = MM*LIFT dfloat *MS = (dfloat *) calloc(mesh->Nfaces*mesh->Nfp*mesh->Nfp,sizeof(dfloat)); for (int f=0;f<mesh->Nfaces;f++) { for (int n=0;n<mesh->Nfp;n++) { int fn = mesh->faceNodes[f*mesh->Nfp+n]; for (int 
m=0;m<mesh->Nfp;m++) { dfloat MSnm = 0; for (int i=0;i<mesh->Np;i++){ MSnm += mesh->MM[fn+i*mesh->Np]*mesh->LIFT[i*mesh->Nfp*mesh->Nfaces+f*mesh->Nfp+m]; } MS[m+n*mesh->Nfp + f*mesh->Nfp*mesh->Nfp] = MSnm; } } } // build some monolithic basis arrays (for quads and hexes) dfloat *B = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Br = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Bs = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Bt = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); if (elliptic->elementType==QUADRILATERALS) { int mode = 0; for(int nj=0;nj<mesh->N+1;++nj){ for(int ni=0;ni<mesh->N+1;++ni){ int node = 0; for(int j=0;j<mesh->N+1;++j){ for(int i=0;i<mesh->N+1;++i){ if(nj==j && ni==i) B[mode*mesh->Np+node] = 1; if(nj==j) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i]; if(ni==i) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j]; ++node; } } ++mode; } } } if (elliptic->elementType==HEXAHEDRA) { int mode = 0; for(int nk=0;nk<mesh->N+1;++nk){ for(int nj=0;nj<mesh->N+1;++nj){ for(int ni=0;ni<mesh->N+1;++ni){ int node = 0; for(int k=0;k<mesh->N+1;++k){ for(int j=0;j<mesh->N+1;++j){ for(int i=0;i<mesh->N+1;++i){ if(nk==k && nj==j && ni==i) B[mode*mesh->Np+node] = 1; if(nj==j && nk==k) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i]; if(ni==i && nk==k) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j]; if(ni==i && nj==j) Bt[mode*mesh->Np+node] = mesh->D[nk+mesh->Nq*k]; ++node; } } } ++mode; } } } } dlong diagNnum = mesh->Np*mesh->Nelements; dfloat *diagA = (dfloat*) calloc(diagNnum, sizeof(dfloat)); if(mesh->rank==0) printf("Building diagonal...");fflush(stdout); if (options.compareArgs("DISCRETIZATION","IPDG")) { switch(elliptic->elementType){ case TRIANGLES: if (options.compareArgs("BASIS","BERN")) { #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgBBDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } else { if(mesh->dim==2){ #pragma omp parallel for for(dlong 
eM=0;eM<mesh->Nelements;++eM){ BuildLocalIpdgDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } } else{ #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM){ BuildLocalIpdgDiagTri3D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } } } break; case QUADRILATERALS: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagQuad2D(elliptic, mesh, lambda, MS, B, Br, Bs, eM, diagA + eM*mesh->Np); // TW: MISSING break; case TETRAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagTet3D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); break; case HEXAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagHex3D(elliptic, mesh, lambda, MS, B, Br, Bs, Bt, eM, diagA + eM*mesh->Np); break; } } else if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) { switch(elliptic->elementType){ case TRIANGLES: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagTri2D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np); break; case QUADRILATERALS:{ #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM){ if(elliptic->dim==2) BuildLocalContinuousDiagQuad2D(elliptic, mesh, lambda, eM, B, Br, Bs, diagA + eM*mesh->Np); if(elliptic->dim==3) BuildLocalContinuousDiagQuad3D(elliptic, mesh, lambda, eM, B, Br, Bs, diagA + eM*mesh->Np); } }break; case TETRAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagTet3D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np); break; case HEXAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagHex3D(elliptic, mesh, lambda, eM, B, Br, Bs, Bt, diagA + eM*mesh->Np); break; } } if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) ogsGatherScatter(diagA, ogsDfloat, ogsAdd, elliptic->ogs); *invDiagA = (dfloat*) calloc(diagNnum, sizeof(dfloat)); for (dlong n=0;n<mesh->Nelements*mesh->Np;n++) { 
(*invDiagA)[n] = 1/diagA[n]; } if(mesh->rank==0) printf("done.\n"); free(diagA); free(MS); free(B); free(Br); free(Bs); free(Bt); } void BuildLocalIpdgDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat J = mesh->vgeo[vbase+JID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*drdx*drdx*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdx*dsdx*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdx*drdx*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdx*dsdx*mesh->Sss[n*mesh->Np+n]; A[n] += J*drdy*drdy*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdy*dsdy*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdy*drdy*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdy*dsdy*mesh->Sss[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int m=0;m<mesh->Nfp;++m){ int mM = 
mesh->faceNodes[fM*mesh->Nfp+m]; if (mM == nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; } } } } } } void BuildLocalIpdgDiagTri3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat drdz = mesh->vgeo[vbase+RZID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat dsdz = mesh->vgeo[vbase+SZID]; dfloat J = mesh->vgeo[vbase+JID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*drdx*drdx*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdx*dsdx*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdx*drdx*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdx*dsdx*mesh->Sss[n*mesh->Np+n]; A[n] += J*drdy*drdy*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdy*dsdy*mesh->Srs[n*mesh->Np+n]; 
A[n] += J*dsdy*drdy*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdy*dsdy*mesh->Sss[n*mesh->Np+n]; A[n] += J*drdz*drdz*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdz*dsdz*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdz*drdz*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdz*dsdz*mesh->Sss[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat nz = mesh->sgeo[sid+NZID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM == nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM]; dfloat DzMim = 
drdz*mesh->Dr[iM*mesh->Np+nM] + dsdz*mesh->Ds[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; A[nM] += -0.5*nz*(1+bcD)*(1-bcN)*MSfni*DzMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n]; dfloat DzMin = drdz*mesh->Dr[iM*mesh->Np+n] + dsdz*mesh->Ds[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; /* BUG FIX: was ny (copy-paste typo) -- the DzMin flux term must be weighted by the z-normal nz, matching the symmetric Dn1'*mmE term above (which uses nz for DzMim) and the Tet3D builder */ A[n] += -0.5*nz*(1+bcD)*(1-bcN)*DzMin*MSfim; } } } } } } void BuildLocalIpdgPatchAxTri2D(elliptic_t* elliptic, mesh_t* mesh, int basisNp, dfloat *basis, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); //generate the BB diagonal by extracting it from the transformed patch void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dfloat *patchA = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); int basisNp = mesh->Np; dfloat *basis = mesh->VB; BuildLocalIpdgPatchAxTri2D(elliptic, mesh, basisNp, basis, lambda, MS, eM, patchA); for(int n=0;n<mesh->Np;++n) { A[n] = patchA[n*mesh->Np+n]; //store the diagonal entry } free(patchA); } //returns the continuous C0 patch A matrix for element eM void BuildLocalContinuousDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) { dlong gbase = eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked
nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn]; A[n] += JW*(dlndx*dlndx+dlndy*dlndy); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace 
terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM]; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat* Bs, dfloat *A) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int iid = nx+ny*mesh->Nq; if (elliptic->mapB[nx+ny*mesh->Nq+eM*mesh->Np]!=1) { A[iid] = 0; for (int k=0;k<mesh->Nq;k++) { int id = k+ny*mesh->Nq; dfloat Grr = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np]; A[iid] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int id = nx+k*mesh->Nq; dfloat Gss = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np]; A[iid] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } int id = nx+ny*mesh->Nq; dfloat Grs = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; A[iid] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat JW = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np]; A[iid] += JW*lambda; } else { A[iid] = 1; //just put a 1 so A is invertable } } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked 
nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagQuad3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat drdz = mesh->vgeo[base+mesh->Np*RZID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat dsdz = mesh->vgeo[base+mesh->Np*SZID]; dfloat dtdx = mesh->vgeo[base+mesh->Np*TXID]; dfloat dtdy = mesh->vgeo[base+mesh->Np*TYID]; dfloat dtdz = mesh->vgeo[base+mesh->Np*TZID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn] + dtdx; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn] + dtdy; dfloat dlndz = drdz*Br[idn] + dsdz*Bs[idn] + dtdz; A[n] += JW*(dlndx*dlndx + dlndy*dlndy + dlndz*dlndz); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat drdzM = mesh->vgeo[baseM+mesh->Np*RZID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; dfloat dsdzM = mesh->vgeo[baseM+mesh->Np*SZID]; dfloat dtdxM = mesh->vgeo[baseM+mesh->Np*TXID]; dfloat dtdyM = mesh->vgeo[baseM+mesh->Np*TYID]; dfloat dtdzM = mesh->vgeo[baseM+mesh->Np*TZID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = 
mesh->sgeo[base+NYID]; dfloat nz = mesh->sgeo[base+NZID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM] + dtdxM; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM] + dtdyM; dfloat dlndzM = drdzM*Br[idnM] + dsdzM*Bs[idnM] + dtdzM; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM+nz*dlndzM ; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; A[n] += -0.5*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagQuad3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat* Bs, dfloat *A) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int iid = nx+ny*mesh->Nq; A[iid] = 0; for (int k=0;k<mesh->Nq;k++) { int id = k+ny*mesh->Nq; dfloat Grr = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np]; A[iid] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int id = nx+k*mesh->Nq; dfloat Gss = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np]; A[iid] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } int id = nx+ny*mesh->Nq; dfloat Grs = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; A[iid] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; // id = nx+ny*mesh->Nq; // dfloat Grt = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G02ID*mesh->Np]; // A[iid] += 2*Grt*mesh->D[nx+nx*mesh->Nq]; // id = nx+ny*mesh->Nq; // dfloat Gst = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G12ID*mesh->Np]; // A[iid] += 2*Gst*mesh->D[ny+ny*mesh->Nq]; // dfloat Gtt = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G22ID*mesh->Np]; // A[iid] += Gtt; dfloat JW = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np]; A[iid] += JW*lambda; } } } void BuildLocalIpdgDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, 
dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat drdz = mesh->vgeo[vbase+RZID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat dsdz = mesh->vgeo[vbase+SZID]; dfloat dtdx = mesh->vgeo[vbase+TXID]; dfloat dtdy = mesh->vgeo[vbase+TYID]; dfloat dtdz = mesh->vgeo[vbase+TZID]; dfloat J = mesh->vgeo[vbase+JID]; dfloat G00 = drdx*drdx + drdy*drdy + drdz*drdz; dfloat G01 = drdx*dsdx + drdy*dsdy + drdz*dsdz; dfloat G02 = drdx*dtdx + drdy*dtdy + drdz*dtdz; dfloat G10 = dsdx*drdx + dsdy*drdy + dsdz*drdz; dfloat G11 = dsdx*dsdx + dsdy*dsdy + dsdz*dsdz; dfloat G12 = dsdx*dtdx + dsdy*dtdy + dsdz*dtdz; dfloat G20 = dtdx*drdx + dtdy*drdy + dtdz*drdz; dfloat G21 = dtdx*dsdx + dtdy*dsdy + dtdz*dsdz; dfloat G22 = dtdx*dtdx + dtdy*dtdy + dtdz*dtdz; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*G00*mesh->Srr[n*mesh->Np+n]; A[n] += J*G01*mesh->Srs[n*mesh->Np+n]; A[n] += J*G02*mesh->Srt[n*mesh->Np+n]; A[n] += J*G10*mesh->Ssr[n*mesh->Np+n]; A[n] += J*G11*mesh->Sss[n*mesh->Np+n]; A[n] += J*G12*mesh->Sst[n*mesh->Np+n]; A[n] += J*G20*mesh->Str[n*mesh->Np+n]; A[n] += J*G21*mesh->Sts[n*mesh->Np+n]; A[n] += J*G22*mesh->Stt[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat nz = mesh->sgeo[sid+NZID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = 
elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ for(int m=0;m<mesh->Nfp;++m){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM] + dtdx*mesh->Dt[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM] + dtdy*mesh->Dt[iM*mesh->Np+nM]; dfloat DzMim = drdz*mesh->Dr[iM*mesh->Np+nM] + dsdz*mesh->Ds[iM*mesh->Np+nM] + dtdz*mesh->Dt[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; A[nM] += -0.5*nz*(1+bcD)*(1-bcN)*MSfni*DzMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n] + dtdx*mesh->Dt[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n] + dtdy*mesh->Dt[iM*mesh->Np+n]; dfloat DzMin = drdz*mesh->Dr[iM*mesh->Np+n] + dsdz*mesh->Ds[iM*mesh->Np+n] + dtdz*mesh->Dt[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; 
A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; A[n] += -0.5*nz*(1+bcD)*(1-bcN)*DzMin*MSfim; } } } } } } void BuildLocalContinuousDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) { dlong gbase = eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Grt = mesh->ggeo[gbase + G02ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat Gst = mesh->ggeo[gbase + G12ID]; dfloat Gtt = mesh->ggeo[gbase + G22ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grt*mesh->Srt[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; A[n] += Gst*mesh->Sst[n+n*mesh->Np]; A[n] += Grt*mesh->Str[n+n*mesh->Np]; A[n] += Gst*mesh->Sts[n+n*mesh->Np]; A[n] += Gtt*mesh->Stt[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat drdz = mesh->vgeo[base+mesh->Np*RZID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat dsdz = mesh->vgeo[base+mesh->Np*SZID]; dfloat dtdx = 
mesh->vgeo[base+mesh->Np*TXID]; dfloat dtdy = mesh->vgeo[base+mesh->Np*TYID]; dfloat dtdz = mesh->vgeo[base+mesh->Np*TZID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn] + dtdx*Bt[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn] + dtdy*Bt[idn]; dfloat dlndz = drdz*Br[idn] + dsdz*Bs[idn] + dtdz*Bt[idn]; A[n] += JW*(dlndx*dlndx+dlndy*dlndy+dlndz*dlndz); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat drdzM = mesh->vgeo[baseM+mesh->Np*RZID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; dfloat dsdzM = mesh->vgeo[baseM+mesh->Np*SZID]; dfloat dtdxM = mesh->vgeo[baseM+mesh->Np*TXID]; dfloat dtdyM = mesh->vgeo[baseM+mesh->Np*TYID]; dfloat dtdzM = mesh->vgeo[baseM+mesh->Np*TZID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat nz = mesh->sgeo[base+NZID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM] + dtdxM*Bt[idnM]; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM] + dtdyM*Bt[idnM]; dfloat dlndzM = drdzM*Br[idnM] + dsdzM*Bs[idnM] + dtdzM*Bt[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM+nz*dlndzM; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double 
checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A) { for (int nz=0;nz<mesh->Nq;nz++) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int idn = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; if (elliptic->mapB[idn+eM*mesh->Np]!=1) { A[idn] = 0; int id = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dlong base = eM*mesh->Np*mesh->Nggeo; dfloat Grs = mesh->ggeo[base + id + G01ID*mesh->Np]; A[idn] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat Grt = mesh->ggeo[base + id + G02ID*mesh->Np]; A[idn] += 2*Grt*mesh->D[nx+nx*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; dfloat Gst = mesh->ggeo[base + id + G12ID*mesh->Np]; A[idn] += 2*Gst*mesh->D[ny+ny*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; for (int k=0;k<mesh->Nq;k++) { int iid = k+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Grr = mesh->ggeo[base + iid + G00ID*mesh->Np]; A[idn] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+k*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Gss = mesh->ggeo[base + iid + G11ID*mesh->Np]; A[idn] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+ny*mesh->Nq+k*mesh->Nq*mesh->Nq; dfloat Gtt = mesh->ggeo[base + iid + G22ID*mesh->Np]; A[idn] += Gtt*mesh->D[nz+k*mesh->Nq]*mesh->D[nz+k*mesh->Nq]; } dfloat JW = mesh->ggeo[base + id + GWJID*mesh->Np]; A[idn] += JW*lambda; } else { A[idn] = 1; //just put a 1 so A is invertable } } } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int 
n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } }
/* ===== file: GB_binop__ldexp_fp64.c ===== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ldexp_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__ldexp_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__ldexp_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__ldexp_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ldexp_fp64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__ldexp_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__ldexp_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ldexp_fp64) // C=scalar+B GB (_bind1st__ldexp_fp64) // C=scalar+B' GB (_bind1st_tran__ldexp_fp64) // C=A+scalar GB (_bind2nd__ldexp_fp64) // C=A'+scalar GB (_bind2nd_tran__ldexp_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = ldexp (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ldexp (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LDEXP || GxB_NO_FP64 || GxB_NO_LDEXP_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ldexp_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ldexp_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ldexp_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ldexp_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ldexp_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, 
const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ldexp_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Auto-generated kernel (SuiteSparse:GraphBLAS Generated/ style): the actual
// loop body lives in the #include'd template file; this wrapper only binds the
// ldexp/fp64 operator and types via the macros defined earlier in the file.
GrB_Info GB (_AemultB_04__ldexp_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    // operator/type combination compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ldexp_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ldexp_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; entry p present iff GBB(Bb,p)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        // NOTE(review): bij is double but C's ldexp takes an int exponent;
        // the implicit double->int conversion appears intentional in the
        // generated GxB_LDEXP kernels — confirm against the generator.
        Cx [p] = ldexp (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ldexp_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; entry p present iff GBB(Ab,p)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = ldexp (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = ldexp (x, aij) ;                  \
}

GrB_Info GB (_bind1st_tran__ldexp_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent generated code
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = ldexp (aij, y) ;                  \
}

GrB_Info GB (_bind2nd_tran__ldexp_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dynamic_smagorinsky_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // // System includes #include <vector> #include <map> // External includes #include "boost/smart_ptr.hpp" // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "utilities/openmp_utils.h" #include "utilities/geometry_utilities.h" #include "includes/cfd_variables.h" #include "fluid_dynamics_application_variables.h" #ifndef KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED #define KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Classes ///@{ /// Helper class to dynamically determine a value for the Smagorinsly parameter. /** This class uses the Variational Germano Identity to determine a value for the Smagorinsky parameter. This value is stored in the elemental variable C_SMAGORINSKY, the element implementation is responsible for using it. The ability to assign different values to different patches of elements (identified by the PATCH_INDEX variable) is supported, although it tends to produce unreliable results due to a 0/0 indetermination in patches with smooth velocity fields. This class is based in Oberai, A.A. and Wanderer, J., Variational formulation of the Germano identity for the Navier Stokes equations, Journal of Turbulence, 2005, vol 6. Note that the formulation described there requires a nested mesh. It takes the model part containing a coarse mesh as input and assumes that all elements will be subdivided before CalculateC() is called. Remember to call StoreCoarseMesh before refining the element, otherwise the coarse mesh will be lost. @see VMS for an element implementation that uses the Smagorinsky model. 
@see Local_Refine_Triangle_Mesh,Local_Refine_Tetrahedra_Mesh for the element refinement process. */ class DynamicSmagorinskyUtils { public: ///@name Life Cycle ///@{ /// Constructor /** @param rModelPart Reference to the model part containing the coarse mesh @param DomainSize Spatial dimension (2 or 3) */ DynamicSmagorinskyUtils(ModelPart& rModelPart, unsigned int DomainSize): mrModelPart(rModelPart), mDomainSize(DomainSize), mCoarseMesh(), mPatchIndices() {} /// Destructor ~DynamicSmagorinskyUtils() {} ///@} ///@name Operations ///@{ /// Store current mesh as coarse mesh. Call before refining. /** If you are refining more than once, this only has to be called before last refinement. */ void StoreCoarseMesh() { // Clear existing mesh (if any) mCoarseMesh.clear(); // Store current mesh for( ModelPart::ElementsContainerType::ptr_iterator itpElem = mrModelPart.Elements().ptr_begin(); itpElem != mrModelPart.Elements().ptr_end(); ++itpElem) { // (*itpElem)->GetValue(C_SMAGORINSKY) = 0.0; // Set the Smagorinsky parameter to zero for the coarse mesh (do this once to reset any input values) mCoarseMesh.push_back(*itpElem); } // Count the number of patches in the model (in parallel) const int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector ElementPartition; OpenMPUtils::DivideInPartitions(mCoarseMesh.size(),NumThreads,ElementPartition); std::vector< std::vector<int> > LocalIndices(NumThreads); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementsContainerType::iterator ElemBegin = mCoarseMesh.begin() + ElementPartition[k]; ModelPart::ElementsContainerType::iterator ElemEnd = mCoarseMesh.begin() + ElementPartition[k+1]; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { this->AddNewIndex(LocalIndices[k],itElem->GetValue(PATCH_INDEX)); } } // Combine the partial lists and create a map for PATCH_INDEX -> Vector position unsigned int Counter = 0; std::pair<int, unsigned int> NewVal; std::pair< 
std::map<int, unsigned int>::iterator, bool > Result; for( std::vector< std::vector<int> >::iterator itList = LocalIndices.begin(); itList != LocalIndices.end(); ++itList ) { for( std::vector<int>::iterator itIndex = itList->begin(); itIndex != itList->end(); ++itIndex) { // Note that instering in map already sorts and checks for uniqueness NewVal.first = *itIndex; NewVal.second = Counter; Result = mPatchIndices.insert(NewVal); if (Result.second) ++Counter; } } } /// Provide a value for the Smagorinsky coefficient using the Variational Germano Identity void CalculateC() { // Update the velocity values for the terms that belong to the coarse mesh this->SetCoarseVel(); // Partitioning const int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector CoarseElementPartition,FineElementPartition; OpenMPUtils::DivideInPartitions(mCoarseMesh.size(),NumThreads,CoarseElementPartition); OpenMPUtils::DivideInPartitions(mrModelPart.Elements().size(),NumThreads,FineElementPartition); // Initialize temporary containers unsigned int PatchNumber = mPatchIndices.size(); std::vector< std::vector<double> > GlobalPatchNum(NumThreads); // Numerator on each patch std::vector< std::vector<double> > GlobalPatchDen(NumThreads); // Denominator on each patch const double EnergyTol = 0.005; double TotalDissipation = 0; #pragma omp parallel reduction(+:TotalDissipation) { int k = OpenMPUtils::ThisThread(); // Initialize the iterator boundaries for this thread ModelPart::ElementsContainerType::iterator CoarseElemBegin = mCoarseMesh.begin() + CoarseElementPartition[k]; ModelPart::ElementsContainerType::iterator CoarseElemEnd = mCoarseMesh.begin() + CoarseElementPartition[k+1]; ModelPart::ElementsContainerType::iterator FineElemBegin = mrModelPart.ElementsBegin() + FineElementPartition[k]; ModelPart::ElementsContainerType::iterator FineElemEnd = mrModelPart.ElementsBegin() + FineElementPartition[k+1]; // Initialize some thread-local variables Vector LocalValues, LocalCoarseVel; 
Matrix LocalMassMatrix; ProcessInfo& rProcessInfo = mrModelPart.GetProcessInfo(); double Residual,Model; unsigned int PatchPosition; // Thread-local containers for the values in each patch std::vector<double>& rPatchNum = GlobalPatchNum[k]; std::vector<double>& rPatchDen = GlobalPatchDen[k]; rPatchNum.resize(PatchNumber,0.0);// Fill with zeros rPatchDen.resize(PatchNumber,0.0); if (mDomainSize == 2) { LocalValues.resize(9); LocalCoarseVel.resize(9); LocalMassMatrix.resize(9,9,false); array_1d<double,3> N; BoundedMatrix<double,3,2> DN_DX; BoundedMatrix<double,2,2> dv_dx; // Evaluate the N-S and model terms in each coarse element for( ModelPart::ElementsContainerType::iterator itElem = CoarseElemBegin; itElem != CoarseElemEnd; ++itElem) { PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms2D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] += Residual; rPatchDen[PatchPosition] += Model; TotalDissipation += Residual; } // Now evaluate the corresponding terms in the fine mesh for( ModelPart::ElementsContainerType::iterator itElem = FineElemBegin; itElem != FineElemEnd; ++itElem) { // Deactivate Smagorinsky to compute the residual of galerkin+stabilization terms only itElem->GetValue(C_SMAGORINSKY) = 0.0; PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms2D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] -= Residual; rPatchDen[PatchPosition] -= Model; } } else // mDomainSize == 3 { LocalValues.resize(16); LocalCoarseVel.resize(16); LocalMassMatrix.resize(16,16,false); array_1d<double,4> N; BoundedMatrix<double,4,3> DN_DX; BoundedMatrix<double,3,3> dv_dx; // Evaluate the N-S and model terms in each coarse element for( ModelPart::ElementsContainerType::iterator itElem = CoarseElemBegin; itElem != CoarseElemEnd; ++itElem) { PatchPosition = mPatchIndices[ 
itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms3D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] += Residual; rPatchDen[PatchPosition] += Model; TotalDissipation += Residual; } // Now evaluate the corresponding terms in the fine mesh for( ModelPart::ElementsContainerType::iterator itElem = FineElemBegin; itElem != FineElemEnd; ++itElem) { // Deactivate Smagorinsky to compute the residual of galerkin+stabilization terms only itElem->GetValue(C_SMAGORINSKY) = 0.0; PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms3D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] -= Residual; rPatchDen[PatchPosition] -= Model; } } } // Combine the results of each thread in position 0 for( std::vector< std::vector<double> >::iterator itNum = GlobalPatchNum.begin()+1, itDen = GlobalPatchDen.begin()+1; itNum != GlobalPatchNum.end(); ++itNum, ++itDen) { for( std::vector<double>::iterator TotalNum = GlobalPatchNum[0].begin(), LocalNum = itNum->begin(), TotalDen = GlobalPatchDen[0].begin(), LocalDen = itDen->begin(); TotalNum != GlobalPatchNum[0].end(); ++TotalNum,++LocalNum,++TotalDen,++LocalDen) { *TotalNum += *LocalNum; *TotalDen += *LocalDen; } } // Compute the smagorinsky coefficient for each patch by combining the values from each thread std::vector<double> PatchC(PatchNumber); double NumTol = EnergyTol * fabs(TotalDissipation); for( std::vector<double>::iterator itNum = GlobalPatchNum[0].begin(), itDen = GlobalPatchDen[0].begin(), itC = PatchC.begin(); itC != PatchC.end(); ++itNum, ++itDen, ++itC) { // If the dissipation we are "missing" by not considering Smagorinsky is small, do not use Smagorinsky (this avoids a division by ~0, as the denominator should go to zero too) if ( (fabs(*itNum) < NumTol) )//|| (fabs(*itDen) < 1.0e-12) ) *itC = 0.0; else *itC = sqrt( 0.5 * fabs( *itNum / *itDen ) ); } // 
Finally, assign each element its new smagorinsky value #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementsContainerType::iterator ElemBegin = mrModelPart.ElementsBegin() + FineElementPartition[k]; ModelPart::ElementsContainerType::iterator ElemEnd = mrModelPart.ElementsBegin() + FineElementPartition[k+1]; unsigned int PatchPosition; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; itElem->GetValue(C_SMAGORINSKY) = PatchC[PatchPosition]; } } } /// For the bridge analysis problem, correct the boundary flag after the refinement. /** Remember to run this AFTER EACH REFINEMENT STEP Possible values for the variable: 1.0 inlet, 2.0 bridge surface, 3.0 outlet, 0.0 otherwise @param rThisVariable The Kratos variable used to identify the boundary */ void CorrectFlagValues(Variable<double>& rThisVariable = FLAG_VARIABLE) { // Loop over coarse mesh to evaluate all terms that do not involve the fine mesh const int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(mrModelPart.NumberOfNodes(),NumThreads,NodePartition); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = mrModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = mrModelPart.NodesBegin() + NodePartition[k+1]; double Value0, Value1; for( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if( itNode->GetValue(FATHER_NODES).size() == 2 ) // If the node is refined { Value0 = itNode->GetValue(FATHER_NODES)[0].FastGetSolutionStepValue(rThisVariable); Value1 = itNode->GetValue(FATHER_NODES)[1].FastGetSolutionStepValue(rThisVariable); if( Value0 != Value1 ) // If this node is problematic { if ( Value0 == 0.0 || Value1 == 0.0 ) { // if either of the parents is not on the boundary, this node is not on the boundary 
itNode->FastGetSolutionStepValue(rThisVariable) = 0.0; } /* All remaining cases are unlikely in well-posed problems, I'm arbitrarily giving priority to the outlet, so that the node is only inlet or bridge surface if both parents are */ else if( Value0 == 3.0 ) { itNode->FastGetSolutionStepValue(rThisVariable) = Value0; } else if( Value1 == 3.0 ) { // The node is only bridge surface if both parents are itNode->FastGetSolutionStepValue(rThisVariable) = Value1; } else // Default behaviour: Parent 0 takes precedence { itNode->FastGetSolutionStepValue(rThisVariable) = Value0; } } } } } } ///@} private: ///@name Member Variables ///@{ /// ModelPart of the fluid problem ModelPart& mrModelPart; /// Spatial dimenstion unsigned int mDomainSize; /// Container for the coarse mesh (the fine mesh is stored by the model part) ModelPart::ElementsContainerType mCoarseMesh; /// A map relating patch indices to positions in the internal storage arrays std::map<int, unsigned int> mPatchIndices; ///@name Private Operations ///@{ /// Calculate the "Coarse Mesh" velocity /** The operations on the coarse mesh are evaluated on the fine mesh, but using an averaged velocity on the nodes that only exist on the fine mesh. Velocity gradients calculated on the fine mesh using this average velocity will be equal to those that would be obtained using the coarse mesh. This function assigns the "coarse" velocity value to all nodes */ void SetCoarseVel() { /* Note: This loop can't be parallelized, as we are relying on the fact that refined nodes are at the end of the list and their parents will be updated before the refined nodes are reached. 
There is an alternative solution (always calculate the coarse mesh velocity from the historic database) which can be parallelized but won't work for multiple levels of refinement */ for( ModelPart::NodeIterator itNode = mrModelPart.NodesBegin(); itNode != mrModelPart.NodesEnd(); ++itNode) { if( itNode->GetValue(FATHER_NODES).size() == 2 ) { Node<3>& rParent1 = itNode->GetValue(FATHER_NODES)[0]; Node<3>& rParent2 = itNode->GetValue(FATHER_NODES)[1]; itNode->GetValue(COARSE_VELOCITY) = 0.5 * ( rParent1.FastGetSolutionStepValue(VELOCITY) + rParent2.FastGetSolutionStepValue(VELOCITY) ); } else { itNode->GetValue(COARSE_VELOCITY) = itNode->FastGetSolutionStepValue(VELOCITY); } } } /// Return the Galerkin (+stabilization) and Model terms for this element (2D version) void GermanoTerms2D(Element& rElem, array_1d<double,3>& rShapeFunc, BoundedMatrix<double,3,2>& rShapeDeriv, BoundedMatrix<double,2,2>& rGradient, Vector& rNodalResidualContainer, Vector& rNodalVelocityContainer, Matrix& rMassMatrix, ProcessInfo& rProcessInfo, double& rResidual, double& rModel) { const double Dim = 2; const double NumNodes = 3; // Initialize double Area; double Density = 0.0; rGradient = ZeroMatrix(Dim,Dim); rResidual = 0.0; rModel = 0.0; // Calculate the residual this->CalculateResidual(rElem,rMassMatrix,rNodalVelocityContainer,rNodalResidualContainer,rProcessInfo); // We use rNodalVelocityContainer as an auxiliaty variable this->GetCoarseVelocity2D(rElem,rNodalVelocityContainer); for( Vector::iterator itRHS = rNodalResidualContainer.begin(), itVel = rNodalVelocityContainer.begin(); itRHS != rNodalResidualContainer.end(); ++itRHS, ++itVel) rResidual += (*itVel) * (*itRHS); // Calculate the model term GeometryUtils::CalculateGeometryData( rElem.GetGeometry(), rShapeDeriv, rShapeFunc, Area); // Compute Grad(u), Density and < Grad(w), Grad(u) > for (unsigned int j = 0; j < NumNodes; ++j) // Columns of <Grad(Ni),Grad(Nj)> { Density += rShapeFunc[j] * 
rElem.GetGeometry()[j].FastGetSolutionStepValue(DENSITY); const array_1d< double,3 >& rNodeVel = rElem.GetGeometry()[j].FastGetSolutionStepValue(VELOCITY); // Nodal velocity for (unsigned int i = 0; i < NumNodes; ++i) // Rows of <Grad(Ni),Grad(Nj)> { const array_1d< double,3 >& rNodeTest = rElem.GetGeometry()[i].GetValue(COARSE_VELOCITY); // Test function (particularized to coarse velocity) for (unsigned int k = 0; k < Dim; ++k) // Space Dimensions rModel += rNodeTest[k] * rShapeDeriv(i,k) * rShapeDeriv(j,k) * rNodeVel[k]; } for (unsigned int m = 0; m < Dim; ++m) // Calculate symmetric gradient { for (unsigned int n = 0; n < m; ++n) // Off-diagonal rGradient(m,n) += 0.5 * (rShapeDeriv(j,n) * rNodeVel[m] + rShapeDeriv(j,m) * rNodeVel[n]); // Symmetric gradient, only lower half is written rGradient(m,m) += rShapeDeriv(j,m) * rNodeVel[m]; // Diagonal } } rModel *= Area; // To this point, rModel contains the integral over the element of Grad(U_coarse):Grad(U) // Norm[ Grad(u) ] double SqNorm = 0.0; for (unsigned int i = 0; i < Dim; ++i) { for (unsigned int j = 0; j < i; ++j) SqNorm += 2.0 * rGradient(i,j) * rGradient(i,j); // Adding off-diagonal terms (twice, as matrix is symmetric) SqNorm += rGradient(i,i) * rGradient(i,i); // Diagonal terms } // "Fixed" part of Smagorinsky viscosity: Density * FilterWidth^2 * Norm(SymmetricGrad(U)). 
2*C^2 is accounted for in the caller function const double sqH = 2*Area; rModel *= Density * sqH * sqrt(SqNorm); } /// Return the Galerkin (+stabilization) and Model terms for this element (3D version) void GermanoTerms3D(Element& rElem, array_1d<double,4>& rShapeFunc, BoundedMatrix<double,4,3>& rShapeDeriv, BoundedMatrix<double,3,3>& rGradient, Vector& rNodalResidualContainer, Vector& rNodalVelocityContainer, Matrix& rMassMatrix, ProcessInfo& rProcessInfo, double& rResidual, double& rModel) { const double Dim = 3; const double NumNodes = 4; // Initialize double Volume; double Density = 0.0; rGradient = ZeroMatrix(Dim,Dim); rResidual = 0.0; rModel = 0.0; // Calculate the residual this->CalculateResidual(rElem,rMassMatrix,rNodalVelocityContainer,rNodalResidualContainer,rProcessInfo); // We use rNodalVelocityContainer as an auxiliaty variable this->GetCoarseVelocity3D(rElem,rNodalVelocityContainer); for( Vector::iterator itRHS = rNodalResidualContainer.begin(), itVel = rNodalVelocityContainer.begin(); itRHS != rNodalResidualContainer.end(); ++itRHS, ++itVel) rResidual += (*itVel) * (*itRHS); // Calculate the model term GeometryUtils::CalculateGeometryData( rElem.GetGeometry(), rShapeDeriv, rShapeFunc, Volume); // Compute Grad(u), Density and < Grad(w), Grad(u) > for (unsigned int j = 0; j < NumNodes; ++j) // Columns of <Grad(Ni),Grad(Nj)> { Density += rShapeFunc[j] * rElem.GetGeometry()[j].FastGetSolutionStepValue(DENSITY); const array_1d< double,3 >& rNodeVel = rElem.GetGeometry()[j].FastGetSolutionStepValue(VELOCITY); // Nodal velocity for (unsigned int i = 0; i < NumNodes; ++i) // Rows of <Grad(Ni),Grad(Nj)> { const array_1d< double,3 >& rNodeTest = rElem.GetGeometry()[i].GetValue(COARSE_VELOCITY); // Test function (particularized to coarse velocity) for (unsigned int k = 0; k < Dim; ++k) // Space Dimensions rModel += rNodeTest[k] * rShapeDeriv(i,k) * rShapeDeriv(j,k) * rNodeVel[k]; } for (unsigned int m = 0; m < Dim; ++m) // Calculate symmetric gradient { for 
(unsigned int n = 0; n < m; ++n) // Off-diagonal rGradient(m,n) += 0.5 * (rShapeDeriv(j,n) * rNodeVel[m] + rShapeDeriv(j,m) * rNodeVel[n]); // Symmetric gradient, only lower half is written rGradient(m,m) += rShapeDeriv(j,m) * rNodeVel[m]; // Diagonal } } rModel *= Volume; // To this point, rModel contains the integral over the element of Grad(U_coarse):Grad(U) // Norm[ Symmetric Grad(u) ] = ( 2 * Sij * Sij )^(1/2), we compute the Sij * Sij part in the following loop: double SqNorm = 0.0; for (unsigned int i = 0; i < Dim; ++i) { for (unsigned int j = 0; j < i; ++j) SqNorm += 2.0 * rGradient(i,j) * rGradient(i,j); // Adding off-diagonal terms (twice, as matrix is symmetric) SqNorm += rGradient(i,i) * rGradient(i,i); // Diagonal terms } const double cubeH = 6*Volume; rModel *= Density * pow(cubeH, 2.0/3.0) * sqrt(2.0 * SqNorm); } /// Equivalent to VMS2DSmagorinsky::GetFirstDerivativesVector(), using the velocity evaluated on the coarse mesh void GetCoarseVelocity2D(Element& rElement, Vector& rVar) { unsigned int LocalIndex = 0; const Element::GeometryType& rGeom = rElement.GetGeometry(); for (unsigned int itNode = 0; itNode < 3; ++itNode) { const array_1d< double,3>& rCoarseVel = rGeom[itNode].GetValue(COARSE_VELOCITY); rVar[LocalIndex++] = rCoarseVel[0]; rVar[LocalIndex++] = rCoarseVel[1]; rVar[LocalIndex++] = 0.0; // Pressure Dof } } /// Equivalent to VMS3DSmagorinsky::GetFirstDerivativesVector(), using the velocity evaluated on the coarse mesh void GetCoarseVelocity3D(Element& rElement, Vector& rVar) { unsigned int LocalIndex = 0; const Element::GeometryType& rGeom = rElement.GetGeometry(); for (unsigned int itNode = 0; itNode < 4; ++itNode) { const array_1d< double,3>& rCoarseVel = rGeom[itNode].GetValue(COARSE_VELOCITY); rVar[LocalIndex++] = rCoarseVel[0]; rVar[LocalIndex++] = rCoarseVel[1]; rVar[LocalIndex++] = rCoarseVel[2]; rVar[LocalIndex++] = 0.0; // Pressure Dof } } /// Call the element's member functions to obtain its residual void 
CalculateResidual(Element& rElement, Matrix& rMassMatrix, ///@todo This matrix and the next vector should be transformed to static members once we find a threadsafe way to do so Vector& rAuxVector, Vector& rResidual, ProcessInfo& rCurrentProcessInfo) { rElement.InitializeNonLinearIteration(rCurrentProcessInfo); // Dynamic stabilization terms rElement.CalculateRightHandSide(rResidual,rCurrentProcessInfo); // Dynamic Terms rElement.CalculateMassMatrix(rMassMatrix,rCurrentProcessInfo); rElement.GetSecondDerivativesVector(rAuxVector,0); noalias(rResidual) -= prod(rMassMatrix,rAuxVector); // Velocity Terms rElement.CalculateLocalVelocityContribution(rMassMatrix,rResidual,rCurrentProcessInfo); // Note that once we are here, we no longer need the mass matrix } /// Check if a patch index is known void AddNewIndex( std::vector<int>& rIndices, int ThisIndex ) { bool IsNew = true; for( std::vector<int>::iterator itIndex = rIndices.begin(); itIndex != rIndices.end(); ++itIndex) { if( ThisIndex == *itIndex) { IsNew = false; break; } } if (IsNew) rIndices.push_back(ThisIndex); } ///@} // Private operations }; ///@} Kratos classes ///@} Application group } // namespace Kratos #endif /* KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED */
DenseMatrix.h
// Copyright (c) 2004-2022 Tomáš Oberhuber et al. // // This file is part of TNL - Template Numerical Library (https://tnl-project.org/) // // SPDX-License-Identifier: MIT #pragma once namespace TNL { namespace Matrices { namespace details { template< typename Device > class DenseDeviceDependentCode; template<> class DenseDeviceDependentCode< Devices::Host > { public: typedef Devices::Host Device; template< typename Real, typename Index, bool RowMajorOrder, typename RealAllocator, typename InVector, typename OutVector > static void vectorProduct( const DenseMatrixView< Real, Device, Index, RowMajorOrder >& matrix, const InVector& inVector, OutVector& outVector ) { #ifdef HAVE_OPENMP #pragma omp parallel for if( Devices::Host::isOMPEnabled() ) #endif for( Index row = 0; row < matrix.getRows(); row++ ) outVector[ row ] = matrix.rowVectorProduct( row, inVector ); } }; template<> class DenseDeviceDependentCode< Devices::Cuda > { public: typedef Devices::Cuda Device; template< typename Real, typename Index, bool RowMajorOrder, typename RealAllocator, typename InVector, typename OutVector > static void vectorProduct( const DenseMatrixView< Real, Device, Index, RowMajorOrder >& matrix, const InVector& inVector, OutVector& outVector ) { MatrixVectorProductCuda( matrix, inVector, outVector ); } }; #ifdef HAVE_CUDA template< typename Real, typename Index, bool RowMajorOrder, typename RealAllocator, typename Matrix1, typename Matrix2, int tileDim, int tileRowBlockSize > __global__ void DenseMatrixProductKernel( Dense< Real, Devices::Cuda, Index, RowMajorOrder >* resultMatrix, const Matrix1* matrixA, const Matrix2* matrixB, const Real matrixAMultiplicator, const Real matrixBMultiplicator, const Index gridIdx_x, const Index gridIdx_y ) { /**** * Here we compute product C = A * B. To profit from the fast * shared memory we do it by tiles. 
*/ typedef Index IndexType; typedef Real RealType; __shared__ Real tileA[ tileDim * tileDim ]; __shared__ Real tileB[ tileDim * tileDim ]; __shared__ Real tileC[ tileDim * tileDim ]; const IndexType& matrixARows = matrixA->getRows(); const IndexType& matrixAColumns = matrixA->getColumns(); const IndexType& matrixBRows = matrixB->getRows(); const IndexType& matrixBColumns = matrixB->getColumns(); /**** * Reset the tile C */ for( IndexType row = 0; row < tileDim; row += tileRowBlockSize ) tileC[ ( row + threadIdx.y ) * tileDim + threadIdx.x ] = 0.0; /**** * Compute the result tile coordinates */ const IndexType resultTileRow = ( gridIdx_y * gridDim.y + blockIdx.y ) * tileDim; const IndexType resultTileColumn = ( gridIdx_x * gridDim.x + blockIdx.x ) * tileDim; /**** * Sum over the matrix tiles */ for( IndexType i = 0; i < matrixAColumns; i += tileDim ) { for( IndexType row = 0; row < tileDim; row += tileRowBlockSize ) { const IndexType matrixARow = resultTileRow + threadIdx.y + row; const IndexType matrixAColumn = i + threadIdx.x; if( matrixARow < matrixARows && matrixAColumn < matrixAColumns ) tileA[ ( threadIdx.y + row ) * tileDim + threadIdx.x ] = matrixAMultiplicator * matrixA->getElementFast( matrixARow, matrixAColumn ); const IndexType matrixBRow = i + threadIdx.y + row; const IndexType matrixBColumn = resultTileColumn + threadIdx.x; if( matrixBRow < matrixBRows && matrixBColumn < matrixBColumns ) tileB[ ( threadIdx.y + row ) * tileDim + threadIdx.x ] = matrixBMultiplicator * matrixB->getElementFast( matrixBRow, matrixBColumn ); } __syncthreads(); const IndexType tileALastRow = tnlCudaMin( tileDim, matrixARows - resultTileRow ); const IndexType tileALastColumn = tnlCudaMin( tileDim, matrixAColumns - i ); const IndexType tileBLastRow = tnlCudaMin( tileDim, matrixBRows - i ); const IndexType tileBLastColumn = tnlCudaMin( tileDim, matrixBColumns - resultTileColumn ); for( IndexType row = 0; row < tileALastRow; row += tileRowBlockSize ) { RealType sum( 0.0 ); for( 
IndexType j = 0; j < tileALastColumn; j++ ) sum += tileA[ ( threadIdx.y + row ) * tileDim + j ] * tileB[ j * tileDim + threadIdx.x ]; tileC[ ( row + threadIdx.y ) * tileDim + threadIdx.x ] += sum; } __syncthreads(); } /**** * Write the result tile to the result matrix */ const IndexType& matrixCRows = resultMatrix->getRows(); const IndexType& matrixCColumns = resultMatrix->getColumns(); for( IndexType row = 0; row < tileDim; row += tileRowBlockSize ) { const IndexType matrixCRow = resultTileRow + row + threadIdx.y; const IndexType matrixCColumn = resultTileColumn + threadIdx.x; if( matrixCRow < matrixCRows && matrixCColumn < matrixCColumns ) resultMatrix->setElementFast( matrixCRow, matrixCColumn, tileC[ ( row + threadIdx.y ) * tileDim + threadIdx.x ] ); } } template< typename Real, typename Index, typename Matrix, bool RowMajorOrder, typename RealAllocator, int tileDim, int tileRowBlockSize > __global__ void DenseTranspositionAlignedKernel( Dense< Real, Devices::Cuda, Index >* resultMatrix, const Matrix* inputMatrix, const Real matrixMultiplicator, const Index gridIdx_x, const Index gridIdx_y ) { __shared__ Real tile[ tileDim * tileDim ]; const Index columns = inputMatrix->getColumns(); const Index rows = inputMatrix->getRows(); /**** * Diagonal mapping of the CUDA blocks */ Index blockIdx_x, blockIdx_y; if( columns == rows ) { blockIdx_y = blockIdx.x; blockIdx_x = ( blockIdx.x + blockIdx.y ) % gridDim.x; } else { Index bID = blockIdx.x + gridDim.x * blockIdx.y; blockIdx_y = bID % gridDim.y; blockIdx_x = ( ( bID / gridDim.y ) + blockIdx_y ) % gridDim.x; } /**** * Read the tile to the shared memory */ const Index readRowPosition = ( gridIdx_y * gridDim.y + blockIdx_y ) * tileDim + threadIdx.y; const Index readColumnPosition = ( gridIdx_x * gridDim.x + blockIdx_x ) * tileDim + threadIdx.x; for( Index rowBlock = 0; rowBlock < tileDim; rowBlock += tileRowBlockSize ) { tile[ Cuda::getInterleaving( threadIdx.x * tileDim + threadIdx.y + rowBlock ) ] = 
inputMatrix->getElementFast( readColumnPosition, readRowPosition + rowBlock ); } __syncthreads(); /**** * Write the tile to the global memory */ const Index writeRowPosition = ( gridIdx_x * gridDim.x + blockIdx_x ) * tileDim + threadIdx.y; const Index writeColumnPosition = ( gridIdx_y * gridDim.y + blockIdx_y ) * tileDim + threadIdx.x; for( Index rowBlock = 0; rowBlock < tileDim; rowBlock += tileRowBlockSize ) { resultMatrix->setElementFast( writeColumnPosition, writeRowPosition + rowBlock, matrixMultiplicator * tile[ Cuda::getInterleaving( ( threadIdx.y + rowBlock ) * tileDim + threadIdx.x ) ] ); } } template< typename Real, typename Index, bool RowMajorOrder, typename RealAllocator, typename Matrix, int tileDim, int tileRowBlockSize > __global__ void DenseTranspositionNonAlignedKernel( Dense< Real, Devices::Cuda, Index >* resultMatrix, const Matrix* inputMatrix, const Real matrixMultiplicator, const Index gridIdx_x, const Index gridIdx_y ) { __shared__ Real tile[ tileDim * tileDim ]; const Index columns = inputMatrix->getColumns(); const Index rows = inputMatrix->getRows(); /**** * Diagonal mapping of the CUDA blocks */ Index blockIdx_x, blockIdx_y; if( columns == rows ) { blockIdx_y = blockIdx.x; blockIdx_x = ( blockIdx.x + blockIdx.y ) % gridDim.x; } else { Index bID = blockIdx.x + gridDim.x * blockIdx.y; blockIdx_y = bID % gridDim.y; blockIdx_x = ( ( bID / gridDim.y ) + blockIdx_y ) % gridDim.x; } /**** * Read the tile to the shared memory */ const Index readRowPosition = ( gridIdx_y * gridDim.y + blockIdx_y ) * tileDim + threadIdx.y; const Index readColumnPosition = ( gridIdx_x * gridDim.x + blockIdx_x ) * tileDim + threadIdx.x; if( readColumnPosition < columns ) { const Index readOffset = readRowPosition * columns + readColumnPosition; for( Index rowBlock = 0; rowBlock < tileDim; rowBlock += tileRowBlockSize ) { if( readRowPosition + rowBlock < rows ) tile[ Cuda::getInterleaving( threadIdx.x * tileDim + threadIdx.y + rowBlock ) ] = 
inputMatrix->getElementFast( readColumnPosition, readRowPosition + rowBlock ); } } __syncthreads(); /**** * Write the tile to the global memory */ const Index writeRowPosition = ( gridIdx_x * gridDim.x + blockIdx_x ) * tileDim + threadIdx.y; const Index writeColumnPosition = ( gridIdx_y * gridDim.y + blockIdx_y ) * tileDim + threadIdx.x; if( writeColumnPosition < rows ) { const Index writeOffset = writeRowPosition * rows + writeColumnPosition; for( Index rowBlock = 0; rowBlock < tileDim; rowBlock += tileRowBlockSize ) { if( writeRowPosition + rowBlock < columns ) resultMatrix->setElementFast( writeColumnPosition, writeRowPosition + rowBlock, matrixMultiplicator * tile[ Cuda::getInterleaving( ( threadIdx.y + rowBlock ) * tileDim + threadIdx.x ) ] ); } } } #endif } // namespace details } // namespace Matrices } // namespace TNL
kvstore_dist_server.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file mxnet_node.h
 * \brief implement mxnet nodes
 */
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_

#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "ps/ps.h"
#include "mxnet/kvstore.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"

namespace mxnet {
namespace kvstore {

// Control commands sent from the worker/frontend, encoded in the message head.
enum class CommandType {
  kController, kStopServer, kSyncMode, kSetGradientCompression
};

// Data-path request flavors, encoded in ps::KVMeta::cmd.
enum class DataHandleType {
  kDefaultPushPull, kCompressedPushPull, kRowSparsePushPull
};

/**
 * \brief executor runs a function using the thread called \ref Start
 */
class Executor {
 public:
  /**
   * \brief start the executor
   *
   * Runs the consumer loop on the calling thread: waits for queued
   * functions, executes each, and fulfills its promise so the producer
   * in Exec() unblocks. An empty Func (pushed by Stop()) ends the loop.
   */
  void Start() {
    std::unique_lock<std::mutex> lk(mu_);
    while (true) {
      cond_.wait(lk, [this]{return !queue_.empty();});
      Block blk = std::move(queue_.front());
      queue_.pop();
      lk.unlock();
      if (blk.f) {
        blk.f(); blk.p->set_value();
      } else {
        // empty function is the sentinel pushed by Stop(): acknowledge and exit
        blk.p->set_value(); break;
      }
      lk.lock();
    }
  }

  /**
   * \brief function
   */
  typedef std::function<void()> Func;

  /**
   * \brief let the thread called \ref Start to exec a function. threadsafe
   *
   * Blocks the caller until the executor thread has finished running func.
   */
  void Exec(const Func& func) {
    Block blk(func);
    auto fut = blk.p->get_future();
    {
      std::lock_guard<std::mutex> lk(mu_);
      queue_.push(std::move(blk));
      cond_.notify_one();
    }
    fut.wait();  // wait for the executor thread to run func
  }

  /**
   * \brief stop the thread, threadsafe
   */
  void Stop() {
    Exec(Func());
  }

 private:
  // A queued unit of work: the function plus the promise used to signal completion.
  struct Block {
    explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
    Func f;
    std::shared_ptr<std::promise<void>> p;
  };
  std::queue<Block> queue_;
  std::mutex mu_;
  std::condition_variable cond_;
};

// Parameter-server side of the distributed KVStore: receives pushes/pulls from
// workers via ps-lite and applies updates on the main thread through exec_.
class KVStoreDistServer {
 public:
  KVStoreDistServer() {
    using namespace std::placeholders;
    ps_server_ = new ps::KVServer<float>(0);
    // commands (head-only messages) and data (key/value messages) take
    // separate handler paths in ps-lite
    static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
        std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
    ps_server_->set_request_handle(
        std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
    sync_mode_ = false;
    gradient_compression_ = std::make_shared<GradientCompression>();
    log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
  }

  ~KVStoreDistServer() {
    delete ps_server_;
  }

  void set_controller(const KVStore::Controller& controller) {
    CHECK(controller);
    controller_ = controller;
  }

  void set_updater(const KVStore::Updater& updater)  {
    CHECK(updater);
    updater_ = updater;
  }

  /**
   * \brief blocked until received the command \a kSyncMode
   */
  void Run() {
    exec_.Start();
  }

 private:
  // Accumulation buffer for synchronous mode: pending worker requests plus the
  // merged gradient array for one key.
  struct MergeBuf {
    std::vector<ps::KVMeta> request;
    NDArray array;
  };

  // Dispatch a control command; unrecognized heads are forwarded to the
  // user-supplied controller on the main (executor) thread.
  void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
    CommandType recved_type = static_cast<CommandType>(recved.head);
    if (recved_type == CommandType::kStopServer) {
      exec_.Stop();
    } else if (recved_type == CommandType::kSyncMode) {
      sync_mode_ = true;
    } else if (recved_type == CommandType::kSetGradientCompression) {
      gradient_compression_->DecodeParams(recved.body);
    } else {
      // this uses value 0 for message id from frontend
      // let the main thread to execute ctrl, which is necessary for python
      exec_.Exec([this, recved]() {
          CHECK(controller_);
          controller_(recved.head, recved.body);
        });
    }
    app->Response(recved);
  }

  // Route a data request to the handler matching its cmd type.
  void DataHandleEx(const ps::KVMeta& req_meta,
                    const ps::KVPairs<real_t>& req_data,
                    ps::KVServer<real_t>* server) {
    DataHandleType recved_type = static_cast<DataHandleType>(req_meta.cmd);
    if (recved_type == DataHandleType::kRowSparsePushPull) {
      DataHandleRowSparse(req_meta, req_data, server);
    } else if (recved_type == DataHandleType::kCompressedPushPull) {
      DataHandleCompressed(req_meta, req_data, server);
    } else {
      DataHandleDefault(req_meta, req_data, server);
    }
    return;
  }

  // Synchronous-mode update: once every worker has contributed, run the
  // updater (or plain copy) on the merged array, answer all pending requests,
  // and clear the buffer. Before that point, just let the merge finish.
  inline void ApplyUpdates(const int key, MergeBuf *merged,
                           NDArray *stored, ps::KVServer<real_t>* server) {
    if (merged->request.size() == (size_t) ps::NumWorkers()) {
      // let the main thread to execute updater_, which is necessary for python
      if (updater_) {
        exec_.Exec([this, key, merged, stored](){
            CHECK(updater_);
            updater_(key, merged->array, stored);
          });
      } else {
        // if no updater, just copy
        CopyFromTo(merged->array, stored);
      }
      if (log_verbose_)  {
        LOG(INFO) << "sync response to " << merged->request.size() << " workers";
      }
      for (const auto& req : merged->request) {
        server->Response(req);
      }
      merged->request.clear();
      stored->WaitToRead();
    } else {
      merged->array.WaitToRead();
    }
  }

  // Translate encoded row keys (keys[1..num_rows]) into row indices relative
  // to master_key.
  // NOTE(review): indices[0] is first set to 0 and then overwritten by the
  // i==1 iteration — the initial assignment looks redundant; confirm intent.
  void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
                    const int64_t master_key, const int64_t num_rows) {
    indices[0] = 0;
    for (int64_t i = 1; i <= num_rows; i++) {
      int key = DecodeKey(keys[i]);
      auto row_id = key - master_key;
      indices[i - 1] = row_id;
    }
  }

  // Handle a row-sparse push/pull: keys[0] is the master key, the remaining
  // keys encode the row ids being pushed or pulled.
  void DataHandleRowSparse(const ps::KVMeta& req_meta,
                           const ps::KVPairs<real_t>& req_data,
                           ps::KVServer<real_t>* server) {
    int master_key = DecodeKey(req_data.keys[0]);
    auto num_rows = req_data.keys.size() - 1;
    auto& stored = store_[master_key];
    if (req_meta.push) {
      CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
      CHECK_EQ(req_data.lens[0], 0);
      real_t* data = req_data.vals.data();
      if (stored.is_none()) {
        if (log_verbose_)
LOG(INFO) << "initial push: " << master_key; // initialization CHECK_GT(num_rows, 0) << "init with empty data is not supported"; auto unit_len = req_data.lens[1]; CHECK_GT(unit_len, 0); size_t ds[] = {num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); CHECK_EQ(req_data.vals.size(), num_rows * unit_len); TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*) NDArray recved = NDArray(recv_blob, 0); stored = NDArray(kRowSparseStorage, dshape, Context()); Engine::Get()->PushAsync( [recved, stored](RunContext ctx, Engine::CallbackOnComplete on_complete) { NDArray rsp = stored; stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])}); mshadow::Stream<cpu> *s = ctx.get_stream<cpu>(); op::PopulateFullIdxRspImpl(s, &rsp); mshadow::Copy(rsp.data().FlatTo1D<cpu, float>(), recved.data().FlatTo1D<cpu, float>(), s); on_complete(); }, recved.ctx(), {recved.var()}, {stored.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); stored.WaitToRead(); server->Response(req_meta); return; } // synced push if (sync_mode_) { if (log_verbose_) LOG(INFO) << "sync push: " << master_key << " " << req_data.keys; auto& merged = merge_buf_[master_key]; if (merged.array.is_none()) { merged.array = NDArray(kRowSparseStorage, stored.shape(), Context()); } if (num_rows == 0) { // reset to zeros if (merged.request.size() == 0) { merged.array = NDArray(kRowSparseStorage, stored.shape(), Context()); } else { // nothing to aggregate } merged.request.push_back(req_meta); ApplyUpdates(master_key, &merged, &stored, server); return; } auto unit_len = req_data.lens[1]; CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); // data TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t) num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*) // row_sparse NDArray NDArray recved(kRowSparseStorage, 
stored.shape(), recv_blob, {idx_blob}, 0); if (merged.request.size() == 0) { CopyFromTo(recved, &merged.array, 0); } else { NDArray out(kRowSparseStorage, stored.shape(), Context()); std::vector<Engine::VarHandle> const_vars; const_vars.push_back(recved.var()); const_vars.push_back(merged.array.var()); // accumulate row_sparse gradients // TODO(haibin) override + operator for row_sparse NDArray // instead of calling BinaryComputeRspRsp directly using namespace mshadow; Engine::Get()->PushAsync( [recved, merged, out](RunContext ctx, Engine::CallbackOnComplete on_complete) { op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>( {}, {}, {recved, merged.array}, {kWriteTo}, {out}); on_complete(); }, recved.ctx(), const_vars, {out.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); CopyFromTo(out, &merged.array, 0); } merged.request.push_back(req_meta); ApplyUpdates(master_key, &merged, &stored, server); } else { // async push if (log_verbose_) LOG(INFO) << "async push: " << master_key; if (num_rows == 0) { server->Response(req_meta); return; } auto unit_len = req_data.lens[1]; CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t) num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*) NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0); exec_.Exec([this, master_key, &recved, &stored](){ CHECK(updater_); updater_(master_key, recved, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull if (log_verbose_) LOG(INFO) << "pull: " << master_key; ps::KVPairs<real_t> response; if (num_rows == 0) { std::vector<int> lens(req_data.keys.size(), 0); response.keys = req_data.keys; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); return; } 
CHECK(!stored.is_none()) << "init " << master_key << " first"; auto shape = stored.shape(); auto unit_len = shape.ProdShape(1, shape.ndim()); const float* data = stored.data().dptr<float>(); auto len = unit_len * num_rows; // concat values response.vals.resize(len); #pragma omp parallel for for (size_t i = 1; i <= num_rows; i++) { int key = DecodeKey(req_data.keys[i]); int64_t row_id = key - master_key; const auto src = data + row_id * unit_len; auto begin = (i - 1) * unit_len; auto end = i * unit_len; response.vals.segment(begin, end).CopyFrom(src, unit_len); } // setup response response.keys = req_data.keys; std::vector<int> lens(req_data.keys.size(), unit_len); lens[0] = 0; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); } } void DefaultStorageResponse(int key, const NDArray& stored, const ps::KVMeta& req_meta, const ps::KVPairs<real_t> &req_data, ps::KVServer<real_t>* server) { ps::KVPairs<real_t> response; CHECK(!stored.is_none()) << "init " << key << " first"; auto len = stored.shape().Size(); response.keys = req_data.keys; response.lens = {len}; // TODO(mli) try to remove this CopyFrom response.vals.CopyFrom(static_cast<const float*>(stored.data().dptr_), len); server->Response(req_meta, response); } void DataHandleCompressed(const ps::KVMeta& req_meta, const ps::KVPairs<real_t> &req_data, ps::KVServer<real_t>* server) { if (req_meta.push) { // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. 
so we need to make sure // the operators with \a NDArray are actually finished // first for dummy key which represents original size of array, whose len is 0 CHECK_EQ(req_data.keys.size(), (size_t)2); CHECK_EQ(req_data.lens.size(), (size_t)2); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]); int original_size = DecodeKey(req_data.keys[0]); int key = DecodeKey(req_data.keys[1]); auto& stored = store_[key]; size_t ds[] = {(size_t)req_data.lens[1]}; TShape dshape(ds, ds + 1); TBlob recv_blob((real_t*) req_data.vals.data(), // NOLINT(*) dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); NDArray decomp_buf = decomp_buf_[key]; dshape = TShape{(int64_t) original_size}; if (decomp_buf.is_none()) { decomp_buf = NDArray(dshape, Context()); } if (stored.is_none()) { stored = NDArray(dshape, Context()); gradient_compression_->Dequantize(recved, &stored, 0); server->Response(req_meta); stored.WaitToRead(); } else if (sync_mode_) { // synced push auto& merged = merge_buf_[key]; if (merged.array.is_none()) { merged.array = NDArray(dshape, Context()); } if (merged.request.size() == 0) { gradient_compression_->Dequantize(recved, &merged.array, 0); } else { gradient_compression_->Dequantize(recved, &decomp_buf, 0); merged.array += decomp_buf; } merged.request.push_back(req_meta); ApplyUpdates(key, &merged, &stored, server); } else { // async push gradient_compression_->Dequantize(recved, &decomp_buf, 0); exec_.Exec([this, key, &decomp_buf, &stored]() { CHECK(updater_); updater_(key, decomp_buf, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull CHECK_EQ(req_data.keys.size(), (size_t)1); CHECK_EQ(req_data.lens.size(), (size_t)0); int key = DecodeKey(req_data.keys[0]); DefaultStorageResponse(key, store_[key], req_meta, req_data, server); } } void DataHandleDefault(const ps::KVMeta& req_meta, const ps::KVPairs<real_t> &req_data, ps::KVServer<real_t>* server) { CHECK_EQ(req_meta.cmd, 
static_cast<int>(DataHandleType::kDefaultPushPull)); // do some check CHECK_EQ(req_data.keys.size(), (size_t)1); if (req_meta.push) { CHECK_EQ(req_data.lens.size(), (size_t)1); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]); } int key = DecodeKey(req_data.keys[0]); auto& stored = store_[key]; // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished ps::KVPairs<real_t> resp;// xym add resp.keys = req_data.keys;// xym add if (req_meta.push) { size_t ds[] = {(size_t)req_data.lens[0]}; TShape dshape(ds, ds + 1); TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*) dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); if (stored.is_none()) { // initialization stored = NDArray(dshape, Context()); CopyFromTo(recved, &stored, 0); server->Response(req_meta); stored.WaitToRead(); } else if (sync_mode_) { // synced push auto& merged = merge_buf_[key]; if (merged.array.is_none()) { merged.array = NDArray(dshape, Context()); } if (merged.request.size() == 0) { CopyFromTo(recved, &merged.array, 0); } else { merged.array += recved; } merged.request.push_back(req_meta); ApplyUpdates(key, &merged, &stored, server); } else { // async push exec_.Exec([this, key, &recved, &stored](){ CHECK(updater_); updater_(key, recved, &stored); }); // server->Response(req_meta); server->Response(req_meta, resp);//xym add this 2018-01-02 stored.WaitToRead(); } } else { DefaultStorageResponse(key, stored, req_meta, req_data, server); } } int DecodeKey(ps::Key key) { auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()]; return key - kr.begin(); } /** * \brief user defined mode for push */ bool sync_mode_; KVStore::Controller controller_; KVStore::Updater updater_; /** * \brief store_ contains the value at kvstore for each key */ std::unordered_map<int, NDArray> store_; /** * \brief merge_buf_ is a buffer used if 
sync_mode is true. It represents * values from different workers being merged. The store will be updated * to this value when values from all workers are pushed into this buffer. */ std::unordered_map<int, MergeBuf> merge_buf_; /** * \brief decomp_buf_ is a buffer into which compressed values are * decompressed before merging to the store. used when compress_!='none' */ std::unordered_map<int, NDArray> decomp_buf_; Executor exec_; ps::KVServer<float>* ps_server_; // whether to LOG verbose information bool log_verbose_; /** * \brief gradient compression object. * starts with none, used after SetGradientCompression sets the type * currently there is no support for unsetting gradient compression */ std::shared_ptr<kvstore::GradientCompression> gradient_compression_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
/* ==== file: SmallestElement.c ==== */
/*
 Gustavo T. Mastrobuono, NUSP 10734411;
 Henrique de S. Q. dos Santos, NUSP 10819029;
 Jhordan P. V. Pesantes, NUSP 11733353;
 Witor M. A. de Oliveira, NUSP 10692190;
 Yorvin A. R. Carrion, NUSP 11733332;
*/
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>

/* Sentinel larger than any matrix value. If the matrix may hold values
   with more than 6 digits, raise this bound as well. */
#define MAX_VALUE 999999

/*
 * Reads an n x n matrix and a column index `pos`, and for every row prints
 * the smallest element that is strictly greater than mat[row][pos]
 * (or -1 when no such element exists).
 *
 * Each row's search is independent, so a single flat `parallel for` with one
 * private running minimum per row is enough: no nested parallelism and no
 * critical section are needed (the original per-iteration `critical` and
 * `schedule(static, n)` serialized the work).
 */
int main(void){
    int n = 0, pos = 0;

    /* matrix size and reference column (whitespace-separated) */
    if (scanf("%d %d", &n, &pos) != 2 || n <= 0)
        return 1;

    /* allocate the matrix */
    int **mat = malloc((size_t) n * sizeof *mat);
    if (mat == NULL)
        return 1;
    for (int r = 0; r < n; r++) {
        mat[r] = malloc((size_t) n * sizeof *mat[r]);
        if (mat[r] == NULL)
            return 1;
    }

    /* read the matrix values */
    for (int r = 0; r < n; r++)
        for (int c = 0; c < n; c++)
            if (scanf("%d", &mat[r][c]) != 1)
                return 1;

    /* best[r] holds the per-row answer; MAX_VALUE means "not found" */
    int *best = malloc((size_t) n * sizeof *best);
    if (best == NULL)
        return 1;
    for (int r = 0; r < n; r++)
        best[r] = MAX_VALUE;

    /* one thread per chunk of rows; rows are written by exactly one
       thread each, so no synchronization is required */
    #pragma omp parallel for schedule(static)
    for (int r = 0; r < n; r++) {
        const int ref = mat[r][pos];
        int local = MAX_VALUE;
        for (int c = 0; c < n; c++)
            if (mat[r][c] > ref && mat[r][c] < local)
                local = mat[r][c];
        best[r] = local;
    }

    /* print results in row order */
    for (int r = 0; r < n; r++) {
        if (best[r] == MAX_VALUE)
            printf("-1 ");
        else
            printf("%d ", best[r]);
    }
    printf("\n");

    /* release the matrix */
    for (int r = 0; r < n; r++)
        free(mat[r]);
    free(mat);

    /* release the per-row results */
    free(best);

    return 0;
}
/* ==== file: wand-view.c ==== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % W W AAA N N DDDD % % W W A A NN N D D % % W W W AAAAA N N N D D % % WW WW A A N NN D D % % W W A A N N DDDD % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickWand Wand View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define WandViewId "WandView" /* Typedef declarations. */ struct _WandView { size_t id; char name[MaxTextExtent], *description; RectangleInfo extent; MagickWand *wand; CacheView *view; size_t number_threads; PixelWand ***pixel_wands; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneWandView() makes a copy of the specified wand view. 
% % The format of the CloneWandView method is: % % WandView *CloneWandView(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ WandExport WandView *CloneWandView(const WandView *wand_view) { WandView *clone_view; register ssize_t i; assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); if (wand_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name); clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", wand_view->name); (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view)); clone_view->id=AcquireWandId(); (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g", WandViewId,(double) clone_view->id); clone_view->description=ConstantString(wand_view->description); clone_view->view=CloneCacheView(wand_view->view); clone_view->extent=wand_view->extent; clone_view->number_threads=wand_view->number_threads; clone_view->exception=AcquireExceptionInfo(); InheritException(clone_view->exception,wand_view->exception); for (i=0; i < (ssize_t) wand_view->number_threads; i++) clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **) wand_view->pixel_wands[i],wand_view->extent.width); clone_view->debug=wand_view->debug; if (clone_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name); clone_view->signature=WandSignature; return(clone_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyWandView() deallocates memory associated with a wand view. 
% % The format of the DestroyWandView method is: % % WandView *DestroyWandView(WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands, const size_t number_wands,const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i=0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands); pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands); return(pixel_wands); } WandExport WandView *DestroyWandView(WandView *wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands, wand_view->extent.width,wand_view->number_threads); wand_view->view=DestroyCacheView(wand_view->view); wand_view->exception=DestroyExceptionInfo(wand_view->exception); wand_view->signature=(~WandSignature); RelinquishWandId(wand_view->id); wand_view=(WandView *) RelinquishMagickMemory(wand_view); return(wand_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferWandViewIterator() iterates over three wand views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel extent is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. However, the destination wand view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. 
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
%        const WandView *duplex,WandView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferWandViewIterator method is:
%
%      MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
%        WandView *duplex,WandView *destination,
%        DuplexTransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o duplex: the duplex wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  /* the destination must be writable (DirectClass) before updating pixels */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,destination_image,height,1)
#endif
  /* one iteration per scanline; each thread works on its own row */
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /* load the source scanline (read-only) into per-thread pixel wands */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* load the duplex scanline (read-only) */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelBlack(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /* load the destination scanline (writable) */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    /* invoke the user callback on this scanline */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* write the (possibly modified) destination wands back to the pixels */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_DuplexTransferWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x c e p t i o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a wand view.
%
%  The format of the GetWandViewException method is:
%
%      char *GetWandViewException(const WandView *wand_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o wand_view: the pixel wand_view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *message;

  const ExceptionInfo
    *error;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  error=wand_view->exception;
  *severity=error->severity;
  /* room for the localized reason plus an optional "( description )" */
  message=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,sizeof(*message));
  if (message == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *message='\0';
  if (error->reason != (char *) NULL)
    (void) CopyMagickString(message,GetLocaleExceptionMessage(error->severity,
      error->reason),MaxTextExtent);
  if (error->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(message," (",MaxTextExtent);
      (void) ConcatenateMagickString(message,GetLocaleExceptionMessage(
        error->severity,error->description),MaxTextExtent);
      (void) ConcatenateMagickString(message,")",MaxTextExtent);
    }
  return(message);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewExtent() returns the wand view extent.
%
%  The format of the GetWandViewExtent method is:
%
%      RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewIterator() iterates over the wand view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const WandView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetWandViewIterator method is:
%
%      MagickBooleanType GetWandViewIterator(WandView *source,
%        GetWandViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,source_image,height,1)
#endif
  /* one iteration per scanline; pixels are read-only in this iterator */
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* load the source scanline into this thread's pixel wands */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* invoke the user callback; its pixel updates are intentionally ignored */
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_GetWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w P i x e l s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewPixels() returns the wand view pixel_wands.
%
%  The format of the GetWandViewPixels method is:
%
%      PixelWand *GetWandViewPixels(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  /* per-thread row of pixel wands; id selects the calling thread's row */
  const int
    id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w W a n d                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewWand() returns the magick wand associated with the wand view.
%
%  The format of the GetWandViewWand method is:
%
%      MagickWand *GetWandViewWand(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s W a n d V i e w                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsWandView() returns MagickTrue if the parameter is verified as a wand
%  view object.
%
%  The format of the IsWandView method is:
%
%      MagickBooleanType IsWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
% */ WandExport MagickBooleanType IsWandView(const WandView *wand_view) { size_t length; if (wand_view == (const WandView *) NULL) return(MagickFalse); if (wand_view->signature != WandSignature) return(MagickFalse); length=strlen(WandViewId); if (LocaleNCompare(wand_view->name,WandViewId,length) != 0) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewWandView() returns a wand view required for all other methods in the % Wand View API. % % The format of the NewWandView method is: % % WandView *NewWandView(MagickWand *wand) % % A description of each parameter follows: % % o wand: the wand. % */ static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands, const size_t number_threads) { PixelWand ***pixel_wands; register ssize_t i; pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads, sizeof(*pixel_wands)); if (pixel_wands == (PixelWand ***) NULL) return((PixelWand ***) NULL); (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands)); for (i=0; i < (ssize_t) number_threads; i++) { pixel_wands[i]=NewPixelWands(number_wands); if (pixel_wands[i] == (PixelWand **) NULL) return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads)); } return(pixel_wands); } WandExport WandView *NewWandView(MagickWand *wand) { WandView *wand_view; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view)); if (wand_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", GetExceptionMessage(errno)); (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view)); wand_view->id=AcquireWandId(); (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g", WandViewId,(double) wand_view->id); 
wand_view->description=ConstantString("WandView"); wand_view->wand=wand; wand_view->exception=AcquireExceptionInfo(); wand_view->view=AcquireVirtualCacheView(wand_view->wand->images, wand_view->exception); wand_view->extent.width=wand->images->columns; wand_view->extent.height=wand->images->rows; wand_view->number_threads=GetOpenMPMaximumThreads(); wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width, wand_view->number_threads); if (wand_view->pixel_wands == (PixelWand ***) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", GetExceptionMessage(errno)); wand_view->debug=IsEventLogging(); wand_view->signature=WandSignature; return(wand_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w W a n d V i e w E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewWandViewExtent() returns a wand view required for all other methods % in the Wand View API. % % The format of the NewWandViewExtent method is: % % WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x, % const ssize_t y,const size_t width,const size_t height) % % A description of each parameter follows: % % o wand: the magick wand. % % o x,y,columns,rows: These values define the perimeter of a extent of % pixel_wands view. 
% */
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /*
    Assign the wand BEFORE acquiring the cache view: the view is created from
    wand_view->wand->images, and wand_view->wand is still NULL at this point
    (ResetMagickMemory zeroed the structure above).  NewWandView() already
    uses this order; the previous ordering here dereferenced a null pointer.
  */
  wand_view->wand=wand;
  wand_view->exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  /* One row of pixel wands per OpenMP thread; width wands per row. */
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   W a n d   V i e w   D e s c r i p t i o n                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewDescription() associates a description with an image view.
%
%  The format of the SetWandViewDescription method is:
%
%      void SetWandViewDescription(WandView *image_view,const char *description)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
%    o description: the wand view description.
% */
WandExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  /*
    NewWandView() always installs an initial description, so free the old
    string before replacing it -- the previous code leaked it.  (Linkage macro
    also changed MagickExport -> WandExport for consistency with every other
    function in this Wand source.)
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   W a n d   V i e w   I t e r a t o r                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewIterator() iterates over the wand view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initiallly
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetWandViewIterator method is:
%
%      MagickBooleanType SetWandViewIterator(WandView *destination,
%        SetWandViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the wand view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
% */ WandExport MagickBooleanType SetWandViewIterator(WandView *destination, SetWandViewMethod set,void *context) { ExceptionInfo *exception; Image *destination_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(destination != (WandView *) NULL); assert(destination->signature == WandSignature); if (set == (SetWandViewMethod) NULL) return(MagickFalse); destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (destination->extent.height-destination->extent.y); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(destination_image,destination_image,height,1) #endif for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict pixels; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x, y,destination->extent.width,1,exception); if (pixels == (PixelPacket *) NULL) { InheritException(destination->exception,GetCacheViewException( destination->view)); status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(destination->view); if (set(destination,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { 
InheritException(destination->exception,GetCacheViewException( destination->view)); status=MagickFalse; } if (destination_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_SetWandViewIterator) #endif proceed=SetImageProgress(destination_image,destination->description, progress++,destination->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t W a n d V i e w T h r e a d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetWandViewThreads() sets the number of threads in a thread team. % % The format of the SetWandViewDescription method is: % % void SetWandViewThreads(WandView *image_view, % const size_t number_threads) % % A description of each parameter follows: % % o image_view: the image view. % % o number_threads: the number of threads in a thread team. % */ MagickExport void SetWandViewThreads(WandView *image_view, const size_t number_threads) { assert(image_view != (WandView *) NULL); assert(image_view->signature == MagickSignature); image_view->number_threads=number_threads; if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource)) image_view->number_threads=GetOpenMPMaximumThreads(); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f e r W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransferWandViewIterator() iterates over two wand views in parallel and % calls your transfer method for each scanline of the view. The source pixel % extent is not confined to the image canvas-- that is you can include % negative offsets or widths or heights that exceed the image dimension. 
% However, the destination wand view is confined to the image canvas-- that % is no negative offsets or widths or heights that exceed the image dimension % are permitted. % % The callback signature is: % % MagickBooleanType TransferImageViewMethod(const WandView *source, % WandView *destination,const ssize_t y,const int thread_id, % void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the TransferWandViewIterator method is: % % MagickBooleanType TransferWandViewIterator(WandView *source, % WandView *destination,TransferWandViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o destination: the destination wand view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType TransferWandViewIterator(WandView *source, WandView *destination,TransferWandViewMethod transfer,void *context) { ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (transfer == (TransferWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = 
GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *restrict indexes; register const PixelPacket *restrict pixels; register IndexPacket *restrict destination_indexes; register ssize_t x; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if 
(destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_TransferWandViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateWandViewIterator() iterates over the wand view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdateWandViewIterator method is: % % MagickBooleanType UpdateWandViewIterator(WandView *source, % UpdateWandViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o update: the update callback method. 
% % o context: the user defined context. % */ WandExport MagickBooleanType UpdateWandViewIterator(WandView *source, UpdateWandViewMethod update,void *context) { ExceptionInfo *exception; Image *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (update == (UpdateWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; if (SetImageStorageClass(source_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=source->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,source_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict pixels; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y, source->extent.width,1,exception); if (pixels == (PixelPacket *) NULL) { InheritException(source->exception,GetCacheViewException( source->view)); status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (update(source,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) source->extent.width; x++) PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) 
SetPixelBlack(indexes+x,PixelGetBlackQuantum( source->pixel_wands[id][x])); if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse) { InheritException(source->exception,GetCacheViewException(source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_UpdateWandViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); }
sa.c
#include "common.h"

/*
 * Undo a trial edge exchange: copy the saved endpoint pairs back into edge[].
 * kind_opt is the number of modified lines per group (D_1G_OPT or D_2G_OPT),
 * so groups*kind_opt entries of restored_line/restored_edge are valid.
 */
static void restore_edge(const int groups, const int kind_opt, int* restrict edge, int* restrict restored_line,
			 const int* restrict restored_edge)
{
  if(kind_opt != D_1G_OPT && kind_opt != D_2G_OPT)
    ERROR("Wrong kind_opt: %d\n", kind_opt);

#pragma omp parallel for
  for(int i=0;i<groups*kind_opt;i++){
    edge[restored_line[i]*2  ] = restored_edge[i*2  ];
    edge[restored_line[i]*2+1] = restored_edge[i*2+1];
  }
}

/*
 * Undo a trial adjacency-matrix update: write the saved values back at the
 * saved (y,x) positions.  Each exchanged line touched two adjacency entries,
 * hence kind_opt*groups*2 saved slots.
 */
static void restore_adj(const int degree, const int groups, int* restrict adj, const int kind_opt,
			int* restrict restored_adj_value, int* restrict restored_adj_idx_y, int* restrict restored_adj_idx_x)
{
  if(kind_opt != D_1G_OPT && kind_opt != D_2G_OPT)
    ERROR("Wrong kind_opt: %d\n", kind_opt);

#pragma omp parallel for
  for(int i=0;i<kind_opt*groups*2;i++){
    int y = restored_adj_idx_y[i];
    int x = restored_adj_idx_x[i];
    adj[y*degree+x] = restored_adj_value[i];
  }
}

/* Parallel copy of n ints from src to dst (regions must not overlap). */
static void copy_edge(int *restrict dst, const int *restrict src, const int n)
{
#pragma omp parallel for
  for(int i=0;i<n;i++)
    dst[i] = src[i];
}

/* Uniform random double in the open interval (0,1); never returns 0 or 1,
   so it is safe as the right-hand side of the Metropolis acceptance test. */
static double uniform_rand()
{
  return ((double)random()+1.0)/((double)RAND_MAX+2.0);
}

/* Print the column headers for the periodic progress report. */
static void print_result_header()
{
  PRINT_R0("   Times\t    Temp\tCur. ASPL GAP\t\tBest ASPL GAP\t\t");
  PRINT_R0("Cur. Dia. GAP\t\tBest Dia. 
GAP\tAccept Rate\n"); } static void print_results(const long long num, const double temp, const double current_ASPL, const double best_ASPL, const double low_ASPL, const int current_diam, const int best_diam, const int low_diam, const long long accepts, const long long rejects) { PRINT_R0("%8lld\t%f\t", num, temp); PRINT_R0("%f ( %f )\t%f ( %f )\t%d ( %d )\t\t\t%d ( %d )\t\t", current_ASPL, current_ASPL-low_ASPL, best_ASPL, best_ASPL-low_ASPL, current_diam, current_diam-low_diam, best_diam, best_diam-low_diam); if(num != 0) PRINT_R0("%.4f ( %lld / %lld )\n", (double)accepts/(accepts+rejects), accepts, (accepts+rejects)); else PRINT_R0("-\n"); } void create_adj(const int nodes, const int lines, const int degree, const int edge[lines][2], int adj[nodes][degree]) { int count[nodes]; for(int i=0;i<nodes;i++) count[i] = 0; for(int i=0;i<lines;i++){ int n1 = edge[i][0]; int n2 = edge[i][1]; adj[n1][count[n1]++] = n2; adj[n2][count[n2]++] = n1; } } #define CENTER_VERTEX -1 int distance(int nodes, const int a, const int b, const int added_centers) { if(a >= nodes-added_centers || b >= nodes-added_centers) return CENTER_VERTEX; int v = MAX(a, b) - MIN(a, b); if(added_centers) nodes -= added_centers; return (v < nodes/2.0)? 
v : nodes-v; } bool check(const int nodes, const int based_nodes, const int lines, const int degree, const int groups, int edge[lines][2], const int added_centers, int* adj, const int ii) { bool flag = true; int based_lines = lines/groups; #pragma omp parallel for for(int i=0;i<based_lines;i++){ for(int j=1;j<groups;j++){ int k = j * based_lines + i; if(distance(nodes, edge[i][0], edge[i][1], added_centers) != distance(nodes, edge[k][0], edge[k][1], added_centers)){ PRINT_R0("check 1: %d\n", ii); PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d d=%d\n", i, edge[i][0], i, edge[i][1], distance(nodes, edge[i][0], edge[i][1], added_centers)); PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d d=%d\n", k, edge[k][0], k, edge[k][1], distance(nodes, edge[k][0], edge[k][1], added_centers)); flag = false; } } } #pragma omp parallel for for(int i=0;i<based_lines;i++){ for(int j=1;j<groups;j++){ int k = j * based_lines + i; if(order(nodes, edge[i][0], edge[i][1], added_centers) != order(nodes, edge[k][0], edge[k][1], added_centers)){ PRINT_R0("check 2 : %d\n", ii); PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d %d\n", i, edge[i][0], i, edge[i][1], order(nodes, edge[i][0], edge[i][1], added_centers)); PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d %d\n", k, edge[k][0], k, edge[k][1], order(nodes, edge[k][0], edge[k][1], added_centers)); flag = false; } } } #pragma omp parallel for for(int i=0;i<based_lines;i++){ if(order(nodes, edge[i][0], edge[i][1], added_centers) != MIDDLE) for(int j=1;j<groups;j++){ int k = j * based_lines + i; int tmp0 = edge[k][0] - edge[k-based_lines][0]; int tmp1 = edge[k][1] - edge[k-based_lines][1]; if(added_centers){ tmp0 = (tmp0 < 0)? tmp0+nodes-added_centers : tmp0; tmp1 = (tmp1 < 0)? tmp1+nodes-added_centers : tmp1; } else{ tmp0 = (tmp0 < 0)? tmp0 + nodes : tmp0; tmp1 = (tmp1 < 0)? 
tmp1 + nodes : tmp1; } if(tmp0 != based_nodes || tmp1 != based_nodes){ PRINT_R0("check 4: %d\n", ii); PRINT_R0("The different group relationship\n"); PRINT_R0("edge[%d][0]-edge[%d][0] = %d - %d = %d != %d\n", k, k-based_lines, edge[k][0], edge[k-based_lines][0], tmp0, based_nodes); PRINT_R0("edge[%d][1]-edge[%d][1] = %d - %d = %d != %d\n", k, k-based_lines, edge[k][1], edge[k-based_lines][1], tmp1, based_nodes); flag = false; } } } if(adj != NULL){ int *tmp_adj = malloc(sizeof(int)*nodes*degree); create_adj(nodes, lines, degree, (const int (*)[2])edge, (int (*)[degree])tmp_adj); for(int i=0;i<nodes;i++){ int sum[2] = {0,0}; for(int j=0;j<degree;j++){ sum[0] += *(adj + i * degree + j); sum[1] += *(tmp_adj + i * degree + j); } if(sum[0] != sum[1]){ PRINT_R0("[ii=%d] Error 5 %d %d\n", ii, sum[0], sum[1]); for(int j=0;j<degree;j++) PRINT_R0("%d ", *(adj + i * degree + j)); PRINT_R0("\n"); for(int j=0;j<degree;j++) PRINT_R0("%d ", *(tmp_adj + i * degree + j)); PRINT_R0("\n"); flag = false; break; } } for(int i=0;i<nodes;i++){ for(int j=0;j<degree;j++){ int tmp = *(adj + i * degree + j); int k; for(k=0;k<degree;k++) if(tmp == *(tmp_adj + i * degree + k)) break; if(k == degree){ PRINT_R0("[ii=%d] Error 6\n", ii); flag = false; break; } } } for(int i=0;i<nodes;i++){ for(int j=0;j<degree;j++){ int tmp = *(tmp_adj + i * degree + j); int k; for(k=0;k<degree;k++) if(tmp == *(adj + i * degree + k)) break; if(k == degree){ PRINT_R0("[ii=%d] Error 7\n", ii); flag = false; break; } } } for(int i=0;i<nodes;i++){ for(int j=0;j<degree;j++){ int tmp = *(adj + i * degree + j); for(int k=j+1;k<degree;k++) if(tmp == *(adj + i * degree + k)){ flag = false; break; } } } free(tmp_adj); } return flag; } bool has_duplicated_vertex(const int e00, const int e01, const int e10, const int e11) { return (e00 == e10 || e01 == e11 || e00 == e11 || e01 == e10); } static void exchange_edge_2opt(const int nodes, const int lines, const int groups, const int degree, const int based_nodes, int 
edge[lines][2], const int added_centers, int* restrict adj, int *kind_opt, int* restrict restored_edge, int* restrict restored_line, int* restrict restored_adj_value, int* restrict restored_adj_idx_y, int* restrict restored_adj_idx_x, const bool is_simple_graph, const int ii) { int tmp_line[groups*2], tmp_edge[groups*2][2], r; int based_lines = lines / groups; while(1){ while(1){ while(1){ tmp_line[0] = getRandom(lines); tmp_line[1] = getRandom(lines); if(tmp_line[0] != tmp_line[1]) break; } if(has_duplicated_vertex(edge[tmp_line[0]][0], edge[tmp_line[0]][1], edge[tmp_line[1]][0], edge[tmp_line[1]][1])){ continue; } else if((tmp_line[0] - tmp_line[1]) % based_lines == 0){ if(edge_1g_opt(edge, nodes, lines, degree, based_nodes, based_lines, groups, tmp_line[0], added_centers, adj, kind_opt, restored_edge, restored_line, restored_adj_value, restored_adj_idx_y, restored_adj_idx_x, is_simple_graph, ii)) return; else continue; } else break; } bool flag0 = (distance(nodes, edge[tmp_line[0]][0], edge[tmp_line[0]][1], added_centers) == (nodes-added_centers)/2); bool flag1 = (distance(nodes, edge[tmp_line[1]][0], edge[tmp_line[1]][1], added_centers) == (nodes-added_centers)/2); bool diameter_flag = ((flag0 || flag1) && groups%2 == 0); if(diameter_flag){ if(edge_1g_opt(edge, nodes, lines, degree, based_nodes, based_lines, groups, tmp_line[0], added_centers, adj, kind_opt, restored_edge, restored_line, restored_adj_value, restored_adj_idx_y, restored_adj_idx_x, is_simple_graph, ii)) return; else continue; } // 2g-opt for(int i=1;i<groups;i++){ int tmp0 = tmp_line[0] + based_lines * i; int tmp1 = tmp_line[1] + based_lines * i; tmp_line[0+2*i] = (tmp0 >= lines)? tmp0 - lines : tmp0; tmp_line[1+2*i] = (tmp1 >= lines)? 
tmp1 - lines : tmp1; } for(int i=0;i<groups*2;i++) for(int j=0;j<2;j++) tmp_edge[i][j] = edge[tmp_line[i]][j]; r = getRandom(2); if(r == 0){ for(int i=0;i<groups;i++) swap(&tmp_edge[i*2][1], &tmp_edge[i*2+1][1]); } else{ for(int i=0;i<groups;i++) swap(&tmp_edge[i*2][1], &tmp_edge[i*2+1][0]); } assert(check_loop(groups*2, tmp_edge)); if(!check_duplicate_tmp_edge(2, groups, tmp_edge)) continue; else if(!check_duplicate_current_edge(lines, groups*2, tmp_line, edge, tmp_edge, groups, 2, false)) continue; else break; } // end while for(int i=0;i<groups*2;i++) if(order(nodes, tmp_edge[i][0], tmp_edge[i][1], added_centers) == RIGHT) swap(&tmp_edge[i][0], &tmp_edge[i][1]); // RIGHT -> LEFT if(is_simple_graph){ // Change a part of adj. int y0[groups], y1[groups], y2[groups], y3[groups]; int x0[groups], x1[groups], x2[groups], x3[groups]; #pragma omp parallel for for(int i=0;i<groups;i++){ y0[i] = edge[tmp_line[i*2 ]][0]; y1[i] = edge[tmp_line[i*2 ]][1]; y2[i] = edge[tmp_line[i*2+1]][0]; y3[i] = edge[tmp_line[i*2+1]][1]; for(x0[i]=0;x0[i]<degree;x0[i]++) if(adj[y0[i]*degree+x0[i]] == y1[i]) break; for(x1[i]=0;x1[i]<degree;x1[i]++) if(adj[y1[i]*degree+x1[i]] == y0[i]) break; for(x2[i]=0;x2[i]<degree;x2[i]++) if(adj[y2[i]*degree+x2[i]] == y3[i]) break; for(x3[i]=0;x3[i]<degree;x3[i]++) if(adj[y3[i]*degree+x3[i]] == y2[i]) break; if(x0[i] == degree || x1[i] == degree || x2[i] == degree || x3[i] == degree) ERROR("%d : %d %d %d %d\n", ii, x0[i], x1[i], x2[i], x3[i]); restored_adj_idx_y[i*4 ] = y0[i]; restored_adj_idx_x[i*4 ] = x0[i]; restored_adj_idx_y[i*4+1] = y1[i]; restored_adj_idx_x[i*4+1] = x1[i]; restored_adj_idx_y[i*4+2] = y2[i]; restored_adj_idx_x[i*4+2] = x2[i]; restored_adj_idx_y[i*4+3] = y3[i]; restored_adj_idx_x[i*4+3] = x3[i]; restored_adj_value[i*4 ] = adj[y0[i]*degree+x0[i]]; restored_adj_value[i*4+1] = adj[y1[i]*degree+x1[i]]; restored_adj_value[i*4+2] = adj[y2[i]*degree+x2[i]]; restored_adj_value[i*4+3] = adj[y3[i]*degree+x3[i]]; // restored_line[i*2 ] = 
tmp_line[i*2 ]; restored_line[i*2+1] = tmp_line[i*2+1]; restored_edge[i*4 ] = edge[tmp_line[i*2 ]][0]; restored_edge[i*4+1] = edge[tmp_line[i*2 ]][1]; restored_edge[i*4+2] = edge[tmp_line[i*2+1]][0]; restored_edge[i*4+3] = edge[tmp_line[i*2+1]][1]; } #pragma omp parallel for for(int i=0;i<groups;i++){ if(r==0){ adj[y0[i]*degree+x0[i]] = y3[i]; adj[y1[i]*degree+x1[i]] = y2[i]; adj[y2[i]*degree+x2[i]] = y1[i]; adj[y3[i]*degree+x3[i]] = y0[i]; } else{ adj[y0[i]*degree+x0[i]] = y2[i]; adj[y1[i]*degree+x1[i]] = y3[i]; adj[y2[i]*degree+x2[i]] = y0[i]; adj[y3[i]*degree+x3[i]] = y1[i]; } } } #pragma omp parallel for for(int i=0;i<groups;i++){ edge[tmp_line[i*2 ]][0] = tmp_edge[i*2 ][0]; edge[tmp_line[i*2+1]][0] = tmp_edge[i*2+1][0]; edge[tmp_line[i*2 ]][1] = tmp_edge[i*2 ][1]; edge[tmp_line[i*2+1]][1] = tmp_edge[i*2+1][1]; } *kind_opt = D_2G_OPT; } static bool accept(const int new_diam, const int current_diam, const double new_ASPL, const double current_ASPL, const double temp, const int nodes, const int groups, const bool hill_climbing_flag, const bool detect_temp_flag, const long long i, double *max_diff_energy, long long *total_accepts, long long *accepts, long long *rejects) { if(new_diam < current_diam){ *accepts += 1; if(i > SKIP_ACCEPTS) *total_accepts +=1; return true; } else if(new_diam > current_diam){ *rejects += 1; return false; } else{ // new_diam == current_diam if(new_ASPL <= current_ASPL){ *accepts += 1; if(i > SKIP_ACCEPTS) *total_accepts +=1; return true; } else if(hill_climbing_flag){ // Only accept when ASPL <= current_ASPL. 
*rejects += 1;
      return false;
    }
    /* Metropolis criterion: diff <= 0 here (new ASPL is worse); accept with
     * probability exp(diff/temp).  The scaling by nodes*(nodes-1)/groups turns
     * the ASPL delta into a total-path-length delta per base group. */
    double diff = ((current_ASPL-new_ASPL)*nodes*(nodes-1))/groups;
    if(detect_temp_flag)
      *max_diff_energy = MAX(*max_diff_energy, -1.0 * diff);  /* track worst uphill step for temperature tuning */
    if(exp(diff/temp) > uniform_rand()){
      *accepts += 1;
      if(i > SKIP_ACCEPTS) *total_accepts +=1;
      return true;
    }
    else{
      *rejects += 1;
      return false;
    }
  }
}

/* Simulated-annealing main loop.
 * Repeatedly proposes a 2-opt edge exchange, evaluates diameter/ASPL, and
 * accepts or rolls back via accept().  Rollback uses the cheap
 * restore_adj/restore_edge path for simple graphs and a full edge-list copy
 * (tmp_edge_nsg) otherwise.  On return, edge/diam/ASPL hold the best solution
 * found; the return value is the number of iterations executed. */
long long sa(const int nodes, const int lines, const int degree, const int groups, double temp,
             const long long ncalcs, const double cooling_rate, const int low_diam,
             const double low_ASPL, const bool hill_climbing_flag, const bool detect_temp_flag,
             double *max_diff_energy, int edge[lines][2], int *diam, double *ASPL,
             const int cooling_cycle, const int added_centers, const int based_nodes,
             long long *total_accepts, const bool is_simple_graph,
             const int num_degrees[nodes], const int algo)
{
  long long ii, accepts = 0, rejects = 0;
  int best_edge[lines][2], tmp_edge[lines][2], tmp_edge_nsg[lines][2] /* nsg = not simple graph */, kind_opt;
  /* Rollback buffers for the simple-graph fast path (filled by exchange_edge_2opt). */
  int restored_adj_value[groups*4], restored_adj_idx_y[groups*4], restored_adj_idx_x[groups*4];
  int restored_edge[groups*4], restored_line[groups*2];
  bool restore_flag = false;
  copy_edge((int *)best_edge, (int *)edge, lines*2);
  copy_edge((int *)tmp_edge, (int *)edge, lines*2);

  // Create adj matrix
  int *adj = malloc(sizeof(int)*nodes*degree); // int adj[nodes][degree];
  create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj);
  evaluation(nodes, based_nodes, groups, lines, degree, adj, diam, ASPL, added_centers, num_degrees, algo);
  double current_ASPL = *ASPL;
  double best_ASPL    = *ASPL;
  int current_diam = *diam;
  int best_diam    = *diam;
  /* Progress is printed NUM_OF_PROGRESS times over the whole run. */
  int print_interval = (ncalcs/NUM_OF_PROGRESS == 0)? 1 : ncalcs/NUM_OF_PROGRESS;
  if(rank == 0 && !detect_temp_flag) print_result_header();

  for(ii=0;ii<ncalcs;ii++){
    double tmp_ASPL;
    int tmp_diam;
    if(ii % print_interval == 0 && !detect_temp_flag){
      print_results(ii, temp, current_ASPL, best_ASPL, low_ASPL,
                    current_diam, best_diam, low_diam, accepts, rejects);
      accepts = 0;
      rejects = 0;
    }
    /* Propose exchanges until one yields a connected (evaluable) graph. */
    while(1){
      if(is_simple_graph){
        if(restore_flag){
          /* Undo the previous rejected/failed move in place. */
          restore_adj(degree, groups, adj, kind_opt,
                      restored_adj_value, restored_adj_idx_y, restored_adj_idx_x);
          restore_edge(groups, kind_opt, (int *)tmp_edge, restored_line, restored_edge);
        }
      }
      else{
        /* Keep a full backup copy, since there is no incremental restore path. */
        copy_edge((int *)tmp_edge_nsg, (int *)tmp_edge, lines*2);
      }
      exchange_edge_2opt(nodes, lines, groups, degree, based_nodes, tmp_edge, added_centers,
                         adj, &kind_opt, restored_edge, restored_line, restored_adj_value,
                         restored_adj_idx_y, restored_adj_idx_x, is_simple_graph, (int)ii);
      if(!is_simple_graph)
        create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj);
      assert(check(nodes, based_nodes, lines, degree, groups, tmp_edge, added_centers, adj, (int)ii));
      /* evaluation() returns false when the graph is disconnected. */
      if(evaluation(nodes, based_nodes, groups, lines, degree, adj,
                    &tmp_diam, &tmp_ASPL, added_centers, num_degrees, algo))
        break;
      else{
        if(is_simple_graph) restore_flag = true;
        else copy_edge((int *)tmp_edge, (int *)tmp_edge_nsg, lines*2);
      }
    }

    if(!accept(tmp_diam, current_diam, tmp_ASPL, current_ASPL, temp, nodes, groups,
               hill_climbing_flag, detect_temp_flag, ii, max_diff_energy,
               total_accepts, &accepts, &rejects)){
      /* Rejected: schedule (or perform) the rollback. */
      if(is_simple_graph) restore_flag = true;
      else copy_edge((int *)tmp_edge, (int *)tmp_edge_nsg, lines*2);
    }
    else{
      if(is_simple_graph) restore_flag = false;
      current_ASPL = tmp_ASPL;
      current_diam = tmp_diam;
      if((best_diam > current_diam) || (best_diam == current_diam && best_ASPL > current_ASPL)){
        copy_edge((int *)best_edge, (int *)tmp_edge, lines*2);
        best_ASPL = current_ASPL;
        best_diam = current_diam;
      }
      /* Early exit when the theoretical lower bound is reached. */
      if(best_diam == current_diam && best_ASPL == low_ASPL){
        if(!detect_temp_flag){
          print_results(ii, temp, current_ASPL, best_ASPL, low_ASPL,
                        current_diam, best_diam, low_diam, accepts, rejects);
          PRINT_R0("---\nFound optimum solution.\n");
        }
        break;
      }
    }
    if((ii+1)%cooling_cycle == 0)
      temp *= cooling_rate;  /* geometric cooling schedule */
  }
  *ASPL = best_ASPL;
  *diam = best_diam;
  copy_edge((int *)edge, (int *)best_edge, lines*2);
  free(adj);
  return ii;
}

#define ESTIMATED_TIMES 5
/* Estimate the average wall-clock time of one SA iteration by running
 * ESTIMATED_TIMES exchange+evaluation cycles and averaging the elapsed time. */
double estimate_elapse_time(const int nodes, const int based_nodes, const int lines,
                            const int degree, const int groups, int edge[lines][2],
                            const int added_centers, const bool is_simple_graph,
                            const int num_degrees[nodes], const int algo)
{
  int diam;    // Not use
  double ASPL; // Not use
  int *adj = malloc(sizeof(int)*nodes*degree); // int adj[nodes][degree];
  int (*tmp_edge)[2] = malloc(sizeof(int)*lines*2); // int tmp_edge[lines][2];
  int kind_opt;
  int restored_adj_value[groups*4], restored_adj_idx_y[groups*4], restored_adj_idx_x[groups*4];
  int restored_edge[groups*4], restored_line[groups*2];
  copy_edge((int *)tmp_edge, (int *)edge, lines*2);
  create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj);

  timer_start(TIMER_ESTIMATED);
  for(int i=0;i<ESTIMATED_TIMES;i++){
    exchange_edge_2opt(nodes, lines, groups, degree, based_nodes, tmp_edge, added_centers,
                       adj, &kind_opt, restored_edge, restored_line, restored_adj_value,
                       restored_adj_idx_y, restored_adj_idx_x, is_simple_graph, (int)i);
    if(!is_simple_graph)
      create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj);
    assert(check(nodes, based_nodes, lines, degree, groups, tmp_edge, added_centers, adj, (int)i));
    evaluation(nodes, based_nodes, groups, lines, degree, adj, &diam, &ASPL,
               added_centers, num_degrees, algo);
  }
  timer_stop(TIMER_ESTIMATED);

  free(tmp_edge);
  free(adj);
  return timer_read(TIMER_ESTIMATED)/ESTIMATED_TIMES;
}

// This function is mainly useful when groups == 1.
void check_current_edge(const int nodes, const int degree, const int lines, const int groups, const int based_nodes, int edge[lines][2], const double low_ASPL, const int added_centers, const int num_degrees[nodes], const int algo) { int diam; // Not use double ASPL; int (*adj)[degree] = malloc(sizeof(int)*nodes*degree); // int adj[nodes][degree]; create_adj(nodes, lines, degree, (const int (*)[2])edge, adj); if(! evaluation(nodes, based_nodes, groups, lines, degree, (int *)adj, &diam, &ASPL, added_centers, num_degrees, algo)) ERROR("The input file has a node which is never reached by another node.\n"); if(ASPL == low_ASPL) END("The input file has already optimum solution.\n"); free(adj); }
cpu_bound.c
/* * Copyright (c) 2009, 2010, 2011, ETH Zurich. * All rights reserved. * * This file is distributed under the terms in the attached LICENSE file. * If you do not find this file, copies can be found by writing to: * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group. */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <assert.h> #include <stdint.h> #include <omp.h> #include <arch/x86/barrelfish_kpi/asm_inlines_arch.h> #define WORK_PERIOD 5000000000UL #define STACK_SIZE (64 * 1024) int main(int argc, char *argv[]) { uint64_t now, start; volatile uint64_t workcnt, workload = 0; int64_t workmax = 1000; int64_t i; if(argc == 1) { printf("calibrating...\n"); do { workload = 0; workmax *= 2; start = rdtsc(); for(i = 0; i < workmax; i++) { workload++; } now = rdtsc(); } while(now - start < WORK_PERIOD); // Compute so the max number of CPUs would calc for WORK_PERIOD workmax *= omp_get_num_procs(); printf("workmax = %ld\n", workmax); return 0; } else { workmax = atol(argv[1]); } int nthreads = omp_get_max_threads(); if(argc == 3) { nthreads = atoi(argv[2]); bomp_bomp_init(nthreads); omp_set_num_threads(nthreads); } printf("threads %d, workmax %ld, CPUs %d\n", nthreads, workmax, omp_get_num_procs()); start = rdtsc(); // Do some work #pragma omp parallel for private(workcnt) for(i = 0; i < workmax; i++) { workcnt++; } now = rdtsc(); printf("%s: threads %d, compute time %lu ticks\n", argv[0], nthreads, now - start); for(;;); return 0; }
mandel-omp-task-point.c
/*
 * Mandelbrot program (OpenMP task version).
 *
 * This program computes and displays all or part of the Mandelbrot
 * set. By default, it examines all points in the complex plane
 * that have both real and imaginary parts between -2 and 2.
 * Command-line parameters allow zooming in on a specific part of
 * this range.
 *
 * Usage:
 *   mandel [-i maxiter -c x0 y0 -s size -w windowsize]
 * where
 *   maxiter denotes the maximum number of iterations at each point -- by default 1000
 *   x0, y0, and size specify the range to examine (a square
 *     centered at (x0 + iy0) of size 2*size by 2*size -- by default,
 *     a square of size 4 by 4 centered at the origin)
 *   windowsize denotes the size of the image (display window) to compute
 *
 * Input: none, except the optional command-line arguments
 * Output: a graphical display as described in Wilkinson & Allen,
 *   displayed using the X Window system, plus text output to
 *   standard output showing the above parameters, plus execution
 *   time in seconds.
 *
 * Code based on the original code from Web site for Wilkinson and Allen's
 * text on parallel programming:
 * http://www.cs.uncc.edu/~abw/parallel/par_prog/
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>

#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif

#include <sys/time.h>

/* Wall-clock time in microseconds. */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6fs\n",(_m), stamp);

/* Default values for things. */
#define N        2      /* size of problem space (x, y from -N to N) */
#define NPIXELS  800    /* size of display window in pixels */

int row, col;  // file-scope variables used to traverse the problem space

/* Structure definition for complex numbers */
typedef struct {
    double real, imag;
} complex;

#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h"     /* has setup(), interact() */
#endif

/* Compute the escape count of every point in the region and either draw it
 * (X11 build) or store it into output[row][col].  One OpenMP task is created
 * per point by the single generating thread. */
void mandelbrot(int height, int width, double real_min, double imag_min,
                double scale_real, double scale_imag, int maxiter,
#if _DISPLAY_
                int setup_return, Display *display, Window win, GC gc,
                double scale_color, double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display */
    #pragma omp parallel
    #pragma omp single
    for (row = 0; row < height; ++row) {
        for (col = 0; col < width; ++col) {
            /* BUGFIX: row and col are globals; the task must capture the
             * CURRENT values of both.  The original `private(col)` left col
             * uninitialized inside every task, so the computed point was
             * garbage.  firstprivate snapshots both loop indices. */
            #pragma omp task firstprivate(row, col)
            {
            complex z, c;
            z.real = z.imag = 0;

            /* Scale display coordinates to actual region */
            c.real = real_min + ((double) col * scale_real);
            c.imag = imag_min + ((double) (height-1-row) * scale_imag);
                                /* height-1-row so y axis displays
                                 * with larger values at top */

            /* Calculate z0, z1, .... until divergence or maximum iterations */
            int k = 0;
            double lengthsq, temp;
            do  {
                temp = z.real*z.real - z.imag*z.imag + c.real;
                z.imag = 2*z.real*z.imag + c.imag;
                z.real = temp;
                lengthsq = z.real*z.real + z.imag*z.imag;
                ++k;
            } while (lengthsq < (N*N) && k < maxiter);

#if _DISPLAY_
            /* Scale color and display point */
            long color = (long) ((k-1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                /* X11 calls are not thread-safe; serialize drawing. */
                #pragma omp critical
                {
                XSetForeground (display, gc, color);
                XDrawPoint (display, win, gc, col, row);
                }
            }
#else
            output[row][col]=k;
#endif
            }
        }
    }
}

int main(int argc, char *argv[]) {
    int maxiter = 1000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width  = NPIXELS;       /* dimensions of display window */
    int height = NPIXELS;
    double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;

    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-i")==0) {
            maxiter = atoi(argv[++i]);
        } else if (strcmp(argv[i], "-w")==0) {
            width = atoi(argv[++i]);
            height = width;
        } else if (strcmp(argv[i], "-s")==0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o")==0) {
            if((fp=fopen("mandel.out", "wb"))==NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c")==0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        } else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, "       -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, "       -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, "       -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, "       -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, "       -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, "       -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }

    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return = setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    output = malloc(height*sizeof(int *));
    for (int row = 0; row < height; ++row)
        output[row] = malloc(width*sizeof(int));
#endif

    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;

#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif

    /* Start timing */
    double stamp;
    START_COUNT_TIME;

#if _DISPLAY_
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter, output);
#endif

    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    if (fp != NULL) {
        for (int row = 0; row < height; ++row)
            if(fwrite(output[row], sizeof(int), width, fp) != (size_t)width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
        fclose(fp);  /* BUGFIX: the output file was never closed */
    }
    /* BUGFIX: release the image buffer */
    for (int row = 0; row < height; ++row)
        free(output[row]);
    free(output);
#endif

#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height, real_min, real_max, imag_min, imag_max);
    }
#endif
    /* BUGFIX: the non-display build previously fell off the end of main. */
    return EXIT_SUCCESS;
}
laplace2d.c
/* * Copyright 2017 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include <string.h> #include "timer.h" #define NN 4096 #define NM 4096 double A[NN][NM]; double Anew[NN][NM]; int main(int argc, char** argv) { const int n = NN; const int m = NM; const int iter_max = 1000; const double tol = 1.0e-6; double error = 1.0; memset(A, 0, n * m * sizeof(double)); memset(Anew, 0, n * m * sizeof(double)); for (int j = 0; j < n; j++) { A[j][0] = 1.0; Anew[j][0] = 1.0; } printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); int iter = 0; while ( error > tol && iter < iter_max ) { error = 0.0; #pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); error = fmax( error, fabs(Anew[j][i] - A[j][i])); } } #pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } double runtime = GetTimer(); printf(" total: %f s\n", runtime / 1000); return 0; }
SpMat.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Narayanan Sundaram (Intel Corp.), Michael Anderson (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_SPMAT_H_ #define SRC_SPMAT_H_ #include <string> #include <algorithm> #include "GMDP/utils/binary_search.h" template <typename T> bool compare_tile_id(const tedge_t<T>& a, const tedge_t<T>& b) { if (a.tile_id < b.tile_id) return true; return false; } template <typename SpTile> class SpMat { public: int ntiles_x; int ntiles_y; int m; int n; std::vector<std::vector<SpTile*> > tiles; std::vector<int> start_idx; std::vector<int> start_idy; std::vector<int> nodeIds; friend boost::serialization::access; template<class Archive> void serialize(Archive& ar, const unsigned int version) { ar & ntiles_x; ar & ntiles_y; ar & m; ar & n; ar & tiles; ar & start_idx; ar & start_idy; ar & nodeIds; } inline int getPartition(const int src, const int dst, int* ival, int* jval) const { (*ival) = -1; (*jval) = -1; for (int i = 0; i < ntiles_y; i++) { if ((src > start_idy[i]) && (src <= start_idy[i + 1])) { (*ival) = i; break; } } for (int j = 0; j < ntiles_x; j++) { if ((dst > start_idx[j]) && (dst <= start_idx[j + 1])) { (*jval) = j; break; } } if ((*ival) == -1 || (*jval) == -1) { printf("%d %d == -1\n", src, dst); printf("idx: %d %d\n", start_idx[0], start_idx[1]); printf("idy: %d %d\n", start_idy[0], start_idy[1]); printf("ntiles_y:%d ntiles_x:%d\n", ntiles_y, ntiles_x); return -1; } return (*ival) + (*jval) * ntiles_y; } template <typename T> void ingestEdgelist(edgelist_t<T>& blob) { int global_nrank = get_global_nrank(); int global_myrank = get_global_myrank(); int nnz_l = blob.nnz; edge_t<T>* edge_list = blob.edges; int m = blob.m; int n = blob.n; printf("Rank %d: Before shuffle %d edges\n", global_myrank, blob.nnz); edge_t<T> * received_edges; unsigned long int new_nnz = 0; if(global_nrank == 1) { new_nnz = nnz_l; received_edges = new edge_t<T>[new_nnz]; memcpy(received_edges, edge_list, new_nnz * sizeof(edge_t<T>)); } else { tedge_t<T> * tedges = new tedge_t<T>[nnz_l]; #pragma omp 
parallel for for(unsigned long i = 0 ; i < nnz_l ; i++) { tedges[i].src = edge_list[i].src; tedges[i].dst = edge_list[i].dst; tedges[i].val = edge_list[i].val; int ival, jval; int tile_id = getPartition(edge_list[i].src, edge_list[i].dst, &ival, &jval); assert(tile_id != -1); tedges[i].tile_id = nodeIds[ival + jval * ntiles_y]; } __gnu_parallel::sort(tedges, tedges + nnz_l, compare_tile_id<T>); int * assignment = new int[nnz_l]; #pragma omp parallel for for(unsigned long i = 0 ; i < nnz_l ; i++) { edge_list[i].src = tedges[i].src; edge_list[i].dst = tedges[i].dst; edge_list[i].val = tedges[i].val; assignment[i] = tedges[i].tile_id; } delete [] tedges; unsigned long int * positions = new unsigned long[global_nrank+1]; unsigned long int * counts = new unsigned long[global_nrank]; unsigned long int * recv_positions = new unsigned long[global_nrank+1]; unsigned long int * recv_counts = new unsigned long[global_nrank]; unsigned long int current_count = 0; for(int i = 0 ; i < global_nrank ; i++) { int point = binary_search_right_border(assignment, i, 0, nnz_l, nnz_l); if(point == -1) { counts[i] = 0; positions[i] = current_count; } else { counts[i] = (point+1) - current_count; positions[i] = current_count; current_count = (point+1); } } positions[global_nrank] = nnz_l; MPI_Barrier(MPI_COMM_WORLD); delete [] assignment; MPI_Request* mpi_req = new MPI_Request[2 * global_nrank]; MPI_Status* mpi_status = new MPI_Status[2 * global_nrank]; for (int i = 0; i < global_nrank; i++) { MPI_Isend(&counts[i], 1, MPI_UNSIGNED_LONG, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]); } for (int i = 0; i < global_nrank; i++) { MPI_Irecv(&recv_counts[i], 1, MPI_UNSIGNED_LONG, i, i, MPI_COMM_WORLD, &mpi_req[i + global_nrank]); } MPI_Waitall(2 * global_nrank, mpi_req, mpi_status); MPI_Barrier(MPI_COMM_WORLD); recv_positions[0] = 0; for(int i = 0 ; i < global_nrank ; i++) { new_nnz += recv_counts[i]; recv_positions[i+1] = new_nnz; } printf("Rank %d: After shuffle %ld edges\n", global_myrank, 
new_nnz); MPI_Datatype MPI_EDGE_T; MPI_Type_contiguous(sizeof(edge_t<T>), MPI_CHAR, &MPI_EDGE_T); MPI_Type_commit(&MPI_EDGE_T); for (int i = 0; i < global_nrank; i++) { MPI_Isend(edge_list + positions[i], counts[i] , MPI_EDGE_T, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]); } received_edges = new edge_t<T>[new_nnz]; for (int i = 0; i < global_nrank; i++) { MPI_Irecv(received_edges + recv_positions[i], recv_counts[i] , MPI_EDGE_T, i, i, MPI_COMM_WORLD, &mpi_req[i+global_nrank]); } MPI_Waitall(2 * global_nrank, mpi_req, mpi_status); MPI_Barrier(MPI_COMM_WORLD); delete [] mpi_status; delete [] mpi_req; delete [] positions; delete [] counts; delete [] recv_positions; delete [] recv_counts; } printf("Rank %d: After shuffle %ld edges\n", global_myrank, new_nnz); tedge_t<T> * tedges2 = new tedge_t<T>[new_nnz]; #pragma omp parallel for for(unsigned long i = 0 ; i < new_nnz ; i++) { tedges2[i].src = received_edges[i].src; tedges2[i].dst = received_edges[i].dst; tedges2[i].val = received_edges[i].val; int ival, jval; // if(global_nrank == 1) // tedges2[i].tile_id = 0; // else tedges2[i].tile_id = getPartition(received_edges[i].src, received_edges[i].dst, &ival, &jval); if(tedges2[i].tile_id == -1) { printf("src:%d dst:%d val:%d\n", tedges2[i].src, tedges2[i].dst, tedges2[i].val); } assert(tedges2[i].tile_id != -1); } __gnu_parallel::sort(tedges2, tedges2 + new_nnz , compare_tile_id<T>); int * assignment2 = new int[new_nnz]; #pragma omp parallel for for(unsigned long i = 0 ; i < new_nnz ; i++) { received_edges[i].src = tedges2[i].src; received_edges[i].dst = tedges2[i].dst; received_edges[i].val = tedges2[i].val; assignment2[i] = tedges2[i].tile_id; } delete [] tedges2; for (int tile_j = 0; tile_j < ntiles_x; tile_j++) { for (int tile_i = 0; tile_i < ntiles_y; tile_i++) { if (nodeIds[tile_i + tile_j * ntiles_y] == global_myrank) { int tile_m = start_idy[tile_i + 1] - start_idy[tile_i]; int tile_n = start_idx[tile_j + 1] - start_idx[tile_j]; int this_tile_id = tile_i + 
tile_j * ntiles_y; // Find left and right int start_nz = binary_search_left_border(assignment2, this_tile_id, 0, new_nnz, new_nnz); int end_nz = binary_search_right_border(assignment2, this_tile_id, 0, new_nnz, new_nnz); int nnz = 0; if((start_nz != -1) && (end_nz != -1)) { nnz = (end_nz+1) - start_nz; } if (nnz <= 0) { tiles[tile_i][tile_j] = new SpTile(tile_m, tile_n); } else { tiles[tile_i][tile_j] = new SpTile(received_edges + start_nz, tile_m, tile_n, nnz, start_idy[tile_i], start_idx[tile_j]); } } } } delete [] assignment2; delete [] received_edges; MPI_Barrier(MPI_COMM_WORLD); } void Allocate2DPartitioned(int m, int n, int _ntiles_x, int _ntiles_y, int (*pfn)(int, int, int, int, int)) { int global_nrank = get_global_nrank(); int global_myrank = get_global_myrank(); ntiles_x = _ntiles_x; ntiles_y = _ntiles_y; assert(ntiles_x > 0); assert(ntiles_y > 0); this->m = m; this->n = n; int vx, vy; int roundup = 256; vx = ((((n + ntiles_x - 1) / ntiles_x) + roundup - 1) / roundup) * roundup; vy = ((((m + ntiles_y - 1) / ntiles_y) + roundup - 1) / roundup) * roundup; for (int j = 0; j < ntiles_x; j++) { for (int i = 0; i < ntiles_y; i++) { nodeIds.push_back(pfn(j, i, ntiles_x, ntiles_y, global_nrank)); } } for (int j = 0; j < ntiles_x; j++) { start_idx.push_back(std::min(vx * j, n)); } for (int i = 0; i < ntiles_y; i++) { start_idy.push_back(std::min(vy * i, m)); } start_idx.push_back(n); start_idy.push_back(m); // Allocate space for tiles for (int tile_i = 0; tile_i < ntiles_y; tile_i++) { std::vector<SpTile*> tmp; for (int tile_j = 0; tile_j < ntiles_x; tile_j++) { tmp.push_back((SpTile*)NULL); } tiles.push_back(tmp); } } SpMat() {} template <typename T> SpMat(edgelist_t<T> edgelist, int ntx, int nty, int (*pfn)(int, int, int, int, int)) { Allocate2DPartitioned(edgelist.m, edgelist.n, ntx, nty, pfn); ingestEdgelist(edgelist); } ~SpMat() { for(auto it1 = tiles.begin() ; it1 != tiles.end() ; it1++) { for(auto it2 = it1->begin() ; it2 != it1->end() ; it2++) { delete 
*it2; } } } template <typename T> void get_edges(edgelist_t<T>* edgelist) const { int global_nrank = get_global_nrank(); int global_myrank = get_global_myrank(); // Get nnz int nnzs = 0; for (int i = 0; i < ntiles_y; i++) { for (int j = 0; j < ntiles_x; j++) { if (nodeIds[i + j * ntiles_y] == global_myrank) { nnzs += tiles[i][j]->nnz; } } } edgelist->m = m; edgelist->n = n; edgelist->nnz = nnzs; if(nnzs > 0) { edgelist->edges = reinterpret_cast<edge_t<T>*>( _mm_malloc((uint64_t)nnzs * (uint64_t)sizeof(edge_t<T>), 64)); nnzs = 0; for (int i = 0; i < ntiles_y; i++) { for (int j = 0; j < ntiles_x; j++) { if (nodeIds[i + j * ntiles_y] == global_myrank) { tiles[i][j] ->get_edges(edgelist->edges + nnzs, start_idy[i], start_idx[j]); nnzs += tiles[i][j]->nnz; } } } } } uint64_t getNNZ() { int global_myrank = get_global_myrank(); uint64_t total_nnz = 0; for(int i = 0 ; i < ntiles_y ; i++) { for(int j = 0 ; j < ntiles_x ; j++) { if(nodeIds[i + j * ntiles_y] == global_myrank) { total_nnz += tiles[i][j]->nnz; } } } // global reduction MPI_Allreduce(MPI_IN_PLACE, &total_nnz, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); return total_nnz; } }; template <template <typename> class SpTile, typename T> void get_row_ranks(const SpMat<SpTile<T> >* mat, std::vector<std::set<int> >* row_ranks_out, std::vector<std::set<int> >* col_ranks_out) { for (int i = 0; i < mat->ntiles_y; i++) { // Create set of row nodeIDs std::set<int> row_ranks; for (int j = 0; j < mat->ntiles_x; j++) { row_ranks.insert(mat->nodeIds[i + j * mat->ntiles_y]); } row_ranks_out->push_back(row_ranks); } for (int j = 0; j < mat->ntiles_x; j++) { // Create set of col nodeIDs std::set<int> col_ranks; for (int i = 0; i < mat->ntiles_y; i++) { col_ranks.insert(mat->nodeIds[i + j * mat->ntiles_y]); } col_ranks_out->push_back(col_ranks); } } template <template <typename> class SpTile, typename T> void Transpose(const SpMat<SpTile<T> >* mat, SpMat<SpTile<T> >** matc, int ntx, int nty, int (*pfn)(int, int, int, int, int)) { 
edgelist_t<T> edgelist; mat->get_edges(&edgelist); #pragma omp parallel for for (int i = 0; i < edgelist.nnz; i++) { int tmp = edgelist.edges[i].src; edgelist.edges[i].src = edgelist.edges[i].dst; edgelist.edges[i].dst = tmp; } int tmp = edgelist.m; edgelist.m = edgelist.n; edgelist.n = tmp; (*matc) = new SpMat<SpTile<T> >(edgelist, ntx, nty, pfn); if(edgelist.nnz > 0) { _mm_free(edgelist.edges); } } #endif // SRC_SPMAT_H_
sindex_query_old.h
// Legacy GPU-assisted learned-index lookup: a linear model predicts the key
// position; fixed-size chunks (QUERYSIZE) around that prediction are copied
// to the device and scanned by query_kernel, with host-side logic steering
// which chunk to try next.
#include <assert.h>
#include <math.h>
#include "helpers.h"
#include "globals.h"
#include <iostream>
#include <algorithm>
#include <chrono>
#include <immintrin.h>

#ifndef _SINDEXQUERYOLD_
#define _SINDEXQUERYOLD_

// Device-assisted search for `key` inside keys[query_start, query_end).
// left/mid/right are candidate chunk origins (UINT32_MAX == "no chunk");
// the mid chunk is scanned each iteration and the window is re-centered
// left or right until the position is found.  Returns the index of the
// greatest key <= `key` within the range.
inline uint32_t query_range(
    ky_t* dev_key, ky_t &key, ky_t* keys,
    uint32_t query_start, uint32_t query_end,
    uint32_t left, uint32_t mid, uint32_t right,
    ky_t* query_buffer, uint32_t querysize,
    int_t* dev_pos, int_t &hst_pos) {
    QueryStatus result;
    do {
        // Upload the current mid chunk and reset the device result slot to
        // the "not found" sentinel (int_max).
        assert(cudaMemcpy(query_buffer, keys + mid, querysize * sizeof(ky_t), cudaMemcpyHostToDevice) == cudaSuccess);
        assert(cudaMemcpy(dev_pos, &int_max, sizeof(int_t), cudaMemcpyHostToDevice) == cudaSuccess);
        // run kernel for mid buffer in the mean time
        if (right != UINT32_MAX) {
            querysize = (right - mid < QUERYSIZE) ? right - mid : QUERYSIZE;
        } else {
            querysize = (query_end - mid < QUERYSIZE) ? query_end - mid : QUERYSIZE;
        }
        query_kernel
            <<<get_block_num(querysize), BLOCKSIZE, BLOCKSIZE / 32 * sizeof(int_t)>>>
            (dev_key, query_buffer, querysize, dev_pos);
        assert(cudaGetLastError() == cudaSuccess);
        // get result from mid buffer
        assert(cudaMemcpy(&hst_pos, dev_pos, sizeof(int_t), cudaMemcpyDeviceToHost) == cudaSuccess);
        // evaluate result
        if (hst_pos != UINT32_MAX) {
            if (memcmp(&key, keys + mid + hst_pos, sizeof(ky_t)) == 0) {
                // Exact hit inside the chunk.
                result = found_target;
                return mid + hst_pos;
            } else if (hst_pos > 0) {
                // Kernel returned the first key >= target; step back one
                // slot when the target is smaller than the reported key.
                if (memcmp(&key, keys + mid + hst_pos, sizeof(ky_t)) < 0) {
                    --hst_pos;
                }
                result = found_target;
                return mid + hst_pos;
            } else {
                result = target_left;
            }
        } else {
            result = target_right;
        }
        if (result != found_target) {
            // Re-center the search window on the left or right candidate.
            switch (result) {
                case target_left:
                    query_end = mid;
                    mid = left;
                    break;
                case target_right:
                    query_start = mid + QUERYSIZE;
                    mid = right;
                    break;
            }
            // Recompute left/right chunk origins for the shrunken range.
            if (query_end - query_start <= QUERYSIZE) {
                if (mid > query_start) {
                    left = query_start;
                } else {
                    left = UINT32_MAX;
                }
                right = UINT32_MAX;
            } else if (query_end - query_start <= 2 * QUERYSIZE) {
                if (query_start < mid) {
                    left = query_start;
                } else {
                    left = UINT32_MAX;
                }
                if (query_start + QUERYSIZE < mid) {
                    right = query_start + QUERYSIZE;
                } else {
                    right = UINT32_MAX;
                }
            } else {
                left = (mid + query_start - QUERYSIZE) / 2;
                right = (query_end + mid + QUERYSIZE) / 2;
            }
        }
    } while (result != found_target);
    // NOTE(review): every found_target path returns inside the loop, so the
    // while-condition can never be false here -- but if control ever reached
    // this point the function would fall off the end of a non-void function
    // (undefined behavior).  An explicit abort()/unreachable marker would be
    // safer; confirm intended invariant.
}

// Locate `key` inside one model group: evaluate the group's linear model
// over the selected key features, clamp prediction +/- error bounds to the
// group's [start, start+m) range, then delegate to query_range.
inline uint32_t get_position_from_group(
    const group_t* group, ky_t &key, ky_t* keys,
    ky_t* dev_key, int_t* dev_pos, ky_t* query_buffer) {
    // determine query range
    fp_t prediction = 0;
    for (ky_size_t feat_i = 0; feat_i < group->n + 1; ++feat_i) {
        if (feat_i == group->n) {
            // Last weight is the bias term.
            prediction += *(group->weights + feat_i);
        } else {
            ky_size_t char_i = *(group->feat_indices + feat_i);
            ch_t character = *(((ch_t*) key) + char_i);
            fp_t weight = *(group->weights + feat_i);
            prediction += weight * ((fp_t) character);
        }
    }
    uint32_t query_start;
    uint32_t query_end;
    uint32_t left;
    uint32_t right;
    uint32_t mid;
    uint32_t querysize;
    // shift query borders
    if ((int64_t) (prediction - group->left_err) - 1 < (int64_t) group->start || prediction - group->left_err - 1 < 0) {
        query_start = group->start;
    } else if ((int64_t) (prediction - group->left_err) - 1 > (int64_t) (group->start + group->m)) {
        // Whole error window lies right of the group: clamp to last entry.
        return group->start + group->m - 1;
    } else {
        query_start = (uint32_t) (prediction - group->left_err) - 1;
    }
    if ((int64_t) ceil(prediction - group->right_err) + 1 < (int64_t) group->start || ceil(prediction - group->right_err) + 1 < 0) {
        // Whole error window lies left of the group: clamp to first entry.
        return group->start;
    } else if ((int64_t) ceil(prediction - group->right_err) + 1 > (int64_t) (group->start + group->m)) {
        query_end = group->start + group->m;
    } else {
        query_end = ceil(prediction - group->right_err) + 1;
    }
    int_t hst_pos;
    if (query_start == query_end - 1) {
        // Single-candidate range; no device work needed.
        hst_pos = query_start;
    } else {
        if (prediction < group->start) {
            prediction = group->start;
        } else if (prediction >= group->start + group->m) {
            prediction = group->start + group->m - 1;
        }
        // kernel indices
        if (query_end - query_start <= QUERYSIZE) {
            left = UINT32_MAX;
            right = UINT32_MAX;
            mid = query_start;
        } else if (query_end - query_start <= 2 * QUERYSIZE) {
            if (prediction < query_start + QUERYSIZE) {
                left = UINT32_MAX;
                mid = query_start;
                right = query_start + QUERYSIZE;
            } else {
                left = query_start;
                mid = query_end - QUERYSIZE;
                right = UINT32_MAX;
            }
        } else {
            if (prediction - query_start < 0.5 * QUERYSIZE) {
                left = UINT32_MAX;
                mid = query_start;
                right = (query_end + mid + QUERYSIZE) / 2;
            } else if (query_end - prediction < 0.5 * QUERYSIZE) {
                right = UINT32_MAX;
                mid = query_end - QUERYSIZE - 1;
                left = (mid + query_start - QUERYSIZE) / 2;
            } else {
                // Center the mid chunk on the prediction, halve toward both
                // range ends for the fallback chunks.
                mid = (uint32_t) (prediction - QUERYSIZE / 2);
                querysize = (mid - query_start < QUERYSIZE) ? mid - query_start : QUERYSIZE;
                left = (mid + query_start - querysize) / 2;
                querysize = (query_end - mid < QUERYSIZE) ? query_end - mid : QUERYSIZE;
                right = (query_end + mid + querysize) / 2;
            }
        }
        if (right != UINT32_MAX) {
            querysize = (right - mid < QUERYSIZE) ? right - mid : QUERYSIZE;
        } else {
            querysize = (query_end - mid < QUERYSIZE) ? query_end - mid : QUERYSIZE;
        }
        // hst_pos is passed by reference as scratch; it is written before
        // use inside query_range.
        hst_pos = query_range(
            dev_key, key, keys,
            query_start, query_end,
            left, mid, right,
            query_buffer, querysize,
            dev_pos, hst_pos
        );
    }
    return hst_pos;
}

// Full three-level lookup: scan root pivots on the device to pick a root,
// descend root -> group via the learned models, then resolve the final key
// position inside the chosen group.
inline uint32_t get_position_from_index(
    const index_t* index, ky_t &key, ky_t* keys,
    ky_t* dev_key, int_t* dev_pos, ky_t* query_buffer) {
    int_t hst_pos = UINT32_MAX;
    // Upload the probe key once; all kernels below reuse it.
    assert(cudaMemcpy(dev_key, &key, sizeof(ky_t), cudaMemcpyHostToDevice) == cudaSuccess);
    uint32_t query_start;
    uint32_t query_end;
    uint32_t left;
    uint32_t right;
    uint32_t mid;
    uint32_t querysize;
    if (index->root_n > 0) {
        query_start = 0;
        query_end = index->root_n;
        if (query_start == query_end - 1) {
            hst_pos = query_start;
        } else {
            // kernel indices
            if (query_end - query_start <= QUERYSIZE) {
                left = UINT32_MAX;
                right = UINT32_MAX;
                mid = query_start;
            } else if (query_end - query_start <= 2 * QUERYSIZE) {
                left = UINT32_MAX;
                mid = query_start;
                right = query_end - QUERYSIZE;
            } else {
                mid = (uint32_t) (query_end - query_start - QUERYSIZE / 2);
                left = (mid + query_start - QUERYSIZE) / 2;
                right = (query_end + mid + QUERYSIZE) / 2;
            }
            if (right != UINT32_MAX) {
                querysize = (right - mid < QUERYSIZE) ? right - mid : QUERYSIZE;
            } else {
                querysize = (query_end - mid < QUERYSIZE) ? query_end - mid : QUERYSIZE;
            }
            hst_pos = query_range(
                dev_key, key, index->root_pivots,
                query_start, query_end,
                left, mid, right,
                query_buffer, querysize,
                dev_pos, hst_pos
            );
        }
    } else {
        // No root layer: single root at slot 0.
        hst_pos = 0;
    }
    group_t* root = index->roots + hst_pos;
    hst_pos = get_position_from_group(
        root, key, index->group_pivots, dev_key, dev_pos, query_buffer
    );
    group_t* group = index->groups + hst_pos;
    hst_pos = get_position_from_group(
        group, key, keys, dev_key, dev_pos,query_buffer
    );
    return hst_pos;
}

/////////////////////////////
// CPU-only query path below
/////////////////////////////

// Linear scan over at most CPUCORES keys; returns the index of the greatest
// key <= `key` in [query_start, query_end), or query_start if none is.
inline uint32_t search(ky_t* key, ky_t* keys, uint32_t query_start, uint32_t query_end) {
    uint32_t pos = query_start;
    assert(query_start <= query_end);
    assert(query_end - query_start <= CPUCORES);
    //#pragma omp parallel for num_threads(query_end - query_start) reduction(max:pos)
    for (uint32_t thread_i = query_start; thread_i < query_end; ++thread_i) {
        if (memcmp(keys + thread_i, key, sizeof(ky_t)) <= 0) {
            pos = std::max(pos, thread_i);
        }
    }
    return pos;
}

// Exponential (galloping) probe from query_start toward query_end, testing
// CPUCORES power-of-two offsets per round; returns a boundary index that
// brackets the target.  Supports both directions (query_start >/< query_end).
inline uint32_t exponential(
    ky_t *key, ky_t* keys, uint32_t query_start, uint32_t query_end) {
    uint32_t pos;  // NOTE(review): never used -- dead local.
    uint32_t exponent = 0;
    uint32_t boundary_exponent = 0;
    uint32_t boundary = query_start;
    bool finished = false;
    while (!finished) {
        //#pragma omp parallel for num_threads(CPUCORES) reduction(max:boundary_exponent)
        for (uint32_t thread_i = 0; thread_i < CPUCORES; ++thread_i) {
            int64_t index;
            int64_t cmp;
            if (query_start < query_end) {
                // Probe rightwards at offset 2^(exponent + thread_i).
                index = query_start + pow(2, exponent + thread_i);
                if (index < query_end) {
                    cmp = memcmp(keys + index, key, sizeof(ky_t));
                } else {
                    cmp = 1;
                }
            } else {
                // Probe leftwards.
                index = query_start - pow(2, exponent + thread_i);
                if (index > query_end) {
                    cmp = memcmp(key, keys + index, sizeof(ky_t));
                } else {
                    cmp = 1;
                }
            }
            if (cmp <= 0) {
                boundary_exponent = std::max(exponent + thread_i, boundary_exponent);
            } else {
                finished = true;
            }
        }
        exponent += CPUCORES;
    }
    uint32_t power = pow(2, boundary_exponent + 1);
    if (query_start > query_end) {
        // NOTE(review): unsigned subtraction -- if power > boundary this
        // wraps around before the clamp below catches it; appears to rely on
        // wraparound producing a huge value > query_end.  Verify intent.
        boundary -= power;
        if (boundary < query_end) {
            boundary = query_end;
        }
    } else {
        boundary += power;
        if (boundary > query_end) {
            boundary = query_end;
        }
    }
    return boundary;
}

// CPUCORES-ary search: each round samples CPUCORES evenly spaced probes in
// [query_start, query_end), keeps the greatest probe <= key, and narrows the
// range; finishes with a linear search on the final sub-interval.
inline uint32_t binary(
    ky_t* key, ky_t* keys, uint32_t query_start, uint32_t query_end) {
    assert(query_start <= query_end);
    uint32_t pos = query_start;
    while(query_start + CPUCORES < query_end) {
        uint32_t interval_len = safe_division(query_end - query_start, CPUCORES);
        #pragma omp parallel for num_threads(CPUCORES) reduction(max:pos)
        for (uint32_t thread_i = 0; thread_i < CPUCORES; ++thread_i) {
            uint32_t index = query_start + thread_i * interval_len;
            if (memcmp(keys + index, key, sizeof(ky_t)) <= 0) {
                pos = std::max(pos, index);
            }
        }
        query_start = pos;
        query_end = query_start + interval_len;
    }
    pos = search(key, keys, query_start, query_end);
    return pos;
}

// Evaluate the linear model for `key` with AVX2 (4-wide FMA over the
// selected features), falling back to a scalar loop for n < 4 and for the
// remainder features; the bias weight (index n) is added at the end.
inline fp_t predict256(
    ky_t* key, ky_size_t* feat_indices, fp_t* weights, ky_size_t n) {
    // debug
    // NOTE(review): prediction2 below is a scalar reference computation that
    // is never used afterwards -- debug leftovers.
    fp_t prediction2 = 0;
    for (ky_size_t feat_i = 0; feat_i < n + 1; ++feat_i) {
        if (feat_i == n) {
            prediction2 += *(weights + feat_i);
        } else {
            ky_size_t char_i = *(feat_indices + feat_i);
            ch_t character = *(((ch_t*) key) + char_i);
            fp_t weight = *(weights + feat_i);
            prediction2 += weight * ((fp_t) character);
        }
    }
    // copy key values into double array
    fp_t key_vals[n];
    //#pragma omp parallel for num_threads(CPUCORES)
    for (ky_size_t feat_i = 0; feat_i < n; ++feat_i) {
        *(key_vals + feat_i) = *(((ch_t*) key) + *(feat_indices + feat_i));
    }
    fp_t prediction = 0;
    // check if registers are necessary
    if (n < 4) {
        //#pragma omp parallel for num_threads(3) reduction(+:prediction)
        for (ky_size_t feat_i = 0; feat_i < n; ++feat_i) {
            prediction += *(key_vals + feat_i) * *(weights + feat_i);
        }
    } else {
        // NOTE(review): K and W are indexed by vector_i which runs up to
        // n >> 2; if n/4 ever exceeds CPUCORES this writes past the arrays
        // (stack buffer overflow).  A single reused __m256d pair would
        // suffice -- confirm n is bounded by 4*CPUCORES.
        __m256d K[CPUCORES];
        __m256d W[CPUCORES];
        __m256d S = _mm256_setzero_pd();
        // n divided by 4
        //#pragma omp parallel for num_threads(CPUCORES)
        for (ky_size_t vector_i = 0; vector_i < (n >> 2); ++vector_i) {
            // copy to double array
            // load registers
            *(K + vector_i) = _mm256_loadu_pd(key_vals + 4 * vector_i);
            *(W + vector_i) = _mm256_loadu_pd(weights + 4 * vector_i);
            // fused multiply add
            S = _mm256_fmadd_pd(*(K + vector_i), *(W + vector_i), S);
        }
        // Horizontal add: lanes 0+1 and 2+3, then combine the two halves.
        S = _mm256_hadd_pd(S, S);
        prediction += *((fp_t*) &S) + *(((fp_t*) &S) + 2);
        //#pragma omp parallel for num_threads(3) reduction(+:prediction)
        for (size_t feat_i = (n & (~3)); feat_i < n; ++feat_i) {
            prediction += *(key_vals + feat_i) * *(weights + feat_i);
        }
    }
    // add y-shift
    prediction += *(weights + n);
    return prediction;
}

// CPU lookup inside one group: predict, scan a CPUCORES-wide window around
// the prediction, and fall back to exponential + k-ary search when the
// target lies outside that window.
inline uint32_t query_group(ky_t* key, group_t* group, ky_t* keys) {
    uint32_t pos;
    // calculate prediction
    fp_t prediction = predict256(key, group->feat_indices, group->weights, group->n);
    // set boundaries by error
    // boundaries are last possible indices !
    // int64_t left_boundary = floor(prediction - group->left_err) - 1;
    // int64_t right_boundary = ceil (prediction - group->right_err) + 2;
    // // shift boundaries into group
    // if (left_boundary < group->start) {
    //     left_boundary = group->start;
    // } else if (left_boundary > group->start + group->m - 1) {
    //     left_boundary = group->start + group->m - 1;
    // }
    // if (right_boundary < group->start) {
    //     right_boundary = group->start;
    // } else if (right_boundary > group->start + group->m - 1) {
    //     right_boundary = group->start + group->m - 1;
    // }
    // // get result if boundaries are small
    // if (right_boundary - left_boundary < CPUCORES) {
    //     return search(key, keys, left_boundary, right_boundary + 1);
    // }
    uint32_t left_boundary = group->start;
    uint32_t right_boundary = group->start + group->m - 1;
    // query start is first element
    int64_t query_start = round(prediction) - CPUCORES / 2;
    // shift query start
    if (query_start < left_boundary) {
        query_start = left_boundary;
    } else if (query_start > right_boundary + 1) {
        query_start = right_boundary + 1;
    }
    // query end is last element not in query
    int64_t query_end = query_start + CPUCORES;
    // shift query end
    if (query_end < left_boundary) {
        query_end = left_boundary;
    } else if (query_end > right_boundary + 1) {
        query_end = right_boundary + 1;
    }
    // search around prediction
    pos = search(key, keys, query_start, query_end);
    // position found
    if (pos > query_start && pos < query_end - 1 ||
        memcmp(key, keys + pos, sizeof(ky_t)) == 0 ||
        pos == right_boundary ||
        pos < right_boundary && memcmp(key, keys + pos, sizeof(ky_t)) > 0 && memcmp(key, keys + pos + 1, sizeof(ky_t)) < 0) {
        return pos;
    }
    uint32_t boundary;
    // determine range and direction
    // NOTE(review): if pos matches neither branch below, boundary stays
    // uninitialized before the abs()/exponential() uses -- the early-return
    // conditions above presumably make that impossible; worth verifying.
    if (pos == query_start) {
        // search left
        --query_start;
        boundary = left_boundary;
    } else if (pos == query_start + CPUCORES - 1) {
        // search right
        query_start = query_start + CPUCORES;
        boundary = right_boundary;
    }
    if (abs(boundary - query_start) < CPUCORES) {
        if (query_start < boundary) {
            pos = search(key, keys, query_start, boundary + 1);
        } else {
            pos = search(key, keys, boundary, query_start + 1);
        }
        return pos;
    }
    boundary = exponential(key, keys, query_start, boundary);
    if (query_start < boundary) {
        pos = binary(key, keys, query_start, boundary + 1);
    } else {
        pos = binary(key, keys, boundary, query_start + 1);
    }
    return pos;
}

// CPU-only three-level lookup: k-ary search over root pivots, then
// group-level queries to descend root -> group -> key position.
inline uint32_t get_position_from_index2(const index_t* index, ky_t* key, ky_t* keys) {
    uint32_t pos;
    pos = binary(key, index->root_pivots, 0, index->root_n);
    group_t* root_i = ((group_t*) index->roots) + pos;
    pos = query_group(key, root_i, index->group_pivots);
    group_t* group_i = ((group_t*) index->groups) + pos;
    pos = query_group(key, group_i, keys);
    return pos;
}

#endif // _SINDEXQUERYOLD_
convolution_3x3_pack8to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Pre-transform the 3x3 int8 kernel into the Winograd F(4,3) domain (6x6
// per input/output channel pair) and interleave it for the pack8-to-pack4
// GEMM used by conv3x3s1_winograd42_pack8to4_int8_msa below.
static void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_msa(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
    // winograd42 transform kernel
    Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);

    // G matrix of F(4,3), scaled by 6 so arithmetic stays integral.
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 6}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h : G * g  (6x3 result)
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : (G * g) * Gt  (6x6 result)
            for (int j = 0; j < 6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 4b-8a-inch/8a-36-outch/4b
    kernel_tm_pack8.create(inch / 8, 36, outch / 4, (size_t)2u * 32, 32);

    int q = 0;
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);

        // shadows the outer kernel_tm on purpose: destination channel view
        Mat kernel_tm = kernel_tm_pack8.channel(q / 4);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = kernel_tm.row<short>(k);

            for (int p = 0; p + 7 < inch; p += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    const short* k00 = k0.row<const short>(p + i);
                    const short* k10 = k1.row<const short>(p + i);
                    const short* k20 = k2.row<const short>(p + i);
                    const short* k30 = k3.row<const short>(p + i);

                    g00[0] = k00[k];
                    g00[1] = k10[k];
                    g00[2] = k20[k];
                    g00[3] = k30[k];

                    g00 += 4;
                }
            }
        }
    }
}

// Winograd F(4,3) int8 3x3 stride-1 convolution, pack8 input -> pack4
// int32 output, using MIPS MSA intrinsics.  Stages: pad input to 4n+2,
// transform input tiles (Bt d B), batched dot product against the
// pre-transformed kernel, inverse-transform (At M A), then crop.
static void conv3x3s1_winograd42_pack8to4_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    // size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);

        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 = 4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 = 2 * (r01 - r03) + r04 - r02
        // 5 = 4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            short tmp[6][6][8];

            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8;

                    // Row pass of the input transform: widen int8 -> int16
                    // (sign-extend via clti mask + interleave) then apply Bt.
                    for (int m = 0; m < 6; m++)
                    {
                        v16i8 _r00_01 = __msa_ld_b(r0, 0);
                        v16i8 _r02_03 = __msa_ld_b(r0 + 16, 0);
                        v16i8 _r04_05 = __msa_ld_b(r0 + 32, 0);
                        v16i8 _extr0001 = __msa_clti_s_b(_r00_01, 0);
                        v16i8 _extr0203 = __msa_clti_s_b(_r02_03, 0);
                        v16i8 _extr0405 = __msa_clti_s_b(_r04_05, 0);
                        v8i16 _r00 = (v8i16)__msa_ilvr_b(_extr0001, _r00_01);
                        v8i16 _r01 = (v8i16)__msa_ilvl_b(_extr0001, _r00_01);
                        v8i16 _r02 = (v8i16)__msa_ilvr_b(_extr0203, _r02_03);
                        v8i16 _r03 = (v8i16)__msa_ilvl_b(_extr0203, _r02_03);
                        v8i16 _r04 = (v8i16)__msa_ilvr_b(_extr0405, _r04_05);
                        v8i16 _r05 = (v8i16)__msa_ilvl_b(_extr0405, _r04_05);

                        v8i16 _v5 = __msa_fill_h(5);

                        v8i16 _tmp0m = __msa_subv_h(__msa_addv_h(__msa_slli_h(_r00, 2), _r04), __msa_mulv_h(_r02, _v5));
                        v8i16 _tmp1m = __msa_subv_h(__msa_addv_h(_r04, _r03), __msa_slli_h(__msa_addv_h(_r01, _r02), 2));
                        v8i16 _tmp2m = __msa_addv_h(__msa_subv_h(_r04, _r03), __msa_slli_h(__msa_subv_h(_r01, _r02), 2));
                        v8i16 _tmp3m = __msa_subv_h(__msa_subv_h(_r04, _r02), __msa_slli_h(__msa_subv_h(_r01, _r03), 1));
                        v8i16 _tmp4m = __msa_addv_h(__msa_subv_h(_r04, _r02), __msa_slli_h(__msa_subv_h(_r01, _r03), 1));
                        v8i16 _tmp5m = __msa_subv_h(__msa_addv_h(__msa_slli_h(_r01, 2), _r05), __msa_mulv_h(_r03, _v5));

                        __msa_st_h(_tmp0m, tmp[0][m], 0);
                        __msa_st_h(_tmp1m, tmp[1][m], 0);
                        __msa_st_h(_tmp2m, tmp[2][m], 0);
                        __msa_st_h(_tmp3m, tmp[3][m], 0);
                        __msa_st_h(_tmp4m, tmp[4][m], 0);
                        __msa_st_h(_tmp5m, tmp[5][m], 0);

                        r0 += w * 8;
                    }

                    short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8;
                    short* r0_tm_1 = r0_tm_0 + tiles * 8;
                    short* r0_tm_2 = r0_tm_0 + tiles * 16;
                    short* r0_tm_3 = r0_tm_0 + tiles * 24;
                    short* r0_tm_4 = r0_tm_0 + tiles * 32;
                    short* r0_tm_5 = r0_tm_0 + tiles * 40;

                    // Column pass: same Bt applied to the transposed tile,
                    // scattered into the 36 per-frequency planes.
                    for (int m = 0; m < 6; m++)
                    {
                        v8i16 _tmp00 = __msa_ld_h(tmp[m][0], 0);
                        v8i16 _tmp01 = __msa_ld_h(tmp[m][1], 0);
                        v8i16 _tmp02 = __msa_ld_h(tmp[m][2], 0);
                        v8i16 _tmp03 = __msa_ld_h(tmp[m][3], 0);
                        v8i16 _tmp04 = __msa_ld_h(tmp[m][4], 0);
                        v8i16 _tmp05 = __msa_ld_h(tmp[m][5], 0);

                        v8i16 _v5 = __msa_fill_h(5);

                        v8i16 _r0tm0 = __msa_subv_h(__msa_addv_h(__msa_slli_h(_tmp00, 2), _tmp04), __msa_mulv_h(_tmp02, _v5));
                        v8i16 _r0tm1 = __msa_subv_h(__msa_addv_h(_tmp04, _tmp03), __msa_slli_h(__msa_addv_h(_tmp01, _tmp02), 2));
                        v8i16 _r0tm2 = __msa_addv_h(__msa_subv_h(_tmp04, _tmp03), __msa_slli_h(__msa_subv_h(_tmp01, _tmp02), 2));
                        v8i16 _r0tm3 = __msa_subv_h(__msa_subv_h(_tmp04, _tmp02), __msa_slli_h(__msa_subv_h(_tmp01, _tmp03), 1));
                        v8i16 _r0tm4 = __msa_addv_h(__msa_subv_h(_tmp04, _tmp02), __msa_slli_h(__msa_subv_h(_tmp01, _tmp03), 1));
                        v8i16 _r0tm5 = __msa_subv_h(__msa_addv_h(__msa_slli_h(_tmp01, 2), _tmp05), __msa_mulv_h(_tmp03, _v5));

                        __msa_st_h(_r0tm0, r0_tm_0, 0);
                        __msa_st_h(_r0tm1, r0_tm_1, 0);
                        __msa_st_h(_r0tm2, r0_tm_2, 0);
                        __msa_st_h(_r0tm3, r0_tm_3, 0);
                        __msa_st_h(_r0tm4, r0_tm_4, 0);
                        __msa_st_h(_r0tm5, r0_tm_5, 0);

                        r0_tm_0 += tiles * 48;
                        r0_tm_1 += tiles * 48;
                        r0_tm_2 += tiles * 48;
                        r0_tm_3 += tiles * 48;
                        r0_tm_4 += tiles * 48;
                        r0_tm_5 += tiles * 48;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);

        // Repack tiles in pairs so the GEMM below streams contiguously.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 1 < tiles; i += 2)
            {
                short* tmpptr = tm2.row<short>(i / 2);

                const short* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    v8i16 _r0 = __msa_ld_h(r0, 0);
                    v8i16 _r1 = __msa_ld_h(r0 + 8, 0);
                    __msa_st_h(_r0, tmpptr, 0);
                    __msa_st_h(_r1, tmpptr + 8, 0);
                    r0 += bottom_blob_tm.cstep * 8;
                    tmpptr += 16;
                }
            }
            for (; i < tiles; i++)
            {
                short* tmpptr = tm2.row<short>(i / 2 + i % 2);

                const short* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    v8i16 _r0 = __msa_ld_h(r0, 0);
                    __msa_st_h(_r0, tmpptr, 0);
                    r0 += bottom_blob_tm.cstep * 8;
                    tmpptr += 8;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 4u * 4, 4, opt.workspace_allocator);

        // int16 x int16 -> int32 GEMM per frequency plane; 2 tiles at a
        // time, widening the packed weights to int32 lanes.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            int* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 1 < tiles; i += 2)
                {
                    const short* r0 = bb2.row<const short>(i / 2);
                    const short* k0 = kernel0_tm.row<const short>(r);

                    int nn = inch; // inch always > 0

                    v4i32 _sum0 = __msa_fill_w(0);
                    v4i32 _sum1 = __msa_fill_w(0);
                    v4i32 _sum2 = __msa_fill_w(0);
                    v4i32 _sum3 = __msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 64);
                        __builtin_prefetch(k0 + 128);
                        v8i16 _w0 = __msa_ld_h(k0, 0);
                        v8i16 _w1 = __msa_ld_h(k0 + 8, 0);
                        v8i16 _w2 = __msa_ld_h(k0 + 16, 0);
                        v8i16 _w3 = __msa_ld_h(k0 + 24, 0);

                        v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
                        v8i16 _extw1 = __msa_clti_s_h(_w1, 0);
                        v8i16 _extw2 = __msa_clti_s_h(_w2, 0);
                        v8i16 _extw3 = __msa_clti_s_h(_w3, 0);

                        v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
                        v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);
                        v4i32 _w1l = (v4i32)__msa_ilvr_h(_extw1, _w1);
                        v4i32 _w1h = (v4i32)__msa_ilvl_h(_extw1, _w1);
                        v4i32 _w2l = (v4i32)__msa_ilvr_h(_extw2, _w2);
                        v4i32 _w2h = (v4i32)__msa_ilvl_h(_extw2, _w2);
                        v4i32 _w3l = (v4i32)__msa_ilvr_h(_extw3, _w3);
                        v4i32 _w3h = (v4i32)__msa_ilvl_h(_extw3, _w3);

                        v4i32 _val0_0 = __msa_fill_w(r0[0]);
                        v4i32 _val0_1 = __msa_fill_w(r0[1]);
                        v4i32 _val0_2 = __msa_fill_w(r0[2]);
                        v4i32 _val0_3 = __msa_fill_w(r0[3]);
                        v4i32 _val0_4 = __msa_fill_w(r0[4]);
                        v4i32 _val0_5 = __msa_fill_w(r0[5]);
                        v4i32 _val0_6 = __msa_fill_w(r0[6]);
                        v4i32 _val0_7 = __msa_fill_w(r0[7]);
                        v4i32 _val1_0 = __msa_fill_w(r0[8]);
                        v4i32 _val1_1 = __msa_fill_w(r0[9]);
                        v4i32 _val1_2 = __msa_fill_w(r0[10]);
                        v4i32 _val1_3 = __msa_fill_w(r0[11]);
                        v4i32 _val1_4 = __msa_fill_w(r0[12]);
                        v4i32 _val1_5 = __msa_fill_w(r0[13]);
                        v4i32 _val1_6 = __msa_fill_w(r0[14]);
                        v4i32 _val1_7 = __msa_fill_w(r0[15]);

                        _sum0 = __msa_maddv_w(_sum0, _w0l, _val0_0);
                        _sum1 = __msa_maddv_w(_sum1, _w0h, _val0_1);
                        _sum2 = __msa_maddv_w(_sum2, _w0l, _val1_0);
                        _sum3 = __msa_maddv_w(_sum3, _w0h, _val1_1);
                        _sum0 = __msa_maddv_w(_sum0, _w1l, _val0_2);
                        _sum1 = __msa_maddv_w(_sum1, _w1h, _val0_3);
                        _sum2 = __msa_maddv_w(_sum2, _w1l, _val1_2);
                        _sum3 = __msa_maddv_w(_sum3, _w1h, _val1_3);
                        _sum0 = __msa_maddv_w(_sum0, _w2l, _val0_4);
                        _sum1 = __msa_maddv_w(_sum1, _w2h, _val0_5);
                        _sum2 = __msa_maddv_w(_sum2, _w2l, _val1_4);
                        _sum3 = __msa_maddv_w(_sum3, _w2h, _val1_5);
                        _sum0 = __msa_maddv_w(_sum0, _w3l, _val0_6);
                        _sum1 = __msa_maddv_w(_sum1, _w3h, _val0_7);
                        _sum2 = __msa_maddv_w(_sum2, _w3l, _val1_6);
                        _sum3 = __msa_maddv_w(_sum3, _w3h, _val1_7);

                        r0 += 16;
                        k0 += 32;
                    }

                    _sum0 = __msa_addv_w(_sum0, _sum1);
                    _sum2 = __msa_addv_w(_sum2, _sum3);

                    __msa_st_w(_sum0, output0_tm, 0);
                    __msa_st_w(_sum2, output0_tm + 4, 0);
                    output0_tm += 8;
                }
                for (; i < tiles; i++)
                {
                    const short* r0 = bb2.row<const short>(i / 2 + i % 2);
                    const short* k0 = kernel0_tm.row<const short>(r);

                    int nn = inch; // inch always > 0

                    v4i32 _sum0 = __msa_fill_w(0);
                    v4i32 _sum1 = __msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 32);
                        __builtin_prefetch(k0 + 128);
                        v8i16 _w0 = __msa_ld_h(k0, 0);
                        v8i16 _w1 = __msa_ld_h(k0 + 8, 0);
                        v8i16 _w2 = __msa_ld_h(k0 + 16, 0);
                        v8i16 _w3 = __msa_ld_h(k0 + 24, 0);

                        v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
                        v8i16 _extw1 = __msa_clti_s_h(_w1, 0);
                        v8i16 _extw2 = __msa_clti_s_h(_w2, 0);
                        v8i16 _extw3 = __msa_clti_s_h(_w3, 0);

                        v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
                        v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);
                        v4i32 _w1l = (v4i32)__msa_ilvr_h(_extw1, _w1);
                        v4i32 _w1h = (v4i32)__msa_ilvl_h(_extw1, _w1);
                        v4i32 _w2l = (v4i32)__msa_ilvr_h(_extw2, _w2);
                        v4i32 _w2h = (v4i32)__msa_ilvl_h(_extw2, _w2);
                        v4i32 _w3l = (v4i32)__msa_ilvr_h(_extw3, _w3);
                        v4i32 _w3h = (v4i32)__msa_ilvl_h(_extw3, _w3);

                        v4i32 _val0 = __msa_fill_w(r0[0]);
                        v4i32 _val1 = __msa_fill_w(r0[1]);
                        v4i32 _val2 = __msa_fill_w(r0[2]);
                        v4i32 _val3 = __msa_fill_w(r0[3]);
                        v4i32 _val4 = __msa_fill_w(r0[4]);
                        v4i32 _val5 = __msa_fill_w(r0[5]);
                        v4i32 _val6 = __msa_fill_w(r0[6]);
                        v4i32 _val7 = __msa_fill_w(r0[7]);

                        _sum0 = __msa_maddv_w(_sum0, _w0l, _val0);
                        _sum1 = __msa_maddv_w(_sum1, _w0h, _val1);
                        _sum0 = __msa_maddv_w(_sum0, _w1l, _val2);
                        _sum1 = __msa_maddv_w(_sum1, _w1h, _val3);
                        _sum0 = __msa_maddv_w(_sum0, _w2l, _val4);
                        _sum1 = __msa_maddv_w(_sum1, _w2h, _val5);
                        _sum0 = __msa_maddv_w(_sum0, _w3l, _val6);
                        _sum1 = __msa_maddv_w(_sum1, _w3h, _val7);

                        r0 += 8;
                        k0 += 32;
                    }

                    _sum0 = __msa_addv_w(_sum0, _sum1);

                    __msa_st_w(_sum0, output0_tm, 0);
                    output0_tm += 4;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u * 4, 4, opt.workspace_allocator);
    }
    {
        // const float otm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + (r01 + r02) + (r03 + r04)
        // 1 = (r01 - r02) + (r03 - r04) * 2
        // 2 = (r01 + r02) + (r03 + r04) * 4
        // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        const int tiles = w_tm / 6 * h_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            int tmp[4][6][4];

            // tile
            for (int i = 0; i < outh / 4; i++)
            {
                for (int j = 0; j < outw / 4; j++)
                {
                    // top_blob_tm.create(tiles, 36, outch, elemsize, elempack);

                    const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 4;
                    const int* output0_tm_1 = output0_tm_0 + tiles * 4;
                    const int* output0_tm_2 = output0_tm_0 + tiles * 8;
                    const int* output0_tm_3 = output0_tm_0 + tiles * 12;
                    const int* output0_tm_4 = output0_tm_0 + tiles * 16;
                    const int* output0_tm_5 = output0_tm_0 + tiles * 20;

                    int* output0 = out0.row<int>(i * 4) + (j * 4) * 4;

                    // Row pass of At for the first 5 rows.
                    for (int m = 0; m < 5; m++)
                    {
                        v4i32 _out0tm0 = __msa_ld_w(output0_tm_0, 0);
                        v4i32 _out0tm1 = __msa_ld_w(output0_tm_1, 0);
                        v4i32 _out0tm2 = __msa_ld_w(output0_tm_2, 0);
                        v4i32 _out0tm3 = __msa_ld_w(output0_tm_3, 0);
                        v4i32 _out0tm4 = __msa_ld_w(output0_tm_4, 0);
                        v4i32 _out0tm5 = __msa_ld_w(output0_tm_5, 0);

                        v4i32 _tmp02a = __msa_addv_w(_out0tm1, _out0tm2);
                        v4i32 _tmp13a = __msa_subv_w(_out0tm1, _out0tm2);

                        v4i32 _tmp02b = __msa_addv_w(_out0tm3, _out0tm4);
                        v4i32 _tmp13b = __msa_subv_w(_out0tm3, _out0tm4);

                        v4i32 _tmp0m = __msa_addv_w(__msa_addv_w(_out0tm0, _tmp02a), _tmp02b);
                        v4i32 _tmp1m = __msa_addv_w(_tmp13a, __msa_slli_w(_tmp13b, 1));
                        v4i32 _tmp2m = __msa_addv_w(_tmp02a, __msa_slli_w(_tmp02b, 2));
                        v4i32 _tmp3m = __msa_addv_w(__msa_addv_w(_tmp13a, __msa_slli_w(_out0tm5, 2)), __msa_slli_w(_tmp13b, 3));

                        __msa_st_w(_tmp0m, tmp[0][m], 0);
                        __msa_st_w(_tmp1m, tmp[1][m], 0);
                        __msa_st_w(_tmp2m, tmp[2][m], 0);
                        __msa_st_w(_tmp3m, tmp[3][m], 0);

                        output0_tm_0 += tiles * 24;
                        output0_tm_1 += tiles * 24;
                        output0_tm_2 += tiles * 24;
                        output0_tm_3 += tiles * 24;
                        output0_tm_4 += tiles * 24;
                        output0_tm_5 += tiles * 24;
                    }
                    // Last row gets an extra x4 to equalize the G-matrix
                    // scaling before the final /576 division.
                    for (int m = 5; m < 6; m++)
                    {
                        v4i32 _out0tm0 = __msa_ld_w(output0_tm_0, 0);
                        v4i32 _out0tm1 = __msa_ld_w(output0_tm_1, 0);
                        v4i32 _out0tm2 = __msa_ld_w(output0_tm_2, 0);
                        v4i32 _out0tm3 = __msa_ld_w(output0_tm_3, 0);
                        v4i32 _out0tm4 = __msa_ld_w(output0_tm_4, 0);
                        v4i32 _out0tm5 = __msa_ld_w(output0_tm_5, 0);

                        v4i32 _tmp02a = __msa_addv_w(_out0tm1, _out0tm2);
                        v4i32 _tmp13a = __msa_subv_w(_out0tm1, _out0tm2);

                        v4i32 _tmp02b = __msa_addv_w(_out0tm3, _out0tm4);
                        v4i32 _tmp13b = __msa_subv_w(_out0tm3, _out0tm4);

                        v4i32 _tmp0m = __msa_addv_w(__msa_addv_w(_out0tm0, _tmp02a), _tmp02b);
                        v4i32 _tmp1m = __msa_addv_w(_tmp13a, __msa_slli_w(_tmp13b, 1));
                        v4i32 _tmp2m = __msa_addv_w(_tmp02a, __msa_slli_w(_tmp02b, 2));
                        v4i32 _tmp3m = __msa_addv_w(__msa_addv_w(_tmp13a, __msa_slli_w(_out0tm5, 2)), __msa_slli_w(_tmp13b, 3));

                        _tmp0m = __msa_slli_w(_tmp0m, 2);
                        _tmp1m = __msa_slli_w(_tmp1m, 2);
                        _tmp2m = __msa_slli_w(_tmp2m, 2);
                        _tmp3m = __msa_slli_w(_tmp3m, 2);

                        __msa_st_w(_tmp0m, tmp[0][m], 0);
                        __msa_st_w(_tmp1m, tmp[1][m], 0);
                        __msa_st_w(_tmp2m, tmp[2][m], 0);
                        __msa_st_w(_tmp3m, tmp[3][m], 0);

                        output0_tm_0 += tiles * 24;
                        output0_tm_1 += tiles * 24;
                        output0_tm_2 += tiles * 24;
                        output0_tm_3 += tiles * 24;
                        output0_tm_4 += tiles * 24;
                        output0_tm_5 += tiles * 24;
                    }

                    // Column pass of At, then undo the 6x6 kernel scaling
                    // (G scaled by 6 twice -> divide by 576).
                    for (int m = 0; m < 4; m++)
                    {
                        v4i32 _tmp00 = __msa_ld_w(tmp[m][0], 0);
                        v4i32 _tmp01 = __msa_ld_w(tmp[m][1], 0);
                        v4i32 _tmp02 = __msa_ld_w(tmp[m][2], 0);
                        v4i32 _tmp03 = __msa_ld_w(tmp[m][3], 0);
                        v4i32 _tmp04 = __msa_ld_w(tmp[m][4], 0);
                        v4i32 _tmp05 = __msa_ld_w(tmp[m][5], 0);

                        v4i32 _tmp02a = __msa_addv_w(_tmp01, _tmp02);
                        v4i32 _tmp13a = __msa_subv_w(_tmp01, _tmp02);

                        v4i32 _tmp02b = __msa_addv_w(_tmp03, _tmp04);
                        v4i32 _tmp13b = __msa_subv_w(_tmp03, _tmp04);

                        v4i32 _out00 = __msa_addv_w(__msa_addv_w(_tmp00, _tmp02a), _tmp02b);
                        v4i32 _out01 = __msa_addv_w(_tmp13a, __msa_slli_w(_tmp13b, 1));
                        v4i32 _out02 = __msa_addv_w(_tmp02a, __msa_slli_w(_tmp02b, 2));
                        v4i32 _out03 = __msa_addv_w(__msa_addv_w(_tmp05, _tmp13a), __msa_slli_w(_tmp13b, 3));

                        // TODO use integer trick for division by 576
                        v4f32 _v576 = __msa_fill_w_f32(1.0 / 576);
                        _out00 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out00), _v576));
                        _out01 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out01), _v576));
                        _out02 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out02), _v576));
                        _out03 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out03), _v576));

                        __msa_st_w(_out00, output0, 0);
                        __msa_st_w(_out01, output0 + 4, 0);
                        __msa_st_w(_out02, output0 + 8, 0);
                        __msa_st_w(_out03, output0 + 12, 0);

                        output0 += outw * 4;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
SE1P_direct_fd.c
#include "mex.h" #include "SE_direct.h" #include "mathint.h" #define IDX prhs[0] #define X prhs[1] // Source locations #define Q prhs[2] // Source strengths #define OPT prhs[3] // Parameters #define PHI plhs[0] // Output #ifndef VERBOSE #define VERBOSE 0 #endif /* common option-unpacking */ void unpack_opt(ewald_opts* opt, const mxArray* mx_opt) { // mandatory options -- will trigger core dump if missing opt->xi = mxGetScalar(mxGetField(mx_opt,0,"xi")); if(opt->xi==0) mexErrMsgTxt("xi cannot be zero"); double* box = mxGetPr(mxGetField(mx_opt,0,"box")); opt->box[0] = box[0]; // layers: mandatory for ewald sums that are truncated const mxArray* mx_layers = mxGetField(mx_opt,0,"layers"); if(mx_layers) opt->layers = (int)mxGetScalar(mx_layers); else opt->layers = -1; } // MATLAB (one-based, doubles) to C (zero-based, integers) index translation void index_translation(int* idx, const double* idx_d, int N) { for(int i=0; i<N; i++) idx[i] = (int)idx_d[i] - 1; } #ifdef FORCE void SE1P_direct_fd(double* restrict force, const int* restrict idx, int nidx, const double* restrict x, const double* restrict q, int N, const ewald_opts opt) { const double xi = opt.xi; double xi2 = xi*xi; double TwoPiOverL = 2.*PI/opt.box[0]; #ifdef _OPENMP #pragma omp parallel for #endif for(int m=0; m<nidx; m++) { double xm[] = {x[idx[m]],x[idx[m]+N],x[idx[m]+2*N]}; double f[] = {0, 0, 0}; for(int n = 0; n<N; n++) { double rvec[] = {xm[0]-x[n],xm[1]-x[n+N], xm[2]-x[n+2*N]}; double rho2 = rvec[1]*rvec[1] + rvec[2]*rvec[2]; double b = rho2*xi2; double qn = q[n]; for(int j0 = -opt.layers; j0<=opt.layers; j0++) { if(j0 == 0) continue; double k = TwoPiOverL*j0; double kr = -k*rvec[0]; double a = k*k/(4.*xi2); double K0; K0 = computeINCBK0(a,b,0); f[0] += -qn*k*sin(kr)*K0; K0 = computeINCBK0(a,b,1); f[1] += 2.*qn*xi2*cos(kr)*rvec[1]*K0; f[2] += 2.*qn*xi2*cos(kr)*rvec[2]*K0; } } force[m ] = -f[0]/(opt.box[0]); force[m+ nidx] = -f[1]/(opt.box[0]); force[m+2*nidx] = -f[2]/(opt.box[0]); } /* 
gsl_integration_workspace_free (w); */ } #else void SE1P_direct_fd(double* restrict phi, const int* restrict idx, int nidx, const double* restrict x, const double* restrict q, int N, const ewald_opts opt) { double p; const double xi = opt.xi; double xi2 = xi*xi; double TwoPiOverL = 2.*PI/opt.box[0]; // int rep; #ifdef _OPENMP #pragma omp parallel for private(p) #endif for(int m=0; m<nidx; m++) { double xm[3] = {x[idx[m] ], x[idx[m]+N ], x[idx[m]+2*N]}; p = 0; for(int j0 = 1; j0<=opt.layers; j0++) { double k = TwoPiOverL*j0; double a = k*k/(4.*xi2); for(int n = 0; n<N; n++) { double r = xm[0]-x[n]; double rho2= ( (xm[1]-x[n+N ])*(xm[1]-x[n+N ])+ (xm[2]-x[n+2*N])*(xm[2]-x[n+2*N]) ); double b = rho2*xi2; double qn = q[n]; double K0 = computeK0(a,b); double kr = -k*r; p += 2*qn*cos(kr)*K0; } } phi[m] = p/(opt.box[0]); } } #endif /* no input checking is done */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) { // input dims const int N = mxGetM(X); const int num_eval = mxGetN(IDX); // FIXME: indices assumed to be row vec const double* idx_d = mxGetPr(IDX); int* idx = mxMalloc(num_eval*sizeof(int)); index_translation(idx, idx_d, num_eval); const double* x = mxGetPr(X); const double* q = mxGetPr(Q); #ifndef FORCE PHI = mxCreateDoubleMatrix(num_eval, 1, mxREAL); double* restrict phi = mxGetPr(PHI); #else /* This is to allocate 3 vectors for the force. * (FIXME) Note that the variable is still called PHI.*/ PHI = mxCreateDoubleMatrix(num_eval, 3, mxREAL); double* restrict phi = mxGetPr(PHI); #endif ewald_opts opt; unpack_opt(&opt, OPT); if(VERBOSE) { mexPrintf("[EWALD (%s)] MEX N=(%d,%d) ","FD1P",N,num_eval); mexPrintf("xi = %.2f, layers=%d\n", opt.xi,opt.layers); } // call kernel SE1P_direct_fd(phi, idx, num_eval, x, q, N, opt); mxFree(idx); }
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Direct 3x3 stride-1 convolution.
//
// Each output map is pre-filled with its bias, then every input channel's
// contribution is accumulated into it.  Output channels are processed two
// at a time (the pair shares the loaded input rows), and within a channel
// pair two output rows are produced per iteration (rows i and i+1 share
// input rows r1 and r2).  Leftover channels/rows fall through to
// single-channel / single-row paths below.
//
// Fast paths: hand-written aarch64 and armv7 NEON inline assembly cover
// outw/4 vector iterations; the scalar/intrinsic "remain" loops handle the
// last outw%4 columns.
//
// NOTE(review): r0..r3 read a 3-wide window per output column, so each
// input row is presumably w = outw + 2 floats wide (caller pads the
// bottom blob) — confirm against the calling convolution layer.
static void conv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // pairs of output channels, then the odd remainder
    int nn_outch = outch >> 1;
    int remain_outch_start = nn_outch << 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p+1);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p+1] : 0.f;

        // seed the accumulators with the bias
        out0.fill(bias0);
        out1.fill(bias1);

        // 9 kernel taps per (outch, inch) pair
        const float* k0 = kernel + p*inch*9;
        const float* k1 = kernel + (p+1)*inch*9;

        for (int q=0; q<inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;
            float* outptr0n = outptr0 + outw;   // next output row
            float* outptr1n = outptr1 + outw;

            const float* img0 = bottom_blob.channel(q);

            // four consecutive input rows feed two output rows
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

#if __ARM_NEON
            // kernel rows broadcast into vector registers; lane 3 unused
            float32x4_t _k00 = vld1q_f32(k0);
            float32x4_t _k03 = vld1q_f32(k0+3);
            float32x4_t _k06 = vld1q_f32(k0+6);
            float32x4_t _k10 = vld1q_f32(k1);
            float32x4_t _k13 = vld1q_f32(k1+3);
            float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON

            int i = 0;

            // two output rows per iteration
            for (; i+1 < outh; i+=2)
            {
#if __ARM_NEON
                int nn = outw >> 2;      // vectorized groups of 4 columns
                int remain = outw & 3;   // scalar tail
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                asm volatile(
                    "prfm   pldl1keep, [%5, #256]          \n"
                    "ld1    {v8.4s, v9.4s}, [%5]           \n"// r0
                    "add    %5, %5, #16                    \n"

                    "prfm   pldl1keep, [%8, #256]          \n"
                    "ld1    {v14.4s, v15.4s}, [%8]         \n"// r3
                    "add    %8, %8, #16                    \n"

                    "ext    v10.16b, v8.16b, v9.16b, #4    \n"
                    "ext    v11.16b, v14.16b, v15.16b, #8  \n"

                    "0:                                    \n"

                    "prfm   pldl1keep, [%1, #128]          \n"
                    "ld1    {v6.4s}, [%1]                  \n"// _sum0

                    "prfm   pldl1keep, [%2, #128]          \n"
                    "ld1    {v7.4s}, [%2]                  \n"// _sum1

                    "fmla   v6.4s, v8.4s, %18.s[0]         \n"
                    "fmla   v7.4s, v8.4s, %21.s[0]         \n"

                    "prfm   pldl1keep, [%3, #128]          \n"
                    "ld1    {v12.4s}, [%3]                 \n"// _sum0n

                    "prfm   pldl1keep, [%4, #128]          \n"
                    "ld1    {v13.4s}, [%4]                 \n"// _sum1n

                    "fmla   v12.4s, v14.4s, %20.s[0]       \n"
                    "fmla   v13.4s, v14.4s, %23.s[0]       \n"

                    "ext    v8.16b, v8.16b, v9.16b, #8     \n"
                    "ext    v9.16b, v14.16b, v15.16b, #4   \n"

                    "fmla   v6.4s, v10.4s, %18.s[1]        \n"
                    "fmla   v7.4s, v10.4s, %21.s[1]        \n"
                    "fmla   v12.4s, v11.4s, %20.s[2]       \n"
                    "fmla   v13.4s, v11.4s, %23.s[2]       \n"

                    "prfm   pldl1keep, [%6, #256]          \n"
                    "ld1    {v14.4s, v15.4s}, [%6]         \n"// r1
                    "add    %6, %6, #16                    \n"

                    "fmla   v6.4s, v8.4s, %18.s[2]         \n"
                    "fmla   v7.4s, v8.4s, %21.s[2]         \n"
                    "fmla   v12.4s, v9.4s, %20.s[1]        \n"
                    "fmla   v13.4s, v9.4s, %23.s[1]        \n"

                    "ext    v10.16b, v14.16b, v15.16b, #4  \n"

                    "fmla   v6.4s, v14.4s, %19.s[0]        \n"
                    "fmla   v7.4s, v14.4s, %22.s[0]        \n"
                    "fmla   v12.4s, v14.4s, %18.s[0]       \n"
                    "fmla   v13.4s, v14.4s, %21.s[0]       \n"

                    "ext    v11.16b, v14.16b, v15.16b, #8  \n"

                    "fmla   v6.4s, v10.4s, %19.s[1]        \n"
                    "fmla   v7.4s, v10.4s, %22.s[1]        \n"
                    "fmla   v12.4s, v10.4s, %18.s[1]       \n"
                    "fmla   v13.4s, v10.4s, %21.s[1]       \n"

                    "prfm   pldl1keep, [%7, #256]          \n"
                    "ld1    {v8.4s, v9.4s}, [%7]           \n"// r2
                    "add    %7, %7, #16                    \n"

                    "fmla   v6.4s, v11.4s, %19.s[2]        \n"
                    "fmla   v7.4s, v11.4s, %22.s[2]        \n"
                    "fmla   v12.4s, v11.4s, %18.s[2]       \n"
                    "fmla   v13.4s, v11.4s, %21.s[2]       \n"

                    "ext    v10.16b, v8.16b, v9.16b, #4    \n"

                    "fmla   v6.4s, v8.4s, %20.s[0]         \n"
                    "fmla   v7.4s, v8.4s, %23.s[0]         \n"
                    "fmla   v12.4s, v8.4s, %19.s[0]        \n"
                    "fmla   v13.4s, v8.4s, %22.s[0]        \n"

                    "ext    v11.16b, v8.16b, v9.16b, #8    \n"

                    "fmla   v6.4s, v10.4s, %20.s[1]        \n"
                    "fmla   v7.4s, v10.4s, %23.s[1]        \n"
                    "fmla   v12.4s, v10.4s, %19.s[1]       \n"
                    "fmla   v13.4s, v10.4s, %22.s[1]       \n"

                    "prfm   pldl1keep, [%5, #256]          \n"
                    "ld1    {v8.4s, v9.4s}, [%5]           \n"// r0
                    "add    %5, %5, #16                    \n"

                    "fmla   v6.4s, v11.4s, %20.s[2]        \n"
                    "fmla   v7.4s, v11.4s, %23.s[2]        \n"
                    "fmla   v12.4s, v11.4s, %19.s[2]       \n"
                    "fmla   v13.4s, v11.4s, %22.s[2]       \n"

                    "prfm   pldl1keep, [%8, #256]          \n"
                    "ld1    {v14.4s, v15.4s}, [%8]         \n"// r3
                    "add    %8, %8, #16                    \n"

                    "ext    v10.16b, v8.16b, v9.16b, #4    \n"

                    "st1    {v6.4s}, [%1], #16             \n"
                    "st1    {v7.4s}, [%2], #16             \n"

                    "ext    v11.16b, v14.16b, v15.16b, #8  \n"

                    "st1    {v12.4s}, [%3], #16            \n"
                    "st1    {v13.4s}, [%4], #16            \n"

                    "subs   %w0, %w0, #1                   \n"
                    "bne    0b                             \n"

                    "sub    %5, %5, #16                    \n"
                    "sub    %8, %8, #16                    \n"
                    : "=r"(nn),         // %0
                      "=r"(outptr0),    // %1
                      "=r"(outptr1),    // %2
                      "=r"(outptr0n),   // %3
                      "=r"(outptr1n),   // %4
                      "=r"(r0),         // %5
                      "=r"(r1),         // %6
                      "=r"(r2),         // %7
                      "=r"(r3)          // %8
                    : "0"(nn),
                      "1"(outptr0),
                      "2"(outptr1),
                      "3"(outptr0n),
                      "4"(outptr1n),
                      "5"(r0),
                      "6"(r1),
                      "7"(r2),
                      "8"(r3),
                      "w"(_k00),        // %18
                      "w"(_k03),        // %19
                      "w"(_k06),        // %20
                      "w"(_k10),        // %21
                      "w"(_k13),        // %22
                      "w"(_k16)         // %23
                    : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "pld        [%5, #192]          \n"
                    "vld1.f32   {d16-d18}, [%5 :64] \n"// r0
                    "add        %5, #16             \n"

                    "pld        [%8, #192]          \n"
                    "vld1.f32   {d28-d30}, [%8]     \n"// r3
                    "add        %8, #16             \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q14, q15, #2   \n"

                    "0:                             \n"

                    "pld        [%1, #128]          \n"
                    "vld1.f32   {d12-d13}, [%1 :64] \n"// _sum0

                    "pld        [%2, #128]          \n"
                    "vld1.f32   {d14-d15}, [%2 :64] \n"// _sum1

                    "vmla.f32   q6, q8, %e18[0]     \n"
                    "vmla.f32   q7, q8, %e21[0]     \n"

                    "pld        [%3, #128]          \n"
                    "vld1.f32   {d24-d25}, [%3]     \n"// _sum0n

                    "pld        [%4, #128]          \n"
                    "vld1.f32   {d26-d27}, [%4]     \n"// _sum1n

                    "vmla.f32   q12, q14, %e20[0]   \n"
                    "vmla.f32   q13, q14, %e23[0]   \n"

                    "vext.32    q8, q8, q9, #2      \n"
                    "vext.32    q9, q14, q15, #1    \n"

                    "vmla.f32   q6, q10, %e18[1]    \n"
                    "vmla.f32   q7, q10, %e21[1]    \n"
                    "vmla.f32   q12, q11, %f20[0]   \n"
                    "vmla.f32   q13, q11, %f23[0]   \n"

                    "pld        [%6, #192]          \n"
                    "vld1.f32   {d28-d30}, [%6]     \n"// r1
                    "add        %6, #16             \n"

                    "vmla.f32   q6, q8, %f18[0]     \n"
                    "vmla.f32   q7, q8, %f21[0]     \n"
                    "vmla.f32   q12, q9, %e20[1]    \n"
                    "vmla.f32   q13, q9, %e23[1]    \n"

                    "vext.32    q10, q14, q15, #1   \n"

                    "vmla.f32   q6, q14, %e19[0]    \n"
                    "vmla.f32   q7, q14, %e22[0]    \n"
                    "vmla.f32   q12, q14, %e18[0]   \n"
                    "vmla.f32   q13, q14, %e21[0]   \n"

                    "vext.32    q11, q14, q15, #2   \n"

                    "vmla.f32   q6, q10, %e19[1]    \n"
                    "vmla.f32   q7, q10, %e22[1]    \n"
                    "vmla.f32   q12, q10, %e18[1]   \n"
                    "vmla.f32   q13, q10, %e21[1]   \n"

                    "pld        [%7, #192]          \n"
                    "vld1.f32   {d16-d18}, [%7 :64] \n"// r2
                    "add        %7, #16             \n"

                    "vmla.f32   q6, q11, %f19[0]    \n"
                    "vmla.f32   q7, q11, %f22[0]    \n"
                    "vmla.f32   q12, q11, %f18[0]   \n"
                    "vmla.f32   q13, q11, %f21[0]   \n"

                    "vext.32    q10, q8, q9, #1     \n"

                    "vmla.f32   q6, q8, %e20[0]     \n"
                    "vmla.f32   q7, q8, %e23[0]     \n"
                    "vmla.f32   q12, q8, %e19[0]    \n"
                    "vmla.f32   q13, q8, %e22[0]    \n"

                    "vext.32    q11, q8, q9, #2     \n"

                    "vmla.f32   q6, q10, %e20[1]    \n"
                    "vmla.f32   q7, q10, %e23[1]    \n"
                    "vmla.f32   q12, q10, %e19[1]   \n"
                    "vmla.f32   q13, q10, %e22[1]   \n"

                    "pld        [%5, #192]          \n"
                    "vld1.f32   {d16-d18}, [%5 :64] \n"// r0
                    "add        %5, #16             \n"

                    "vmla.f32   q6, q11, %f20[0]    \n"
                    "vmla.f32   q7, q11, %f23[0]    \n"
                    "vmla.f32   q12, q11, %f19[0]   \n"
                    "vmla.f32   q13, q11, %f22[0]   \n"

                    "pld        [%8, #192]          \n"
                    "vld1.f32   {d28-d30}, [%8]     \n"// r3
                    "add        %8, #16             \n"

                    "vext.32    q10, q8, q9, #1     \n"

                    "vst1.f32   {d12-d13}, [%1 : 64]!\n"
                    "vst1.f32   {d14-d15}, [%2 : 64]!\n"

                    "vext.32    q11, q14, q15, #2   \n"

                    "vst1.f32   {d24-d25}, [%3]!    \n"
                    "vst1.f32   {d26-d27}, [%4]!    \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"

                    "sub        %5, #16             \n"
                    "sub        %8, #16             \n"
                    : "=r"(nn),         // %0
                      "=r"(outptr0),    // %1
                      "=r"(outptr1),    // %2
                      "=r"(outptr0n),   // %3
                      "=r"(outptr1n),   // %4
                      "=r"(r0),         // %5
                      "=r"(r1),         // %6
                      "=r"(r2),         // %7
                      "=r"(r3)          // %8
                    : "0"(nn),
                      "1"(outptr0),
                      "2"(outptr1),
                      "3"(outptr0n),
                      "4"(outptr1n),
                      "5"(r0),
                      "6"(r1),
                      "7"(r2),
                      "8"(r3),
                      "w"(_k00),        // %18
                      "w"(_k03),        // %19
                      "w"(_k06),        // %20
                      "w"(_k10),        // %21
                      "w"(_k13),        // %22
                      "w"(_k16)         // %23
                    : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: one output column at a time.  The NEON
                // variant loads 4 floats per row but only the first 3
                // taps matter: lane 3 is overwritten with the running
                // output value before the horizontal add.
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r30 = vld1q_f32(r3);

                    float32x4_t _sum0 = vmulq_f32(_r00, _k00);
                    float32x4_t _sum1 = vmulq_f32(_r00, _k10);
                    _sum0 = vmlaq_f32(_sum0, _r10, _k03);
                    _sum1 = vmlaq_f32(_sum1, _r10, _k13);
                    _sum0 = vmlaq_f32(_sum0, _r20, _k06);
                    _sum1 = vmlaq_f32(_sum1, _r20, _k16);

                    float32x4_t _sum0n = vmulq_f32(_r10, _k00);
                    float32x4_t _sum1n = vmulq_f32(_r10, _k10);
                    _sum0n = vmlaq_f32(_sum0n, _r20, _k03);
                    _sum1n = vmlaq_f32(_sum1n, _r20, _k13);
                    _sum0n = vmlaq_f32(_sum0n, _r30, _k06);
                    _sum1n = vmlaq_f32(_sum1n, _r30, _k16);

                    // fold the existing accumulator into lane 3
                    _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
                    _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
                    _sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3);
                    _sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3);
#if __aarch64__
                    *outptr0 = vaddvq_f32(_sum0);
                    *outptr1 = vaddvq_f32(_sum1);
                    *outptr0n = vaddvq_f32(_sum0n);
                    *outptr1n = vaddvq_f32(_sum1n);
#else
                    float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
                    float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
                    float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n));
                    float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n));

                    float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
                    float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n);

                    *outptr0 = vget_lane_f32(_ss01, 0);
                    *outptr1 = vget_lane_f32(_ss01, 1);
                    *outptr0n = vget_lane_f32(_ss01n, 0);
                    *outptr1n = vget_lane_f32(_ss01n, 1);
#endif // __aarch64__
#else
                    float sum0 = 0.f;
                    float sum0n = 0.f;
                    float sum1 = 0.f;
                    float sum1n = 0.f;

                    // row i, channel p
                    sum0 += r0[0] * k0[0];
                    sum0 += r0[1] * k0[1];
                    sum0 += r0[2] * k0[2];
                    sum0 += r1[0] * k0[3];
                    sum0 += r1[1] * k0[4];
                    sum0 += r1[2] * k0[5];
                    sum0 += r2[0] * k0[6];
                    sum0 += r2[1] * k0[7];
                    sum0 += r2[2] * k0[8];

                    // row i, channel p+1
                    sum1 += r0[0] * k1[0];
                    sum1 += r0[1] * k1[1];
                    sum1 += r0[2] * k1[2];
                    sum1 += r1[0] * k1[3];
                    sum1 += r1[1] * k1[4];
                    sum1 += r1[2] * k1[5];
                    sum1 += r2[0] * k1[6];
                    sum1 += r2[1] * k1[7];
                    sum1 += r2[2] * k1[8];

                    // row i+1, channel p
                    sum0n += r1[0] * k0[0];
                    sum0n += r1[1] * k0[1];
                    sum0n += r1[2] * k0[2];
                    sum0n += r2[0] * k0[3];
                    sum0n += r2[1] * k0[4];
                    sum0n += r2[2] * k0[5];
                    sum0n += r3[0] * k0[6];
                    sum0n += r3[1] * k0[7];
                    sum0n += r3[2] * k0[8];

                    // row i+1, channel p+1
                    sum1n += r1[0] * k1[0];
                    sum1n += r1[1] * k1[1];
                    sum1n += r1[2] * k1[2];
                    sum1n += r2[0] * k1[3];
                    sum1n += r2[1] * k1[4];
                    sum1n += r2[2] * k1[5];
                    sum1n += r3[0] * k1[6];
                    sum1n += r3[1] * k1[7];
                    sum1n += r3[2] * k1[8];

                    *outptr0 += sum0;
                    *outptr1 += sum1;
                    *outptr0n += sum0n;
                    *outptr1n += sum1n;
#endif // __ARM_NEON
                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr0++;
                    outptr1++;
                    outptr0n++;
                    outptr1n++;
                }

                // advance two input rows (plus the 2-column window slack)
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                // skip the row already written via the *n pointers
                outptr0 += outw;
                outptr1 += outw;
                outptr0n += outw;
                outptr1n += outw;
            }

            // leftover single output row
            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                asm volatile(
                    "0:                                    \n"

                    "prfm   pldl1keep, [%3, #256]          \n"
                    "ld1    {v8.4s, v9.4s}, [%3]           \n"// r0
                    "add    %3, %3, #16                    \n"

                    "prfm   pldl1keep, [%1, #128]          \n"
                    "ld1    {v6.4s}, [%1]                  \n"// _sum0

                    "prfm   pldl1keep, [%2, #128]          \n"
                    "ld1    {v7.4s}, [%2]                  \n"// _sum1

                    "fmul   v14.4s, v8.4s, %12.s[0]        \n"
                    "fmul   v15.4s, v8.4s, %15.s[0]        \n"

                    "ext    v10.16b, v8.16b, v9.16b, #4    \n"
                    "ext    v11.16b, v8.16b, v9.16b, #8    \n"

                    "fmla   v6.4s, v10.4s, %12.s[1]        \n"
                    "fmla   v7.4s, v10.4s, %15.s[1]        \n"

                    "prfm   pldl1keep, [%4, #256]          \n"
                    "ld1    {v8.4s, v9.4s}, [%4]           \n"// r1
                    "add    %4, %4, #16                    \n"

                    "fmla   v14.4s, v11.4s, %12.s[2]       \n"
                    "fmla   v15.4s, v11.4s, %15.s[2]       \n"

                    "fmla   v6.4s, v8.4s, %13.s[0]         \n"
                    "fmla   v7.4s, v8.4s, %16.s[0]         \n"

                    "ext    v10.16b, v8.16b, v9.16b, #4    \n"
                    "ext    v11.16b, v8.16b, v9.16b, #8    \n"

                    "fmla   v14.4s, v10.4s, %13.s[1]       \n"
                    "fmla   v15.4s, v10.4s, %16.s[1]       \n"

                    "prfm   pldl1keep, [%5, #256]          \n"
                    "ld1    {v8.4s, v9.4s}, [%5]           \n"// r2
                    "add    %5, %5, #16                    \n"

                    "fmla   v6.4s, v11.4s, %13.s[2]        \n"
                    "fmla   v7.4s, v11.4s, %16.s[2]        \n"

                    "fmla   v14.4s, v8.4s, %14.s[0]        \n"
                    "fmla   v15.4s, v8.4s, %17.s[0]        \n"

                    "ext    v10.16b, v8.16b, v9.16b, #4    \n"
                    "ext    v11.16b, v8.16b, v9.16b, #8    \n"

                    "fmla   v6.4s, v10.4s, %14.s[1]        \n"
                    "fmla   v7.4s, v10.4s, %17.s[1]        \n"

                    "fmla   v14.4s, v11.4s, %14.s[2]       \n"
                    "fmla   v15.4s, v11.4s, %17.s[2]       \n"

                    "fadd   v6.4s, v6.4s, v14.4s           \n"
                    "fadd   v7.4s, v7.4s, v15.4s           \n"

                    "st1    {v6.4s}, [%1], #16             \n"
                    "st1    {v7.4s}, [%2], #16             \n"

                    "subs   %w0, %w0, #1                   \n"
                    "bne    0b                             \n"
                    : "=r"(nn),         // %0
                      "=r"(outptr0),    // %1
                      "=r"(outptr1),    // %2
                      "=r"(r0),         // %3
                      "=r"(r1),         // %4
                      "=r"(r2)          // %5
                    : "0"(nn),
                      "1"(outptr0),
                      "2"(outptr1),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "w"(_k00),        // %12
                      "w"(_k03),        // %13
                      "w"(_k06),        // %14
                      "w"(_k10),        // %15
                      "w"(_k13),        // %16
                      "w"(_k16)         // %17
                    : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "0:                             \n"

                    "pld        [%3, #192]          \n"
                    "vld1.f32   {d16-d18}, [%3]     \n"// r0
                    "add        %3, #16             \n"

                    "pld        [%1, #128]          \n"
                    "vld1.f32   {d12-d13}, [%1]     \n"// _sum0

                    "pld        [%2, #128]          \n"
                    "vld1.f32   {d14-d15}, [%2]     \n"// _sum1

                    "vmul.f32   q14, q8, %e12[0]    \n"
                    "vmul.f32   q15, q8, %e15[0]    \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "vmla.f32   q6, q10, %e12[1]    \n"
                    "vmla.f32   q7, q10, %e15[1]    \n"

                    "pld        [%4, #192]          \n"
                    "vld1.f32   {d16-d18}, [%4]     \n"// r1
                    "add        %4, #16             \n"

                    "vmla.f32   q14, q11, %f12[0]   \n"
                    "vmla.f32   q15, q11, %f15[0]   \n"

                    "vmla.f32   q6, q8, %e13[0]     \n"
                    "vmla.f32   q7, q8, %e16[0]     \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "vmla.f32   q14, q10, %e13[1]   \n"
                    "vmla.f32   q15, q10, %e16[1]   \n"

                    "pld        [%5, #192]          \n"
                    "vld1.f32   {d16-d18}, [%5]     \n"// r2
                    "add        %5, #16             \n"

                    "vmla.f32   q6, q11, %f13[0]    \n"
                    "vmla.f32   q7, q11, %f16[0]    \n"

                    "vmla.f32   q14, q8, %e14[0]    \n"
                    "vmla.f32   q15, q8, %e17[0]    \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "vmla.f32   q6, q10, %e14[1]    \n"
                    "vmla.f32   q7, q10, %e17[1]    \n"

                    "vmla.f32   q14, q11, %f14[0]   \n"
                    "vmla.f32   q15, q11, %f17[0]   \n"

                    "vadd.f32   q6, q6, q14         \n"
                    "vadd.f32   q7, q7, q15         \n"

                    "vst1.f32   {d12-d13}, [%1]!    \n"
                    "vst1.f32   {d14-d15}, [%2]!    \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"
                    : "=r"(nn),         // %0
                      "=r"(outptr0),    // %1
                      "=r"(outptr1),    // %2
                      "=r"(r0),         // %3
                      "=r"(r1),         // %4
                      "=r"(r2)          // %5
                    : "0"(nn),
                      "1"(outptr0),
                      "2"(outptr1),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "w"(_k00),        // %12
                      "w"(_k03),        // %13
                      "w"(_k06),        // %14
                      "w"(_k10),        // %15
                      "w"(_k13),        // %16
                      "w"(_k16)         // %17
                    : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum0 = vmulq_f32(_r00, _k00);
                    float32x4_t _sum1 = vmulq_f32(_r00, _k10);
                    _sum0 = vmlaq_f32(_sum0, _r10, _k03);
                    _sum1 = vmlaq_f32(_sum1, _r10, _k13);
                    _sum0 = vmlaq_f32(_sum0, _r20, _k06);
                    _sum1 = vmlaq_f32(_sum1, _r20, _k16);

                    _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
                    _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
                    *outptr0 = vaddvq_f32(_sum0);
                    *outptr1 = vaddvq_f32(_sum1);
#else
                    float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
                    float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));

                    float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);

                    *outptr0 = vget_lane_f32(_ss01, 0);
                    *outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
                    float sum0 = 0.f;
                    float sum1 = 0.f;

                    sum0 += r0[0] * k0[0];
                    sum0 += r0[1] * k0[1];
                    sum0 += r0[2] * k0[2];
                    sum0 += r1[0] * k0[3];
                    sum0 += r1[1] * k0[4];
                    sum0 += r1[2] * k0[5];
                    sum0 += r2[0] * k0[6];
                    sum0 += r2[1] * k0[7];
                    sum0 += r2[2] * k0[8];

                    sum1 += r0[0] * k1[0];
                    sum1 += r0[1] * k1[1];
                    sum1 += r0[2] * k1[2];
                    sum1 += r1[0] * k1[3];
                    sum1 += r1[1] * k1[4];
                    sum1 += r1[2] * k1[5];
                    sum1 += r2[0] * k1[6];
                    sum1 += r2[1] * k1[7];
                    sum1 += r2[2] * k1[8];

                    *outptr0 += sum0;
                    *outptr1 += sum1;
#endif // __ARM_NEON
                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                    outptr1++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            // next input channel's 3x3 taps
            k0 += 9;
            k1 += 9;
        }
    }

    // remaining (odd) output channel, one at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        const float* kernel0 = kernel + p*inch*9;

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;   // second output row

            const float* img0 = bottom_blob.channel(q);

            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

#if __ARM_NEON
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k3456 = vld1q_f32(kernel0+3);
            float32x4_t _k6789 = vld1q_f32(kernel0+6);
#else
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
#endif // __ARM_NEON

            int i = 0;

            // two output rows per iteration
            for (; i+1 < outh; i+=2)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                asm volatile(
                    "prfm   pldl1keep, [%3, #256]          \n"
                    "ld1    {v9.4s, v10.4s}, [%3]          \n"// r0
                    "add    %3, %3, #16                    \n"

                    "ext    v11.16b, v9.16b, v10.16b, #4   \n"
                    "ext    v12.16b, v9.16b, v10.16b, #8   \n"

                    "0:                                    \n"

                    "prfm   pldl1keep, [%1, #128]          \n"
                    "ld1    {v7.4s}, [%1]                  \n"// _sum

                    "fmla   v7.4s, v9.4s, %14.s[0]         \n"
                    "fmul   v6.4s, v11.4s, %14.s[1]        \n"
                    "fmul   v13.4s, v12.4s, %14.s[2]       \n"

                    "prfm   pldl1keep, [%4, #256]          \n"
                    "ld1    {v9.4s, v10.4s}, [%4]          \n"// r1
                    "add    %4, %4, #16                    \n"

                    "fmla   v7.4s, v9.4s, %15.s[0]         \n"

                    "ext    v11.16b, v9.16b, v10.16b, #4   \n"
                    "ext    v12.16b, v9.16b, v10.16b, #8   \n"

                    "fmla   v6.4s, v11.4s, %15.s[1]        \n"
                    "fmla   v13.4s, v12.4s, %15.s[2]       \n"

                    "prfm   pldl1keep, [%2, #128]          \n"
                    "ld1    {v8.4s}, [%2]                  \n"// _sum2

                    "fmla   v8.4s, v9.4s, %14.s[0]         \n"
                    "fmul   v14.4s, v11.4s, %14.s[1]       \n"
                    "fmul   v15.4s, v12.4s, %14.s[2]       \n"

                    "prfm   pldl1keep, [%5, #256]          \n"
                    "ld1    {v9.4s, v10.4s}, [%5]          \n"// r2
                    "add    %5, %5, #16                    \n"

                    "fmla   v7.4s, v9.4s, %16.s[0]         \n"

                    "ext    v11.16b, v9.16b, v10.16b, #4   \n"
                    "ext    v12.16b, v9.16b, v10.16b, #8   \n"

                    "fmla   v6.4s, v11.4s, %16.s[1]        \n"
                    "fmla   v13.4s, v12.4s, %16.s[2]       \n"

                    "fmla   v8.4s, v9.4s, %15.s[0]         \n"
                    "fmla   v14.4s, v11.4s, %15.s[1]       \n"
                    "fmla   v15.4s, v12.4s, %15.s[2]       \n"

                    "prfm   pldl1keep, [%6, #256]          \n"
                    "ld1    {v9.4s, v10.4s}, [%6]          \n"// r3
                    "add    %6, %6, #16                    \n"

                    "fmla   v8.4s, v9.4s, %16.s[0]         \n"

                    "ext    v11.16b, v9.16b, v10.16b, #4   \n"
                    "ext    v12.16b, v9.16b, v10.16b, #8   \n"

                    "fmla   v14.4s, v11.4s, %16.s[1]       \n"
                    "fmla   v15.4s, v12.4s, %16.s[2]       \n"

                    "fadd   v7.4s, v7.4s, v6.4s            \n"

                    "prfm   pldl1keep, [%3, #256]          \n"
                    "ld1    {v9.4s, v10.4s}, [%3]          \n"// r0

                    "fadd   v8.4s, v8.4s, v14.4s           \n"
                    "fadd   v7.4s, v7.4s, v13.4s           \n"
                    "fadd   v8.4s, v8.4s, v15.4s           \n"

                    "ext    v11.16b, v9.16b, v10.16b, #4   \n"
                    "ext    v12.16b, v9.16b, v10.16b, #8   \n"

                    "add    %3, %3, #16                    \n"

                    "st1    {v7.4s}, [%1], #16             \n"
                    "st1    {v8.4s}, [%2], #16             \n"

                    "subs   %w0, %w0, #1                   \n"
                    "bne    0b                             \n"

                    "sub    %3, %3, #16                    \n"
                    : "=r"(nn),         // %0
                      "=r"(outptr),     // %1
                      "=r"(outptr2),    // %2
                      "=r"(r0),         // %3
                      "=r"(r1),         // %4
                      "=r"(r2),         // %5
                      "=r"(r3)          // %6
                    : "0"(nn),
                      "1"(outptr),
                      "2"(outptr2),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "6"(r3),
                      "w"(_k0123),      // %14
                      "w"(_k3456),      // %15
                      "w"(_k6789)       // %16
                    : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "pld        [%3, #192]          \n"
                    "vld1.f32   {d18-d20}, [%3 :64] \n"// r0
                    "add        %3, #16             \n"

                    "vext.32    q11, q9, q10, #1    \n"
                    "vext.32    q12, q9, q10, #2    \n"

                    "0:                             \n"

                    "pld        [%1, #128]          \n"
                    "vld1.f32   {d14-d15}, [%1 :64] \n"// _sum

                    "vmla.f32   q7, q9, %e14[0]     \n"
                    "vmul.f32   q6, q11, %e14[1]    \n"
                    "vmul.f32   q13, q12, %f14[0]   \n"

                    "pld        [%4, #192]          \n"
                    "vld1.f32   {d18-d20}, [%4]     \n"// r1
                    "add        %4, #16             \n"

                    "vmla.f32   q7, q9, %e15[0]     \n"

                    "vext.32    q11, q9, q10, #1    \n"
                    "vext.32    q12, q9, q10, #2    \n"

                    "vmla.f32   q6, q11, %e15[1]    \n"
                    "vmla.f32   q13, q12, %f15[0]   \n"

                    "pld        [%2, #128]          \n"
                    "vld1.f32   {d16-d17}, [%2]     \n"// _sum2

                    "vmla.f32   q8, q9, %e14[0]     \n"
                    "vmul.f32   q14, q11, %e14[1]   \n"
                    "vmul.f32   q15, q12, %f14[0]   \n"

                    "pld        [%5, #192]          \n"
                    "vld1.f32   {d18-d20}, [%5 :64] \n"// r2
                    "add        %5, #16             \n"

                    "vmla.f32   q7, q9, %e16[0]     \n"

                    "vext.32    q11, q9, q10, #1    \n"
                    "vext.32    q12, q9, q10, #2    \n"

                    "vmla.f32   q6, q11, %e16[1]    \n"
                    "vmla.f32   q13, q12, %f16[0]   \n"

                    "vmla.f32   q8, q9, %e15[0]     \n"
                    "vmla.f32   q14, q11, %e15[1]   \n"
                    "vmla.f32   q15, q12, %f15[0]   \n"

                    "pld        [%6, #192]          \n"
                    "vld1.f32   {d18-d20}, [%6]     \n"// r3
                    "add        %6, #16             \n"

                    "vmla.f32   q8, q9, %e16[0]     \n"

                    "vext.32    q11, q9, q10, #1    \n"
                    "vext.32    q12, q9, q10, #2    \n"

                    "vmla.f32   q14, q11, %e16[1]   \n"
                    "vmla.f32   q15, q12, %f16[0]   \n"

                    "vadd.f32   q7, q7, q6          \n"

                    "pld        [%3, #192]          \n"
                    "vld1.f32   {d18-d20}, [%3 :64] \n"// r0

                    "vadd.f32   q8, q8, q14         \n"
                    "vadd.f32   q7, q7, q13         \n"
                    "vadd.f32   q8, q8, q15         \n"

                    "vext.32    q11, q9, q10, #1    \n"
                    "vext.32    q12, q9, q10, #2    \n"

                    "add        %3, #16             \n"

                    "vst1.f32   {d14-d15}, [%1]!    \n"
                    "vst1.f32   {d16-d17}, [%2]!    \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"

                    "sub        %3, #16             \n"
                    : "=r"(nn),         // %0
                      "=r"(outptr),     // %1
                      "=r"(outptr2),    // %2
                      "=r"(r0),         // %3
                      "=r"(r1),         // %4
                      "=r"(r2),         // %5
                      "=r"(r3)          // %6
                    : "0"(nn),
                      "1"(outptr),
                      "2"(outptr2),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "6"(r3),
                      "w"(_k0123),      // %14
                      "w"(_k3456),      // %15
                      "w"(_k6789)       // %16
                    : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r30 = vld1q_f32(r3);

                    float32x4_t _sum = vmulq_f32(_r00, _k0123);
                    _sum = vmlaq_f32(_sum, _r10, _k3456);
                    _sum = vmlaq_f32(_sum, _r20, _k6789);

                    float32x4_t _sum2 = vmulq_f32(_r10, _k0123);
                    _sum2 = vmlaq_f32(_sum2, _r20, _k3456);
                    _sum2 = vmlaq_f32(_sum2, _r30, _k6789);

                    _sum = vsetq_lane_f32(*outptr, _sum, 3);
                    _sum2 = vsetq_lane_f32(*outptr2, _sum2, 3);
#if __aarch64__
                    *outptr = vaddvq_f32(_sum);
                    *outptr2 = vaddvq_f32(_sum2);
#else
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));

                    float32x2_t _sss2 = vpadd_f32(_ss, _ss2);

                    *outptr = vget_lane_f32(_sss2, 0);
                    *outptr2 = vget_lane_f32(_sss2, 1);
#endif // __aarch64__
#else
                    float sum = 0;
                    float sum2 = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    *outptr += sum;
                    *outptr2 += sum2;
#endif
                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // leftover single output row
            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                asm volatile(
                    "prfm   pldl1keep, [%2, #256]          \n"
                    "ld1    {v8.4s, v9.4s}, [%2]           \n"// r0
                    "add    %2, %2, #16                    \n"

                    "ext    v10.16b, v8.16b, v9.16b, #4    \n"
                    "ext    v11.16b, v8.16b, v9.16b, #8    \n"

                    "0:                                    \n"

                    "prfm   pldl1keep, [%1, #128]          \n"
                    "ld1    {v7.4s}, [%1]                  \n"// _sum

                    "fmla   v7.4s, v8.4s, %10.s[0]         \n"
                    "fmul   v13.4s, v10.4s, %10.s[1]       \n"
                    "fmul   v14.4s, v11.4s, %10.s[2]       \n"

                    "prfm   pldl1keep, [%3, #256]          \n"
                    "ld1    {v8.4s, v9.4s}, [%3]           \n"// r1
                    "add    %3, %3, #16                    \n"

                    "fmla   v7.4s, v8.4s, %11.s[0]         \n"

                    "ext    v10.16b, v8.16b, v9.16b, #4    \n"
                    "ext    v11.16b, v8.16b, v9.16b, #8    \n"

                    "fmla   v13.4s, v10.4s, %11.s[1]       \n"
                    "fmla   v14.4s, v11.4s, %11.s[2]       \n"

                    "prfm   pldl1keep, [%4, #256]          \n"
                    "ld1    {v8.4s, v9.4s}, [%4]           \n"// r2
                    "add    %4, %4, #16                    \n"

                    "fmla   v7.4s, v8.4s, %12.s[0]         \n"

                    "ext    v10.16b, v8.16b, v9.16b, #4    \n"
                    "ext    v11.16b, v8.16b, v9.16b, #8    \n"

                    "fmla   v13.4s, v10.4s, %12.s[1]       \n"
                    "fmla   v14.4s, v11.4s, %12.s[2]       \n"

                    "prfm   pldl1keep, [%2, #256]          \n"
                    "ld1    {v8.4s, v9.4s}, [%2]           \n"// r0
                    "add    %2, %2, #16                    \n"

                    "fadd   v7.4s, v7.4s, v13.4s           \n"
                    "fadd   v7.4s, v7.4s, v14.4s           \n"

                    "ext    v10.16b, v8.16b, v9.16b, #4    \n"
                    "ext    v11.16b, v8.16b, v9.16b, #8    \n"

                    "st1    {v7.4s}, [%1], #16             \n"

                    "subs   %w0, %w0, #1                   \n"
                    "bne    0b                             \n"

                    "sub    %2, %2, #16                    \n"
                    : "=r"(nn),         // %0
                      "=r"(outptr),     // %1
                      "=r"(r0),         // %2
                      "=r"(r1),         // %3
                      "=r"(r2)          // %4
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "w"(_k0123),      // %10
                      "w"(_k3456),      // %11
                      "w"(_k6789)       // %12
                    : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "pld        [%2, #192]          \n"
                    "vld1.f32   {d16-d18}, [%2]     \n"// r0
                    "add        %2, #16             \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "0:                             \n"

                    "pld        [%1, #128]          \n"
                    "vld1.f32   {d14-d15}, [%1]     \n"// _sum

                    "vmla.f32   q7, q8, %e10[0]     \n"
                    "vmul.f32   q13, q10, %e10[1]   \n"
                    "vmul.f32   q14, q11, %f10[0]   \n"

                    "pld        [%3, #192]          \n"
                    "vld1.f32   {d16-d18}, [%3]     \n"// r1
                    "add        %3, #16             \n"

                    "vmla.f32   q7, q8, %e11[0]     \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "vmla.f32   q13, q10, %e11[1]   \n"
                    "vmla.f32   q14, q11, %f11[0]   \n"

                    "pld        [%4, #192]          \n"
                    "vld1.f32   {d16-d18}, [%4]     \n"// r2
                    "add        %4, #16             \n"

                    "vmla.f32   q7, q8, %e12[0]     \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "vmla.f32   q13, q10, %e12[1]   \n"
                    "vmla.f32   q14, q11, %f12[0]   \n"

                    "pld        [%2, #192]          \n"
                    "vld1.f32   {d16-d18}, [%2]     \n"// r0
                    "add        %2, #16             \n"

                    "vadd.f32   q7, q7, q13         \n"
                    "vadd.f32   q7, q7, q14         \n"

                    "vext.32    q10, q8, q9, #1     \n"
                    "vext.32    q11, q8, q9, #2     \n"

                    "vst1.f32   {d14-d15}, [%1]!    \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"

                    "sub        %2, #16             \n"
                    : "=r"(nn),         // %0
                      "=r"(outptr),     // %1
                      "=r"(r0),         // %2
                      "=r"(r1),         // %3
                      "=r"(r2)          // %4
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "w"(_k0123),      // %10
                      "w"(_k3456),      // %11
                      "w"(_k6789)       // %12
                    : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum = vmulq_f32(_r00, _k0123);
                    _sum = vmlaq_f32(_sum, _r10, _k3456);
                    _sum = vmlaq_f32(_sum, _r20, _k6789);

                    _sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
                    *outptr = vaddvq_f32(_sum);
#else
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);

                    *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;
#endif
                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            kernel0 += 9;
        }
    }
}

static void conv3x3s1_winograd64_transform_kernel_neon(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(8*8, inch, outch);

    const float ktm[8][3] = {
        {   1.0f,     0.0f,     0.0f},
        { -2.0f/9,  -2.0f/9,  -2.0f/9},
        { -2.0f/9,   2.0f/9,  -2.0f/9},
{1.0f/90, 1.0f/45, 2.0f/45}, {1.0f/90, -1.0f/45, 2.0f/45}, {1.0f/45, 1.0f/90, 1.0f/180}, {1.0f/45, -1.0f/90, 1.0f/180}, { 0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i=0; i<8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j=0; j<8; j++) { float* tmpp = &tmp[j][0]; for (int i=0; i<8; i++) { kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // optimized layout for winograd4 // interleave weights int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; Mat kernel_tm2(8*8 * inch * 4, 1, nn_outch + (outch % 4 + 3) / 4); #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; float* ktm2 = kernel_tm2.channel(pp); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); const Mat kernel2_tm = kernel_tm.channel(p+2); const Mat kernel3_tm = kernel_tm.channel(p+3); int q=0; #if __ARM_NEON && __aarch64__ for (; q+3<inch; q+=4) { const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); const float* k02 = kernel0_tm.row(q+2); const float* k03 = kernel0_tm.row(q+3); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q+1); const float* k12 = kernel1_tm.row(q+2); const float* k13 = kernel1_tm.row(q+3); const float* k20 = kernel2_tm.row(q); const float* k21 = kernel2_tm.row(q+1); const float* k22 = kernel2_tm.row(q+2); const float* k23 = kernel2_tm.row(q+3); const float* k30 = kernel3_tm.row(q); const float* k31 = 
kernel3_tm.row(q+1); const float* k32 = kernel3_tm.row(q+2); const float* k33 = kernel3_tm.row(q+3); for (int r=0; r<16; r++) { // split into two asm blocks for gcc reject over 30 oprands :( asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "ld1 {v2.4s}, [%3], #16 \n" "ld1 {v3.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "ld1 {v2.4s}, [%7], #16 \n" "ld1 {v3.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k02), // %3 "=r"(k03), // %4 "=r"(k10), // %5 "=r"(k11), // %6 "=r"(k12), // %7 "=r"(k13) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k02), "4"(k03), "5"(k10), "6"(k11), "7"(k12), "8"(k13) : "cc", "memory", "v0", "v1", "v2", "v3" ); asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "ld1 {v2.4s}, [%3], #16 \n" "ld1 {v3.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "ld1 {v2.4s}, [%7], #16 \n" "ld1 {v3.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(ktm2), // %0 "=r"(k20), // %1 "=r"(k21), // %2 "=r"(k22), // %3 "=r"(k23), // %4 "=r"(k30), // %5 "=r"(k31), // %6 "=r"(k32), // %7 "=r"(k33) // %8 : "0"(ktm2), "1"(k20), "2"(k21), "3"(k22), "4"(k23), "5"(k30), "6"(k31), "7"(k32), "8"(k33) : "cc", "memory", "v0", "v1", "v2", "v3" ); } } #endif // __ARM_NEON && __aarch64__ for (; q+1<inch; q+=2) { const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q+1); const float* k20 = kernel2_tm.row(q); const float* k21 = kernel2_tm.row(q+1); const float* k30 = kernel3_tm.row(q); const float* k31 = kernel3_tm.row(q+1); for (int r=0; r<16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, 
[%3], #16 \n" "ld1 {v1.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%7], #16 \n" "ld1 {v1.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k10), // %3 "=r"(k11), // %4 "=r"(k20), // %5 "=r"(k21), // %6 "=r"(k30), // %7 "=r"(k31) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k10), "4"(k11), "5"(k20), "6"(k21), "7"(k30), "8"(k31) : "cc", "memory", "v0", "v1" ); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vld1.f32 {d2-d3}, [%2 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%3 :128]! \n" "vld1.f32 {d2-d3}, [%4 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vld1.f32 {d2-d3}, [%6 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%7 :128]! \n" "vld1.f32 {d2-d3}, [%8 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k10), // %3 "=r"(k11), // %4 "=r"(k20), // %5 "=r"(k21), // %6 "=r"(k30), // %7 "=r"(k31) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k10), "4"(k11), "5"(k20), "6"(k21), "7"(k30), "8"(k31) : "cc", "memory", "q0", "q1" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { ktm2[0 +m] = k00[m]; ktm2[4 +m] = k01[m]; ktm2[8 +m] = k10[m]; ktm2[12+m] = k11[m]; ktm2[16+m] = k20[m]; ktm2[20+m] = k21[m]; ktm2[24+m] = k30[m]; ktm2[28+m] = k31[m]; } k00 += 4; k01 += 4; k10 += 4; k11 += 4; k20 += 4; k21 += 4; k30 += 4; k31 += 4; ktm2 += 32; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* k00 = kernel0_tm.row(q); const float* k10 = kernel1_tm.row(q); const float* k20 = kernel2_tm.row(q); const float* k30 = kernel3_tm.row(q); for (int r=0; r<16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%3], #16 \n" "ld1 {v1.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s}, 
[%0], #32 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k10), // %2 "=r"(k20), // %3 "=r"(k30) // %4 : "0"(ktm2), "1"(k00), "2"(k10), "3"(k20), "4"(k30) : "cc", "memory", "v0", "v1" ); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vld1.f32 {d2-d3}, [%2 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%3 :128]! \n" "vld1.f32 {d2-d3}, [%4 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k10), // %2 "=r"(k20), // %3 "=r"(k30) // %4 : "0"(ktm2), "1"(k00), "2"(k10), "3"(k20), "4"(k30) : "cc", "memory", "q0", "q1" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { ktm2[0 +m] = k00[m]; ktm2[4 +m] = k10[m]; ktm2[8 +m] = k20[m]; ktm2[12+m] = k30[m]; } k00 += 4; k10 += 4; k20 += 4; k30 += 4; ktm2 += 16; #endif // __ARM_NEON } } } #pragma omp parallel for for (int p = remain_outch_start; p<outch; p++) { float* ktm2 = (float*)kernel_tm2.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start); const Mat kernel0_tm = kernel_tm.channel(p); int q = 0; for (; q<inch; q++) { const float* k00 = kernel0_tm.row(q); for (int r=0; r<16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "st1 {v0.4s}, [%0], #16 \n" : "=r"(ktm2), // %0 "=r"(k00) // %1 : "0"(ktm2), "1"(k00) : "cc", "memory", "v0" ); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d0-d1}, [%0 :128]! 
\n" : "=r"(ktm2), // %0 "=r"(k00) // %1 : "0"(ktm2), "1"(k00) : "cc", "memory", "q0" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { ktm2[m] = k00[m]; } k00 += 4; ktm2 += 4; #endif // __ARM_NEON } } } kernel_tm = kernel_tm2; } static void conv3x3s1_winograd64_transform_kernel_neon5(const Mat& kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(8*8, inch, outch); const float ktm[8][3] = { { 1.0f, 0.0f, 0.0f}, {-2.0f/9, -2.0f/9, -2.0f/9}, {-2.0f/9, 2.0f/9, -2.0f/9}, {1.0f/90, 1.0f/45, 2.0f/45}, {1.0f/90, -1.0f/45, 2.0f/45}, {1.0f/45, 1.0f/90, 1.0f/180}, {1.0f/45, -1.0f/90, 1.0f/180}, { 0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i=0; i<8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j=0; j<8; j++) { float* tmpp = &tmp[j][0]; for (int i=0; i<8; i++) { kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // optimized layout for winograd5 // interleave weights // Mat kernel_tm2(8*8, inch, outch); // Mat kernel_tm2(inch, 64, outch); #if __ARM_NEON && __aarch64__ Mat kernel_tm2(8*4*(inch/4) + 8*(inch%4), 64, outch/8 + (outch%8)/4 + outch%4); #else Mat kernel_tm2(4*4*(inch/4) + 4*(inch%4), 64, outch/4 + outch%4); #endif int p=0; #if __aarch64__ for (; p+7<outch; p+=8) { const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); const Mat kernel2_tm = kernel_tm.channel(p+2); const Mat kernel3_tm = kernel_tm.channel(p+3); const Mat kernel4_tm = kernel_tm.channel(p+4); 
        const Mat kernel5_tm = kernel_tm.channel(p+5);
        const Mat kernel6_tm = kernel_tm.channel(p+6);
        const Mat kernel7_tm = kernel_tm.channel(p+7);

        Mat ktm2 = kernel_tm2.channel(p/8);

        // for each of the 64 transform positions r, gather that position's
        // coefficient from all 8 output channels for every input channel, so
        // the dot stage reads the weights sequentially
        for (int r=0; r<64; r++)
        {
            float* ktm2p = ktm2.row(r);

            for (int q=0; q<inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm1_0 = kernel1_tm.row(q);
                const float* ktm2_0 = kernel2_tm.row(q);
                const float* ktm3_0 = kernel3_tm.row(q);
                const float* ktm4_0 = kernel4_tm.row(q);
                const float* ktm5_0 = kernel5_tm.row(q);
                const float* ktm6_0 = kernel6_tm.row(q);
                const float* ktm7_0 = kernel7_tm.row(q);

                ktm2p[0] = ktm0_0[r];
                ktm2p[1] = ktm1_0[r];
                ktm2p[2] = ktm2_0[r];
                ktm2p[3] = ktm3_0[r];
                ktm2p[4] = ktm4_0[r];
                ktm2p[5] = ktm5_0[r];
                ktm2p[6] = ktm6_0[r];
                ktm2p[7] = ktm7_0[r];

                ktm2p += 8;
            }
        }
    }
#endif // __aarch64__

    // pack 4 output channels at a time (same gather, 4-wide)
    for (; p+3<outch; p+=4)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm = kernel_tm.channel(p+1);
        const Mat kernel2_tm = kernel_tm.channel(p+2);
        const Mat kernel3_tm = kernel_tm.channel(p+3);

#if __ARM_NEON && __aarch64__
        // channels 0..outch/8-1 already hold the 8-wide groups above, so the
        // 4-wide groups start after them
        Mat ktm2 = kernel_tm2.channel(p/8+(p%8)/4);
#else
        Mat ktm2 = kernel_tm2.channel(p/4);
#endif

        for (int r=0; r<64; r++)
        {
            float* ktm2p = ktm2.row(r);

            for (int q=0; q<inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm1_0 = kernel1_tm.row(q);
                const float* ktm2_0 = kernel2_tm.row(q);
                const float* ktm3_0 = kernel3_tm.row(q);

                ktm2p[0] = ktm0_0[r];
                ktm2p[1] = ktm1_0[r];
                ktm2p[2] = ktm2_0[r];
                ktm2p[3] = ktm3_0[r];

                ktm2p += 4;
            }
        }
    }

    // leftover output channels, one at a time
    for (; p<outch; p++)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);

#if __ARM_NEON && __aarch64__
        Mat ktm2 = kernel_tm2.channel(p/8+(p%8)/4+p%4);
#else
        Mat ktm2 = kernel_tm2.channel(p/4+p%4);
#endif

        for (int r=0; r<64; r++)
        {
            float* ktm2p = ktm2.row(r);

            for (int q=0; q<inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);

                ktm2p[0] = ktm0_0[r];

                ktm2p += 1;
            }
        }
    }

    // replace the plain transformed kernels with the interleaved layout
    kernel_tm = kernel_tm2;
}

#if 0//TODO remove old code sometime later
static void conv3x3s1_winograd64_neon(const Mat& bottom_blob, Mat&
top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(8*8, w_tm/8 * h_tm/8, inch); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + j * 6; float* r0_tm = img0_tm.row(i * w_tm/8 + j); // TODO neon 
optimize for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]); r0_tm[1] = tmp12a + tmp12b; r0_tm[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); r0_tm[3] = tmp34a + tmp34b; r0_tm[4] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); r0_tm[5] = tmp56a + tmp56b; r0_tm[6] = tmp56a - tmp56b; r0_tm += 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(8*8, w_tm/8 * h_tm/8, outch); int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); const Mat kernel2_tm = kernel_tm.channel(p+2); const Mat kernel3_tm = 
kernel_tm.channel(p+3); out0_tm.fill(0.f); out1_tm.fill(0.f); out2_tm.fill(0.f); out3_tm.fill(0.f); int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* r2 = bottom_blob_tm.channel(q+2); const float* r3 = bottom_blob_tm.channel(q+3); const float* k00 = kernel0_tm.row(q); const float* k10 = kernel1_tm.row(q); const float* k20 = kernel2_tm.row(q); const float* k30 = kernel3_tm.row(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { #if __ARM_NEON #if __aarch64__ for (int m=0; m+7<64; m+=8) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output2_tm = vld1q_f32(output2_tm); float32x4_t _output3_tm = vld1q_f32(output3_tm); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k00 = vld1q_f32(k00); k00 += 64; float32x4_t _k01 = vld1q_f32(k00); k00 += 64; float32x4_t _k02 = vld1q_f32(k00); k00 += 64; float32x4_t _k03 = vld1q_f32(k00); k00 += 64; k00 -= 64*4; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tm = vmlaq_f32(_output0_tm, _r2, _k02); _output0_tm = vmlaq_f32(_output0_tm, _r3, _k03); float32x4_t _k10 = vld1q_f32(k10); k10 += 64; float32x4_t _k11 = vld1q_f32(k10); k10 += 64; float32x4_t _k12 = vld1q_f32(k10); k10 += 64; float32x4_t _k13 = vld1q_f32(k10); k10 += 64; k10 -= 64*4; _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tm = vmlaq_f32(_output1_tm, _r2, _k12); _output1_tm = vmlaq_f32(_output1_tm, _r3, _k13); float32x4_t _k20 = vld1q_f32(k20); k20 += 64; float32x4_t _k21 = vld1q_f32(k20); k20 += 64; float32x4_t _k22 = vld1q_f32(k20); k20 += 64; float32x4_t _k23 = vld1q_f32(k20); k20 += 64; 
k20 -= 64*4; _output2_tm = vmlaq_f32(_output2_tm, _r0, _k20); _output2_tm = vmlaq_f32(_output2_tm, _r1, _k21); _output2_tm = vmlaq_f32(_output2_tm, _r2, _k22); _output2_tm = vmlaq_f32(_output2_tm, _r3, _k23); float32x4_t _k30 = vld1q_f32(k30); k30 += 64; float32x4_t _k31 = vld1q_f32(k30); k30 += 64; float32x4_t _k32 = vld1q_f32(k30); k30 += 64; float32x4_t _k33 = vld1q_f32(k30); k30 += 64; k30 -= 64*4; _output3_tm = vmlaq_f32(_output3_tm, _r0, _k30); _output3_tm = vmlaq_f32(_output3_tm, _r1, _k31); _output3_tm = vmlaq_f32(_output3_tm, _r2, _k32); _output3_tm = vmlaq_f32(_output3_tm, _r3, _k33); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output2_tm, _output2_tm); vst1q_f32(output3_tm, _output3_tm); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k00 += 4; k10 += 4; k20 += 4; k30 += 4; float32x4_t _output0_tmn = vld1q_f32(output0_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm); float32x4_t _output2_tmn = vld1q_f32(output2_tm); float32x4_t _output3_tmn = vld1q_f32(output3_tm); float32x4_t _r0n = vld1q_f32(r0); float32x4_t _r1n = vld1q_f32(r1); float32x4_t _r2n = vld1q_f32(r2); float32x4_t _r3n = vld1q_f32(r3); float32x4_t _k00n = vld1q_f32(k00); k00 += 64; float32x4_t _k01n = vld1q_f32(k00); k00 += 64; float32x4_t _k02n = vld1q_f32(k00); k00 += 64; float32x4_t _k03n = vld1q_f32(k00); k00 += 64; k00 -= 64*4; _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k02n); _output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k03n); float32x4_t _k10n = vld1q_f32(k10); k10 += 64; float32x4_t _k11n = vld1q_f32(k10); k10 += 64; float32x4_t _k12n = vld1q_f32(k10); k10 += 64; float32x4_t _k13n = vld1q_f32(k10); k10 += 64; k10 -= 64*4; _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); _output1_tmn = vmlaq_f32(_output1_tmn, _r2n, _k12n); 
_output1_tmn = vmlaq_f32(_output1_tmn, _r3n, _k13n); float32x4_t _k20n = vld1q_f32(k20); k20 += 64; float32x4_t _k21n = vld1q_f32(k20); k20 += 64; float32x4_t _k22n = vld1q_f32(k20); k20 += 64; float32x4_t _k23n = vld1q_f32(k20); k20 += 64; k20 -= 64*4; _output2_tmn = vmlaq_f32(_output2_tmn, _r0n, _k20n); _output2_tmn = vmlaq_f32(_output2_tmn, _r1n, _k21n); _output2_tmn = vmlaq_f32(_output2_tmn, _r2n, _k22n); _output2_tmn = vmlaq_f32(_output2_tmn, _r3n, _k23n); float32x4_t _k30n = vld1q_f32(k30); k30 += 64; float32x4_t _k31n = vld1q_f32(k30); k30 += 64; float32x4_t _k32n = vld1q_f32(k30); k30 += 64; float32x4_t _k33n = vld1q_f32(k30); k30 += 64; k30 -= 64*4; _output3_tmn = vmlaq_f32(_output3_tmn, _r0n, _k30n); _output3_tmn = vmlaq_f32(_output3_tmn, _r1n, _k31n); _output3_tmn = vmlaq_f32(_output3_tmn, _r2n, _k32n); _output3_tmn = vmlaq_f32(_output3_tmn, _r3n, _k33n); vst1q_f32(output0_tm, _output0_tmn); vst1q_f32(output1_tm, _output1_tmn); vst1q_f32(output2_tm, _output2_tmn); vst1q_f32(output3_tm, _output3_tmn); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k00 += 4; k10 += 4; k20 += 4; k30 += 4; } #else // __aarch64__ asm volatile( "mov r4, #8 \n" "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm "0: \n" "pld [%4, #256] \n" "vld1.f32 {d0-d3}, [%4 :128]! \n"//q0 q1 = _r0 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k00 "add %8, %8, #256 \n" "vmla.f32 q8, q0, q10 \n" "vmla.f32 q9, q1, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]\n"//q12 q13 = _output1_tm "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k10 "add %9, %9, #256 \n" "vmla.f32 q12, q0, q14 \n" "vmla.f32 q13, q1, q15 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! 
\n"//q2 q3 = _r1 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k01 "add %8, %8, #256 \n" "vmla.f32 q8, q2, q10 \n" "vmla.f32 q9, q3, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k11 "add %9, %9, #256 \n" "vmla.f32 q12, q2, q14 \n" "vmla.f32 q13, q3, q15 \n" "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]!\n"//q4 q5 = _r2 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k02 "add %8, %8, #256 \n" "vmla.f32 q8, q4, q10 \n" "vmla.f32 q9, q5, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k12 "add %9, %9, #256 \n" "vmla.f32 q12, q4, q14 \n" "vmla.f32 q13, q5, q15 \n" "pld [%7, #256] \n" "vld1.f32 {d12-d15}, [%7 :128]!\n"//q6 q7 = _r3 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k03 "sub %8, %8, #736 \n" "vmla.f32 q8, q6, q10 \n" "vmla.f32 q9, q7, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k13 "sub %9, %9, #736 \n" "vmla.f32 q12, q6, q14 \n" "vmla.f32 q13, q7, q15 \n" "vst1.f32 {d16-d19}, [%0 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]\n"//q8 q9 = _output2_tm "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k20 "add %10, %10, #256 \n" "vmla.f32 q8, q0, q10 \n" "vmla.f32 q9, q1, q11 \n" "vst1.f32 {d24-d27}, [%1 :128]!\n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]\n"//q12 q13 = _output3_tm "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k30 "add %11, %11, #256 \n" "vmla.f32 q12, q0, q14 \n" "vmla.f32 q13, q1, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k21 "add %10, %10, #256 \n" "vmla.f32 q8, q2, q10 \n" "vmla.f32 q9, q3, q11 \n" "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k31 "add %11, %11, #256 \n" "vmla.f32 q12, q2, q14 \n" "vmla.f32 q13, q3, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k22 "add %10, %10, #256 \n" "vmla.f32 q8, q4, q10 \n" "vmla.f32 q9, q5, q11 \n" "pld [%11, 
#256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k32 "add %11, %11, #256 \n" "vmla.f32 q12, q4, q14 \n" "vmla.f32 q13, q5, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k23 "sub %10, %10, #736 \n" "vmla.f32 q8, q6, q10 \n" "vmla.f32 q9, q7, q11 \n" "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k33 "sub %11, %11, #736 \n" "vmla.f32 q12, q6, q14 \n" "vmla.f32 q13, q7, q15 \n" "vst1.f32 {d16-d19}, [%2 :128]!\n" "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm "subs r4, r4, #1 \n" "vst1.f32 {d24-d27}, [%3 :128]!\n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(r2), // %6 "=r"(r3), // %7 "=r"(k00), // %8 "=r"(k10), // %9 "=r"(k20), // %10 "=r"(k30) // %11 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(r2), "7"(r3), "8"(k00), "9"(k10), "10"(k20), "11"(k30) : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ k00 -= 64; k10 -= 64; k20 -= 64; k30 -= 64; #else for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k00[m]; k00 += 64; output0_tm[m] += r1[m] * k00[m]; k00 += 64; output0_tm[m] += r2[m] * k00[m]; k00 += 64; output0_tm[m] += r3[m] * k00[m]; k00 += 64; k00 -= 64 * 4; output1_tm[m] += r0[m] * k10[m]; k10 += 64; output1_tm[m] += r1[m] * k10[m]; k10 += 64; output1_tm[m] += r2[m] * k10[m]; k10 += 64; output1_tm[m] += r3[m] * k10[m]; k10 += 64; k10 -= 64 * 4; output2_tm[m] += r0[m] * k20[m]; k20 += 64; output2_tm[m] += r1[m] * k20[m]; k20 += 64; output2_tm[m] += r2[m] * k20[m]; k20 += 64; output2_tm[m] += r3[m] * k20[m]; k20 += 64; k20 -= 64 * 4; output3_tm[m] += r0[m] * k30[m]; k30 += 64; output3_tm[m] += r1[m] * k30[m]; k30 += 64; output3_tm[m] += r2[m] * k30[m]; k30 += 64; output3_tm[m] += r3[m] * k30[m]; k30 += 64; k30 -= 64 * 4; } r0 += 64; r1 
+= 64; r2 += 64; r3 += 64; output0_tm += 64; output1_tm += 64; output2_tm += 64; output3_tm += 64; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { // TODO neon optimize for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; output1_tm[m] += r0[m] * k1[m]; output2_tm[m] += r0[m] * k2[m]; output3_tm[m] += r0[m] * k3[m]; } r0 += 64; output0_tm += 64; output1_tm += 64; output2_tm += 64; output3_tm += 64; } } } #pragma omp parallel for for (int p=remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); out0_tm.fill(0.f); int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* r2 = bottom_blob_tm.channel(q+2); const float* r3 = bottom_blob_tm.channel(q+3); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel0_tm.row(q+1); const float* k2 = kernel0_tm.row(q+2); const float* k3 = kernel0_tm.row(q+3); float* output0_tm = out0_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { #if __ARM_NEON #if __aarch64__ for (int m=0; m+7<64; m+=8) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); float32x4_t _k3 = vld1q_f32(k3); _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tm = vmlaq_f32(_output0_tm, _r2, _k2); _output0_tm = vmlaq_f32(_output0_tm, _r3, _k3); vst1q_f32(output0_tm, _output0_tm); 
output0_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k0 += 4; k1 += 4; k2 += 4; k3 += 4; float32x4_t _output0_tmn = vld1q_f32(output0_tm); float32x4_t _r0n = vld1q_f32(r0); float32x4_t _r1n = vld1q_f32(r1); float32x4_t _r2n = vld1q_f32(r2); float32x4_t _r3n = vld1q_f32(r3); float32x4_t _k0n = vld1q_f32(k0); float32x4_t _k1n = vld1q_f32(k1); float32x4_t _k2n = vld1q_f32(k2); float32x4_t _k3n = vld1q_f32(k3); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); _output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k2n); _output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k3n); vst1q_f32(output0_tm, _output0_tmn); output0_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k0 += 4; k1 += 4; k2 += 4; k3 += 4; } #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "mov r4, %0 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! 
\n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! 
\n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! 
\n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "vmla.f32 q15, q9, q11 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(k0), // %5 "=r"(k1), // %6 "=r"(k2), // %7 "=r"(k3) // %8 : "0"(output0_tm), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(k0), "6"(k1), "7"(k2), "8"(k3) : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ k0 -= 64; k1 -= 64; k2 -= 64; k3 -= 64; #else for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; output0_tm[m] += r1[m] * k1[m]; output0_tm[m] += r2[m] * k2[m]; output0_tm[m] += r3[m] * k3[m]; } r0 += 64; r1 += 64; r2 += 64; r3 += 64; output0_tm += 64; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k0 = kernel0_tm.row(q); float* output0_tm = out0_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { // TODO neon optimize for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; } r0 += 64; output0_tm += 64; } } } } bottom_blob_tm = Mat(); // 
END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { const float* output0_tm = out0_tm.row(i * w_tm/8 + j); float* output0 = out0.row(i * 6) + j * 6; // TODO neon optimize for (int m=0; m<8; m++) { float tmp024a = output0_tm[1] + output0_tm[2]; float tmp135a = output0_tm[1] - output0_tm[2]; float tmp024b = output0_tm[3] + output0_tm[4]; float tmp135b = output0_tm[3] - output0_tm[4]; float tmp024c = output0_tm[5] + output0_tm[6]; float tmp135c = output0_tm[5] - output0_tm[6]; tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm += 8; } for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + 
tmp0[4]; // completes "float tmp024b = tmp0[3] + ..." begun on the previous line
// ---- tail of the preceding function (its definition starts before this chunk):
// finish one 8x8 -> 6x6 inverse-Winograd tile row, add per-channel bias, then
// crop the padded result back to the caller's output size.
                    float tmp135b = tmp0[3] - tmp0[4];
                    float tmp024c = tmp0[5] + tmp0[6];
                    float tmp135c = tmp0[5] - tmp0[6];

                    // otm rows (see comment block above in the original file):
                    // even outputs use the +(sum) terms, odd outputs the -(diff) terms
                    output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
                    output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
                    output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;

                    output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
                    output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
                    output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;

                    output0 += outw;
                }
            }
        }
    }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}

// conv3x3s1_winograd64_neon2: 3x3 stride-1 convolution computed with the
// Winograd F(6x6,3x3) transform, variant 2.
//
// Pipeline (all stages visible below):
//   1. pad input so the output is a multiple of 6 (input becomes 6n+2),
//   2. transform each 8x8 input tile into the Winograd domain
//      (bottom_blob_tm: each Mat row holds a PAIR of transformed tile rows,
//       i.e. 2*8 floats; the 4 row-pairs of a tile live in 4 sections of
//       the channel, `tiles` rows apart -- see r0_tm01/23/45/67),
//   3. element-wise multiply-accumulate against the pre-transformed kernel
//      (kernel_tm, 4 * 16 floats per in/out channel pair), two input
//      channels at a time with NEON, scalar fallback otherwise,
//   4. inverse-transform each 8x8 tile to a 6x6 output block and add bias,
//   5. crop the bordered output to the requested top_blob size.
//
// Parameters: bottom_blob = input feature maps; top_blob = pre-sized output;
// kernel_tm = Winograd-transformed weights; _bias = per-output-channel bias
// (may be empty -> bias pointer is null and 0.f is used).
static void conv3x3s1_winograd64_neon2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2: round the output up to a multiple of 6, input needs a
    // 2-pixel apron for the 8x8 (6+2) overlapping tiles
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        // layout: rows of 16 floats (two transformed 8-float tile rows each),
        // 4 such row-pairs per tile stored `tiles` rows apart
        bottom_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, inch);
        const int tiles = w_tm/8 * h_tm/8;

        // const float itm[8][8] = {
        //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
        //
        //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
        //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
        //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
        //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
        //
        //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
        // };

        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25

        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

#pragma omp parallel for
        for (int q = 0; q<inch; q++)
        {
            const Mat img0 = bottom_blob_tm.channel(q) == img0.data ? img0 : bottom_blob_bordered.channel(q);
i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + j * 6; float* r0_tm0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles * 2); float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles * 3); float* r0_tm4 = img0_tm.row(i * w_tm/8 + j + tiles * 4); float* r0_tm5 = img0_tm.row(i * w_tm/8 + j + tiles * 5); float* r0_tm6 = img0_tm.row(i * w_tm/8 + j + tiles * 6); float* r0_tm7 = img0_tm.row(i * w_tm/8 + j + tiles * 7); for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tms[8] = { r0_tm0, r0_tm1, r0_tm2, r0_tm3, r0_tm4, r0_tm5, r0_tm6, r0_tm7 }; for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; float* r0_tm = r0_tms[m]; r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]); r0_tm[1] = tmp12a + tmp12b; r0_tm[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); r0_tm[3] = tmp34a + tmp34b; r0_tm[4] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); r0_tm[5] = tmp56a + tmp56b; r0_tm[6] = tmp56a - tmp56b; } } } } } bottom_blob_bordered = 
Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, outch); const int tiles = h_tm/8 * w_tm/8; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); out0_tm.fill(0.f); out1_tm.fill(0.f); int q = 0; for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q+1); float* output0_tm = out0_tm; float* output1_tm = out1_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k01 = vld1q_f32(k01); float32x4_t _k01n = vld1q_f32(k01+4); float32x4_t _k10 = vld1q_f32(k10); float32x4_t _k10n = vld1q_f32(k10+4); float32x4_t _k11 = vld1q_f32(k11); float32x4_t _k11n = vld1q_f32(k11+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k01; float32x4_t _k01n; float32x4_t _k10; float32x4_t _k10n; float32x4_t _k11; float32x4_t _k11n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e4-%f4}, [%0 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {%e6-%f6}, [%1 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {%e8-%f8}, [%2 :128]! \n" "pld [%3, #256] \n" "vld1.f32 {%e10-%f10}, [%3 :128]! \n" "vld1.f32 {%e5-%f5}, [%0 :128]! \n" "vld1.f32 {%e7-%f7}, [%1 :128]! \n" "vld1.f32 {%e9-%f9}, [%2 :128]! \n" "vld1.f32 {%e11-%f11}, [%3 :128]! 
\n" : "=r"(k00), // %0 "=r"(k01), // %1 "=r"(k10), // %2 "=r"(k11), // %3 "=w"(_k00), // %4 "=w"(_k00n), // %5 "=w"(_k01), // %6 "=w"(_k01n), // %7 "=w"(_k10), // %8 "=w"(_k10n), // %9 "=w"(_k11), // %10 "=w"(_k11n) // %11 : "0"(k00), "1"(k01), "2"(k10), "3"(k11) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, 
_r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; 
output1_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! 
\n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(r1) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(r1), "w"(_k00), // %10 "w"(_k00n), // %11 "w"(_k01), // %12 "w"(_k01n), // %13 "w"(_k10), // %14 "w"(_k10n), // %15 "w"(_k11), // %16 "w"(_k11n) // %17 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); 
output0_tm += 8; output1_tm += 8; #else asm volatile( "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(r0), // %2 "=r"(r1) // %3 : "0"(output0_tm), "1"(output1_tm), "2"(r0), "3"(r1), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k01), // %10 "w"(_k01n), // %11 "w"(_k10), // %12 "w"(_k10n), // %13 "w"(_k11), // %14 "w"(_k11n) // %15 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output0_tm[m] += r1[m] * k01[m]; output1_tm[m] += r0[m] * k10[m]; output1_tm[m] += r1[m] * k11[m]; } r0 += 8; r1 += 8; output0_tm += 8; output1_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k01 += 8; k10 += 8; k11 += 8; #endif // __aarch64__ #else k00 += 8; k01 += 8; k10 += 8; k11 += 8; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k00 = kernel0_tm.row(q); const float* k10 = kernel1_tm.row(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k10 = vld1q_f32(k10); float32x4_t _k10n = vld1q_f32(k10+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k10; float32x4_t _k10n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e2-%f2}, [%0 :128]! 
\n" "pld [%1, #256] \n" "vld1.f32 {%e4-%f4}, [%1 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e5-%f5}, [%1 :128]! \n" : "=r"(k00), // %0 "=r"(k10), // %1 "=w"(_k00), // %2 "=w"(_k00n), // %3 "=w"(_k10), // %4 "=w"(_k10n) // %5 : "0"(k00), "1"(k10) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = 
vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! 
\n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0) // %3 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k10), // %10 "w"(_k10n) // %11 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; #else asm volatile( "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! 
\n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q6 \n" "vmla.f32 q9, q13, %q7 \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q8 \n" "vmla.f32 q11, q13, %q9 \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(r0) // %2 : "0"(output0_tm), "1"(output1_tm), "2"(r0), "w"(_k00), // %6 "w"(_k00n), // %7 "w"(_k10), // %8 "w"(_k10n) // %9 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output1_tm[m] += r0[m] * k10[m]; } r0 += 8; output0_tm += 8; output1_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k10 += 8; #endif // __aarch64__ #else k00 += 8; k10 += 8; #endif // __ARM_NEON } } } #pragma omp parallel for for (int p = remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); out0_tm.fill(0.f); int q = 0; for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); float* output0_tm = out0_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k01 = vld1q_f32(k01); float32x4_t _k01n = vld1q_f32(k01+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k01; float32x4_t _k01n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {%e4-%f4}, [%1 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e5-%f5}, [%1 :128]! 
\n" : "=r"(k00), // %0 "=r"(k01), // %1 "=w"(_k00), // %2 "=w"(_k00n), // %3 "=w"(_k01), // %4 "=w"(_k01n) // %5 : "0"(k00), "1"(k01) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = 
vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "vst1.f32 {d16-d19}, [%1 :128]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(r1) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(r1), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k01), // %10 "w"(_k01n) // %11 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q6 \n" "vmla.f32 q9, q13, %q7 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q8 \n" "vmla.f32 q9, q15, %q9 \n" "vst1.f32 {d16-d19}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(r1) // %2 : "0"(output0_tm), "1"(r0), "2"(r1), "w"(_k00), // %6 "w"(_k00n), // %7 "w"(_k01), // %8 "w"(_k01n) // %9 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output0_tm[m] += r1[m] * k01[m]; } r0 += 8; r1 += 8; output0_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k01 += 8; #endif // __aarch64__ #else k00 += 8; k01 += 8; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k00 = kernel0_tm.row(q); float* output0_tm = out0_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); #else float32x4_t _k00; float32x4_t _k00n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e1-%f1}, [%0 :128]! \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" : "=r"(k00), // %0 "=w"(_k00), // %1 "=w"(_k00n) // %2 : "0"(k00) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile for (int i=0; i<tiles; i++) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q4 \n" "vmla.f32 q9, q13, %q5 \n" "vst1.f32 {d16-d19}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00), // %4 "w"(_k00n) // %5 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; } r0 += 8; output0_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; #endif // __aarch64__ #else k00 += 8; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles); const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles * 2); const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles * 3); const float* output0_tm4 = out0_tm.row(i * w_tm/8 + j + tiles * 4); const float* output0_tm5 = out0_tm.row(i * w_tm/8 + j + tiles * 5); const float* output0_tm6 = out0_tm.row(i * w_tm/8 + j + tiles * 6); const float* output0_tm7 = out0_tm.row(i * w_tm/8 + j + tiles * 7); float* output0 = out0.row(i * 6) + j * 6; const float* output0_tms[8] = { output0_tm0, output0_tm1, output0_tm2, output0_tm3, output0_tm4, output0_tm5, output0_tm6, output0_tm7 }; for (int m=0; m<8; m++) { const float* output0_tm = output0_tms[m]; float tmp024a = output0_tm[1] + output0_tm[2]; float tmp135a = output0_tm[1] - output0_tm[2]; float tmp024b = output0_tm[3] + output0_tm[4]; float tmp135b = output0_tm[3] - output0_tm[4]; float tmp024c = output0_tm[5] + output0_tm[6]; float tmp135c = output0_tm[5] - output0_tm[6]; tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; } for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + 
tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w); } #endif static void conv3x3s1_winograd64_neon4(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; Option opt_b = opt; opt_b.blob_allocator = opt.workspace_allocator; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, inch, 4u, opt.workspace_allocator); const int tiles = w_tm/8 * h_tm/8; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 
1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #if __ARM_NEON const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f }; float32x4_t _coeff0 = vld1q_f32(coeff); float32x4_t _coeff1 = vld1q_f32(coeff+4); #endif // __ARM_NEON #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { #if __ARM_NEON const float* r0 = img0.row(i * 6) + j * 6; const float* r1 = r0 + w; const float* r2 = r0 + w*2; const float* r3 = r0 + w*3; #if __aarch64__ for (int m=0; m+3<8; m+=4) { float32x4_t _r0_0123 = vld1q_f32(r0); float32x4_t _r0_4567 = vld1q_f32(r0+4); float32x4_t _r1_0123 = vld1q_f32(r1); float32x4_t _r1_4567 = vld1q_f32(r1+4); float32x4_t _r2_0123 = vld1q_f32(r2); float32x4_t _r2_4567 = vld1q_f32(r2+4); float32x4_t _r3_0123 = vld1q_f32(r3); float32x4_t _r3_4567 = vld1q_f32(r3+4); float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123); float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567); float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123); float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567); // no vswp intrinsic :( float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0])); float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1])); float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0])); float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1])); float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0])); float32x4_t 
_r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1])); float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0])); float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1])); float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66); float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11); float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22); float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55); float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[7][m], _tmp7); float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66); float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55); float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0); float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[2][m], _tmp2); float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0); float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1); float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[4][m], _tmp4); // reuse r04 * 1.25 // reuse r03 * 2.5 float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1); _tmp56b = 
vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1); float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(&tmp[5][m], _tmp5); vst1q_f32(&tmp[6][m], _tmp6); r0 += w*4; r1 += w*4; r2 += w*4; r3 += w*4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; const float* t2 = tmp[2]; const float* t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2); float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3); float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4); float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5); float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6); float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7); for (int m=0; m+3<8; m+=4) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4_t _t2_0123 = vld1q_f32(t2); float32x4_t _t2_4567 = vld1q_f32(t2+4); float32x4_t _t3_0123 = vld1q_f32(t3); float32x4_t _t3_4567 = vld1q_f32(t3+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123); float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567); // no vswp intrinsic :( float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0])); float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1])); float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0])); float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1])); float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0])); float32x4_t _t_55 = 
vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1])); float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0])); float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1])); float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66); float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11); float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22); float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55); float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1); r0_tm0_0[0] = vgetq_lane_f32(_r0_tm_0_0, 0); r0_tm1_0[0] = vgetq_lane_f32(_r0_tm_0_0, 1); r0_tm2_0[0] = vgetq_lane_f32(_r0_tm_0_0, 2); r0_tm3_0[0] = vgetq_lane_f32(_r0_tm_0_0, 3); r0_tm0_4[3] = vgetq_lane_f32(_r0_tm_4_3, 0); r0_tm1_4[3] = vgetq_lane_f32(_r0_tm_4_3, 1); r0_tm2_4[3] = vgetq_lane_f32(_r0_tm_4_3, 2); r0_tm3_4[3] = vgetq_lane_f32(_r0_tm_4_3, 3); float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66); float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55); float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0); float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b); r0_tm0_0[1] = vgetq_lane_f32(_r0_tm_0_1, 0); r0_tm1_0[1] = vgetq_lane_f32(_r0_tm_0_1, 1); r0_tm2_0[1] = vgetq_lane_f32(_r0_tm_0_1, 2); r0_tm3_0[1] = vgetq_lane_f32(_r0_tm_0_1, 3); r0_tm0_0[2] = vgetq_lane_f32(_r0_tm_0_2, 0); r0_tm1_0[2] = vgetq_lane_f32(_r0_tm_0_2, 1); r0_tm2_0[2] = vgetq_lane_f32(_r0_tm_0_2, 2); r0_tm3_0[2] = vgetq_lane_f32(_r0_tm_0_2, 3); float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0); float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, 
vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1); float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b); r0_tm0_0[3] = vgetq_lane_f32(_r0_tm_0_3, 0); r0_tm1_0[3] = vgetq_lane_f32(_r0_tm_0_3, 1); r0_tm2_0[3] = vgetq_lane_f32(_r0_tm_0_3, 2); r0_tm3_0[3] = vgetq_lane_f32(_r0_tm_0_3, 3); r0_tm0_4[0] = vgetq_lane_f32(_r0_tm_4_0, 0); r0_tm1_4[0] = vgetq_lane_f32(_r0_tm_4_0, 1); r0_tm2_4[0] = vgetq_lane_f32(_r0_tm_4_0, 2); r0_tm3_4[0] = vgetq_lane_f32(_r0_tm_4_0, 3); float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1); float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b); r0_tm0_4[1] = vgetq_lane_f32(_r0_tm_4_1, 0); r0_tm1_4[1] = vgetq_lane_f32(_r0_tm_4_1, 1); r0_tm2_4[1] = vgetq_lane_f32(_r0_tm_4_1, 2); r0_tm3_4[1] = vgetq_lane_f32(_r0_tm_4_1, 3); r0_tm0_4[2] = vgetq_lane_f32(_r0_tm_4_2, 0); r0_tm1_4[2] = vgetq_lane_f32(_r0_tm_4_2, 1); r0_tm2_4[2] = vgetq_lane_f32(_r0_tm_4_2, 2); r0_tm3_4[2] = vgetq_lane_f32(_r0_tm_4_2, 3); t0 += 8*4; t1 += 8*4; t2 += 8*4; t3 += 8*4; r0_tm0_0 += img0_tm.w*tiles*2*4; r0_tm0_4 += img0_tm.w*tiles*2*4; r0_tm1_0 += img0_tm.w*tiles*2*4; r0_tm1_4 += img0_tm.w*tiles*2*4; r0_tm2_0 += img0_tm.w*tiles*2*4; r0_tm2_4 += img0_tm.w*tiles*2*4; r0_tm3_0 += img0_tm.w*tiles*2*4; r0_tm3_4 += img0_tm.w*tiles*2*4; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; float* t2 = tmp[2]; float* t3 = tmp[3]; float* t4 = tmp[4]; float* t5 = tmp[5]; float* t6 = tmp[6]; float* t7 = tmp[7]; int stepw = w*4*4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8], %26 \n" "vld1.f32 {d20-d23}, [%9], %26 \n" 
"vld1.f32 {d24-d27}, [%10], %26 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11], %26 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m] "vmov q3, q7 \n"// use q7 "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n"// tmp[7][m] // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m] "vmov q3, q7 \n"// use q7 "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n"// tmp[7][m] : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(t2), // %2 "=r"(t3), // %3 "=r"(t4), // %4 "=r"(t5), // %5 "=r"(t6), // %6 "=r"(t7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(r3) // %11 : "0"(t0), "1"(t1), "2"(t2), "3"(t3), "4"(t4), "5"(t5), "6"(t6), "7"(t7), "8"(r0), "9"(r1), "10"(r2), "11"(r3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(stepw) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); t0 = tmp[0]; t1 = tmp[1]; t2 = tmp[2]; t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2); float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3); float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4); float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5); float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6); float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7); int step = img0_tm.w*tiles*2*4*4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8] \n" "add %8, %8, #128 \n" "vld1.f32 {d20-d23}, [%9] \n" "add %9, %9, #128 \n" "vld1.f32 {d24-d27}, [%10] \n" "add %10, %10, #128 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "add %11, %11, #128 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n"// use q7 "vst1.f32 {d5[0]}, [%4]! \n" "vst1.f32 {d5[1]}, [%6]! 
\n" "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0], %26 \n" "vst1.f32 {d16[1]}, [%2], %26 \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4], %26 \n" "vst1.f32 {d17[1]}, [%6], %26 \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "sub %0, %0, #12 \n" "sub %2, %2, #12 \n" "sub %4, %4, #12 \n" "sub %6, %6, #12 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1], %26 \n" "vst1.f32 {d4-d5}, [%3], %26 \n" "vst1.f32 {d6-d7}, [%5], %26 \n" "vst1.f32 {d12-d13}, [%7], %26 \n" // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n"// use q7 "vst1.f32 {d5[0]}, [%4]! 
\n" "vst1.f32 {d5[1]}, [%6]! \n" "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0] \n" "vst1.f32 {d16[1]}, [%2] \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4] \n" "vst1.f32 {d17[1]}, [%6] \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1] \n" "vst1.f32 {d4-d5}, [%3] \n" "vst1.f32 {d6-d7}, [%5] \n" "vst1.f32 {d12-d13}, [%7] \n" : "=r"(r0_tm0_0), // %0 "=r"(r0_tm0_4), // %1 "=r"(r0_tm1_0), // %2 "=r"(r0_tm1_4), // %3 "=r"(r0_tm2_0), // %4 "=r"(r0_tm2_4), // %5 "=r"(r0_tm3_0), // %6 "=r"(r0_tm3_4), // %7 "=r"(t0), // %8 "=r"(t1), // %9 "=r"(t2), // %10 "=r"(t3) // %11 : "0"(r0_tm0_0), "1"(r0_tm0_4), "2"(r0_tm1_0), "3"(r0_tm1_4), "4"(r0_tm2_0), "5"(r0_tm2_4), "6"(r0_tm3_0), "7"(r0_tm3_4), "8"(t0), "9"(t1), "10"(t2), "11"(t3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(step) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else const float* r0 = img0.row(i * 6) + j * 6; for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f); tmp[1][m] = tmp12a + 
tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f); float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f); float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles); for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f; r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f); float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]); r0_tm_0[1] = tmp12a + tmp12b; r0_tm_0[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f); float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f); r0_tm_0[3] = tmp34a + tmp34b; r0_tm_4[0] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f); float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f); r0_tm_4[1] = tmp56a + tmp56b; r0_tm_4[2] = tmp56a - tmp56b; r0_tm_0 += img0_tm.w * tiles * 2; r0_tm_4 += img0_tm.w * tiles * 2; } #endif // __ARM_NEON } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, outch, 4u, opt.workspace_allocator); const int tiles = h_tm/8 * w_tm/8; int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); const float* ktm = kernel_tm.channel(pp); out0_tm.fill(0.f); out1_tm.fill(0.f); out2_tm.fill(0.f); 
out3_tm.fill(0.f); int q = 0; #if __ARM_NEON && __aarch64__ for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* r2 = bottom_blob_tm.channel(q+2); const float* r3 = bottom_blob_tm.channel(q+3); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; asm volatile( "mov w0, #16 \n"// w0 = r = 16 "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%8], #64 \n"// v0 v1 v2 v3 = _k00 _k01 _k02 _k03 "prfm pldl1keep, [%8, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%8], #64 \n"// v4 v5 v6 v7 = _k10 _k11 _k12 _k13 "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"// v8 v9 v10 v11 = _k20 _k21 _k22 _k23 "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"// v12 v13 v14 v15 = _k30 _k31 _k32 _k33 // tile loop "lsr w1, %w18, #2 \n"// w1 = nn = tiles >> 2 "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "prfm pldl1keep, [%4, #128] \n"// "ld1 {v16.4s}, [%4], #16 \n" "1: \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "add x4, %0, #16 \n"// x4 = %0 next "fmla v20.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "add x5, %1, #16 \n"// x5 = %1 next "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "add x6, %2, #16 \n"// x6 = %2 next "fmla v22.4s, v16.4s, v8.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "add x7, %3, #16 \n"// x7 = %3 next "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [x4, #128] \n" "ld1 {v24.4s}, [x4] \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v25.4s}, [x5] \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla 
v21.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [x6, #128] \n" "ld1 {v26.4s}, [x6] \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [x7, #128] \n" "ld1 {v27.4s}, [x7] \n" "st1 {v20.4s}, [%0] \n" "add %0, %0, #32 \n" "fmla v24.4s, v16.4s, v0.4s \n" "fmla v25.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v26.4s, v16.4s, v8.4s \n" "fmla v27.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "st1 {v21.4s}, [%1] \n" "add %1, %1, #32 \n" "fmla v24.4s, v17.4s, v1.4s \n" "fmla v25.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v26.4s, v17.4s, v9.4s \n" "fmla v27.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "st1 {v22.4s}, [%2] \n" "add %2, %2, #32 \n" "fmla v24.4s, v18.4s, v2.4s \n" "fmla v25.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v26.4s, v18.4s, v10.4s \n" "fmla v27.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "st1 {v23.4s}, [%3] \n" "add %3, %3, #32 \n" "fmla v24.4s, v19.4s, v3.4s \n" "fmla v25.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v26.4s, v19.4s, v11.4s \n" "fmla v27.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "st1 {v24.4s}, [x4] \n" "add x4, x4, #32 \n" "fmla v20.4s, v16.4s, v0.4s \n" "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v22.4s, v16.4s, v8.4s \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [x4, #128] \n" "ld1 {v24.4s}, [x4] \n" "st1 {v25.4s}, [x5] \n" "add x5, x5, #32 \n" "fmla v20.4s, v17.4s, 
v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v25.4s}, [x5] \n" "st1 {v26.4s}, [x6] \n" "add x6, x6, #32 \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [x6, #128] \n" "ld1 {v26.4s}, [x6] \n" "st1 {v27.4s}, [x7] \n" "add x7, x7, #32 \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [x7, #128] \n" "ld1 {v27.4s}, [x7] \n" "st1 {v20.4s}, [%0] \n" "fmla v24.4s, v16.4s, v0.4s \n" "fmla v25.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v26.4s, v16.4s, v8.4s \n" "fmla v27.4s, v16.4s, v12.4s \n" "st1 {v21.4s}, [%1] \n" "fmla v24.4s, v17.4s, v1.4s \n" "fmla v25.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v26.4s, v17.4s, v9.4s \n" "fmla v27.4s, v17.4s, v13.4s \n" "st1 {v22.4s}, [%2] \n" "fmla v24.4s, v18.4s, v2.4s \n" "fmla v25.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v26.4s, v18.4s, v10.4s \n" "fmla v27.4s, v18.4s, v14.4s \n" "st1 {v23.4s}, [%3] \n" "fmla v24.4s, v19.4s, v3.4s \n" "fmla v25.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v26.4s, v19.4s, v11.4s \n" "fmla v27.4s, v19.4s, v15.4s \n" "st1 {v24.4s}, [x4], #16 \n" "mov %0, x4 \n" "st1 {v25.4s}, [x5], #16 \n" "mov %1, x5 \n" "subs w1, w1, #1 \n" "st1 {v26.4s}, [x6], #16 \n" "mov %2, x6 \n" "st1 {v27.4s}, [x7], #16 \n" "mov %3, x7 \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and w1, %w18, #3 \n"// w1 = remain = tiles & 3; 
"cmp w1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "fmla v20.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "fmla v22.4s, v16.4s, v8.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" "st1 {v20.4s}, [%0], #16 \n" "st1 {v21.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v22.4s}, [%2], #16 \n" "st1 {v23.4s}, [%3], #16 \n" "bne 3b \n" //END remain loop "4: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(r2), // %6 "=r"(r3), // %7 "=r"(ktm) // %8 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(r2), "7"(r3), "8"(ktm), "r"(tiles) // %18 : "cc", "memory", "x0", "x1", "x4", "x5", "x6", "x7", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); } #endif // __ARM_NEON && __aarch64__ for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); float* output0_tm = out0_tm; float* 
output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; #if __ARM_NEON #if __aarch64__ asm volatile( "mov w0, #16 \n"// w0 = r = 16 "0: \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v0.4s, v1.4s}, [%6], #32 \n"// v0 v1 = _k00 _k01 "prfm pldl1keep, [%6, #256] \n" "ld1 {v2.4s, v3.4s}, [%6], #32 \n"// v2 v3 = _k10 _k11 "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4s, v5.4s}, [%6], #32 \n"// v4 v5 = _k20 _k21 "prfm pldl1keep, [%6, #256] \n" "ld1 {v6.4s, v7.4s}, [%6], #32 \n"// v6 v7 = _k30 _k31 // tile loop "lsr w1, %w14, #2 \n"// w1 = nn = tiles >> 2 "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "1: \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, 
v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and w1, %w14, #3 \n"// w1 = remain = tiles & 3; "cmp w1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, 
v2.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "bne 3b \n" //END remain loop "4: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(ktm) // %6 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(ktm), "r"(tiles) // %14 : "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21" ); #else asm volatile( "mov r0, #16 \n"// r0 = r = 16 "0: \n" "pld [%6, #256] \n" "vld1.f32 {d0-d3}, [%6 :128]! \n"// q0 q1 = _k00 _k01 "pld [%6, #256] \n" "vld1.f32 {d4-d7}, [%6 :128]! \n"// q2 q3 = _k10 _k11 "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]! \n"// q4 q5 = _k20 _k21 "pld [%6, #256] \n" "vld1.f32 {d12-d15}, [%6 :128]! \n"// q6 q7 = _k30 _k31 // tile loop "lsr r1, %14, #2 \n"// r1 = nn = tiles >> 2 "cmp r1, #0 \n" "beq 2f \n" //BEGIN tile loop "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "1: \n" "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! 
\n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! 
\n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and r1, %14, #3 \n"// r1 = remain = tiles & 3; "cmp r1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" "bne 3b \n" //END remain loop "4: \n" "subs r0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(ktm) // %6 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(ktm), "r"(tiles) // %14 : "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13" ); #endif // __aarch64__ #else for (int r=0; r<16; r++) { for (int t=0; t<tiles; t++) { for (int m=0; m<4; m++) { output0_tm[m] += r0[m] * ktm[0 +m]; output0_tm[m] += r1[m] * ktm[4 +m]; output1_tm[m] += r0[m] * ktm[8 +m]; output1_tm[m] += r1[m] * ktm[12+m]; output2_tm[m] += r0[m] * ktm[16+m]; output2_tm[m] += r1[m] * ktm[20+m]; output3_tm[m] += r0[m] * ktm[24+m]; output3_tm[m] += r1[m] * ktm[28+m]; } r0 += 4; r1 += 4; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } ktm += 32; } #endif // __ARM_NEON } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; #if __ARM_NEON #if __aarch64__ asm volatile( "mov w0, #16 \n"// w0 = r = 16 "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s}, [%5], #32 \n"// v0 v1 = _k00 _k10 "prfm pldl1keep, [%5, #256] \n" "ld1 {v2.4s, v3.4s}, [%5], #32 \n"// v2 v3 = _k20 _k30 // tile loop "mov w1, %w12 \n"// w1 = tiles "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "1: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v17.4s}, [%0] \n" "fmla v17.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "fmla v18.4s, v16.4s, v1.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "fmla v19.4s, v16.4s, v2.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v20.4s}, [%3] \n" "fmla v20.4s, v16.4s, v3.4s \n" "st1 {v17.4s}, [%0], #16 \n" "st1 {v18.4s}, [%1], #16 \n" "subs w1, 
w1, #1 \n" "st1 {v19.4s}, [%2], #16 \n" "st1 {v20.4s}, [%3], #16 \n" "bne 1b \n" //END tile loop "2: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(ktm) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(ktm), "r"(tiles) // %12 : "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20" ); #else asm volatile( "mov r0, #16 \n"// r0 = r = 16 "0: \n" "pld [%5, #256] \n" "vld1.f32 {d0-d3}, [%5 :128]! \n"// q0 q1 = _k00 _k10 "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n"// q2 q3 = _k20 _k30 // tile loop "mov r1, %12 \n"// r1 = tiles "cmp r1, #0 \n" "beq 2f \n" //BEGIN tile loop "1: \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q1 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q2 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q3 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" "bne 1b \n" //END tile loop "2: \n" "subs r0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(ktm) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(ktm), "r"(tiles) // %12 : "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13" ); #endif // __aarch64__ #else for (int r=0; r<16; r++) { for (int t=0; t<tiles; t++) { for (int m=0; m<4; m++) { output0_tm[m] += r0[m] * ktm[0 +m]; output1_tm[m] += r0[m] * ktm[4 +m]; output2_tm[m] += r0[m] * ktm[8 +m]; output3_tm[m] += r0[m] * ktm[12+m]; } r0 += 4; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } ktm += 16; } #endif // __ARM_NEON } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const float* ktm = (const float*)kernel_tm.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start); out0_tm.fill(0.f); int q = 0; for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); float* output0_tm = out0_tm; for (int r=0; r<16; r++) { #if __ARM_NEON float32x4_t _k00 = vld1q_f32(ktm); ktm += 4; #endif // __ARM_NEON // tile for (int i=0; i<tiles; i++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v17.4s, %4.4s \n" "st1 {v16.4s}, [%0], #16 \n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00) // %4 : "cc", "memory", "v16", "v17" ); #else asm volatile( "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128]! \n"// q9 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q9, %q4 \n" "vst1.f32 {d16-d17}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00) // %4 : "cc", "memory", "q8", "q9" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { output0_tm[m] += r0[m] * ktm[m]; } r0 += 4; output0_tm += 4; #endif // __ARM_NEON } #if !__ARM_NEON ktm += 4; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #if __ARM_NEON const float coeff[4] = { 4.f, 8.f, 16.f, 32.f }; float32x4_t _coeff = vld1q_f32(coeff); #endif // __ARM_NEON int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; #if __ARM_NEON float32x2_t _bias0 = vdup_n_f32(bias0); #endif // __ARM_NEON float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { #if __ARM_NEON const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles); const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*2); const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*3); const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*4); const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*5); const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*6); const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*7); #if __aarch64__ for (int m=0; m+3<8; m+=4) { float32x4_t _output0_tm0_0123 = vld1q_f32(output0_tm0_0); float32x4_t _output0_tm0_4567 = vld1q_f32(output0_tm0_4); float32x4_t _output0_tm1_0123 = vld1q_f32(output0_tm1_0); float32x4_t _output0_tm1_4567 = vld1q_f32(output0_tm1_4); float32x4_t _output0_tm2_0123 = vld1q_f32(output0_tm2_0); float32x4_t _output0_tm2_4567 = vld1q_f32(output0_tm2_4); float32x4_t _output0_tm3_0123 = vld1q_f32(output0_tm3_0); float32x4_t _output0_tm3_4567 = vld1q_f32(output0_tm3_4); float32x4x2_t _output0_tm01_00221133 = vtrnq_f32(_output0_tm0_0123, _output0_tm1_0123); float32x4x2_t _output0_tm01_44665577 = vtrnq_f32(_output0_tm0_4567, _output0_tm1_4567); float32x4x2_t _output0_tm23_00221133 = vtrnq_f32(_output0_tm2_0123, _output0_tm3_0123); float32x4x2_t _output0_tm23_44665577 = vtrnq_f32(_output0_tm2_4567, _output0_tm3_4567); // no vswp intrinsic :( float32x4_t _output0_tm_00 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[0]), vget_low_f32(_output0_tm23_00221133.val[0])); float32x4_t _output0_tm_11 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[1]), vget_low_f32(_output0_tm23_00221133.val[1])); float32x4_t _output0_tm_22 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[0]), 
vget_high_f32(_output0_tm23_00221133.val[0])); float32x4_t _output0_tm_33 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[1]), vget_high_f32(_output0_tm23_00221133.val[1])); float32x4_t _output0_tm_44 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[0]), vget_low_f32(_output0_tm23_44665577.val[0])); float32x4_t _output0_tm_55 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[1]), vget_low_f32(_output0_tm23_44665577.val[1])); float32x4_t _output0_tm_66 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[0]), vget_high_f32(_output0_tm23_44665577.val[0])); float32x4_t _output0_tm_77 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[1]), vget_high_f32(_output0_tm23_44665577.val[1])); float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a); _tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1); _tmp0 = vaddq_f32(_tmp0, _tmp024b); float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1); float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _tmp4 = vaddq_f32(_tmp4, _tmp024c); _tmp4 = vaddq_f32(_tmp4, _tmp024c); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[2][m], _tmp2); vst1q_f32(&tmp[4][m], _tmp4); float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _tmp1 = vaddq_f32(_tmp1, _tmp135b); _tmp1 = vaddq_f32(_tmp1, _tmp135b); float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0); float32x4_t _tmp5 = 
vaddq_f32(_output0_tm_77, _tmp135a); _tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1); _tmp5 = vaddq_f32(_tmp5, _tmp135c); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[5][m], _tmp5); output0_tm0_0 += out0_tm.w * tiles * 2*4; output0_tm0_4 += out0_tm.w * tiles * 2*4; output0_tm1_0 += out0_tm.w * tiles * 2*4; output0_tm1_4 += out0_tm.w * tiles * 2*4; output0_tm2_0 += out0_tm.w * tiles * 2*4; output0_tm2_4 += out0_tm.w * tiles * 2*4; output0_tm3_0 += out0_tm.w * tiles * 2*4; output0_tm3_4 += out0_tm.w * tiles * 2*4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; for (int m=0; m+1<6; m+=2) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]); float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]); float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]); float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]); float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]); float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]); float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]); float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]); float32x2_t _tmp024a = vadd_f32(_t_11, _t_22); float32x2_t _tmp135a = vsub_f32(_t_11, _t_22); float32x2_t _tmp024b = vadd_f32(_t_33, _t_44); float32x2_t _tmp135b = vsub_f32(_t_33, _t_44); float32x2_t _tmp024c = vadd_f32(_t_55, _t_66); float32x2_t _tmp135c = vsub_f32(_t_55, _t_66); float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a); _output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1); _output_0 = vadd_f32(_output_0, _tmp024b); _output_0 = vadd_f32(_output_0, _bias0); float32x2_t _output_2 = vmla_lane_f32(_tmp024a, 
_tmp024b, vget_low_f32(_coeff), 0); _output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1); _output_2 = vadd_f32(_output_2, _bias0); float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _bias0); output0[0] = vget_lane_f32(_output_0, 0); output1[0] = vget_lane_f32(_output_0, 1); output0[2] = vget_lane_f32(_output_2, 0); output1[2] = vget_lane_f32(_output_2, 1); output0[4] = vget_lane_f32(_output_4, 0); output1[4] = vget_lane_f32(_output_4, 1); float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _bias0); float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0); _output_3 = vadd_f32(_output_3, _bias0); float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a); _output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1); _output_5 = vadd_f32(_output_5, _tmp135c); _output_5 = vadd_f32(_output_5, _bias0); output0[1] = vget_lane_f32(_output_1, 0); output1[1] = vget_lane_f32(_output_1, 1); output0[3] = vget_lane_f32(_output_3, 0); output1[3] = vget_lane_f32(_output_3, 1); output0[5] = vget_lane_f32(_output_5, 0); output1[5] = vget_lane_f32(_output_5, 1); t0 += 8*2; t1 += 8*2; output0 += outw*2; output1 += outw*2; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; int step = out0_tm.w * tiles * 2*4 *4; asm volatile( // loop0 "vld1.f32 {d16-d17}, [%2], %21 \n" "vld1.f32 {d18-d19}, [%3], %21 \n" "vld1.f32 {d20-d21}, [%4], %21 \n" "vld1.f32 {d22-d23}, [%5], %21 \n" "vld1.f32 {d24-d25}, [%6], %21 \n" "vld1.f32 {d26-d27}, [%7], %21 \n" "vld1.f32 {d28-d29}, [%8], %21 \n" "vld1.f32 {d30-d31}, [%9], %21 \n" "vtrn.32 q8, q10 \n" "vtrn.32 q9, q11 \n" 
"vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14 "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "sub %0, %0, #112 \n" "vst1.f32 {d30-d31}, [%1] \n" "sub %1, %1, #112 \n" // loop1 "vld1.f32 {d16-d17}, [%2] \n" "vld1.f32 {d18-d19}, [%3] \n" "vld1.f32 {d20-d21}, [%4] \n" "vld1.f32 {d22-d23}, [%5] \n" "vld1.f32 {d24-d25}, [%6] \n" "vld1.f32 {d26-d27}, [%7] \n" "vld1.f32 {d28-d29}, [%8] \n" "vld1.f32 {d30-d31}, [%9] \n" "vtrn.32 q8, q10 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14 "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 
\n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "vst1.f32 {d30-d31}, [%1] \n" : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(output0_tm0_0), // %2 "=r"(output0_tm0_4), // %3 "=r"(output0_tm1_0), // %4 "=r"(output0_tm1_4), // %5 "=r"(output0_tm2_0), // %6 "=r"(output0_tm2_4), // %7 "=r"(output0_tm3_0), // %8 "=r"(output0_tm3_4) // %9 : "0"(t0), "1"(t1), "2"(output0_tm0_0), "3"(output0_tm0_4), "4"(output0_tm1_0), "5"(output0_tm1_4), "6"(output0_tm2_0), "7"(output0_tm2_4), "8"(output0_tm3_0), "9"(output0_tm3_4), "w"(_coeff), // %20 "r"(step) // %21 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); t0 = tmp[0]; t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; int stepw = outw*2 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, 
d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop1 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop2 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, 
q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(t0), // %2 "=r"(t1) // %3 : "0"(output0), "1"(output1), "2"(t0), "3"(t1), "w"(_coeff), // %8 "w"(_bias0), // %9 "r"(stepw) // %10 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles); for (int m=0; m<8; m++) { float tmp024a = output0_tm_0[1] + output0_tm_0[2]; float tmp135a = output0_tm_0[1] - output0_tm_0[2]; float tmp024b = output0_tm_0[3] + output0_tm_4[0]; float tmp135b = output0_tm_0[3] - output0_tm_4[0]; float tmp024c = output0_tm_4[1] + output0_tm_4[2]; float tmp135c = output0_tm_4[1] - output0_tm_4[2]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; 
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_4[3] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += out0_tm.w * tiles * 2; output0_tm_4 += out0_tm.w * tiles * 2; } float* output0 = out0.row(i * 6) + j * 6; for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } #endif // __ARM_NEON } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd64_neon5(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; Option opt_b = opt; opt_b.blob_allocator = opt.workspace_allocator; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; bottom_blob_tm.create(1, 64 * tiles, 
inch, 4u, opt.workspace_allocator); // bottom_blob_tm.create(inch, tiles, 64); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #if __ARM_NEON const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f }; float32x4_t _coeff0 = vld1q_f32(coeff); float32x4_t _coeff1 = vld1q_f32(coeff+4); #endif // __ARM_NEON #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { #if __ARM_NEON const float* r0 = img0.row(i * 6) + j * 6; const float* r1 = r0 + w; const float* r2 = r0 + w*2; const float* r3 = r0 + w*3; #if __aarch64__ for (int m=0; m+3<8; m+=4) { float32x4_t _r0_0123 = vld1q_f32(r0); float32x4_t _r0_4567 = vld1q_f32(r0+4); float32x4_t _r1_0123 = vld1q_f32(r1); float32x4_t _r1_4567 = vld1q_f32(r1+4); float32x4_t _r2_0123 = vld1q_f32(r2); float32x4_t _r2_4567 = 
vld1q_f32(r2+4); float32x4_t _r3_0123 = vld1q_f32(r3); float32x4_t _r3_4567 = vld1q_f32(r3+4); float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123); float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567); float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123); float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567); // no vswp intrinsic :( float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0])); float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1])); float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0])); float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1])); float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0])); float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1])); float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0])); float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1])); float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66); float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11); float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22); float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55); float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[7][m], _tmp7); float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66); float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55); float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0); float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b); 
vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[2][m], _tmp2); float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0); float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1); float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[4][m], _tmp4); // reuse r04 * 1.25 // reuse r03 * 2.5 float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1); float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(&tmp[5][m], _tmp5); vst1q_f32(&tmp[6][m], _tmp6); r0 += w*4; r1 += w*4; r2 += w*4; r3 += w*4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; const float* t2 = tmp[2]; const float* t3 = tmp[3]; float* r0_tm0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles*8); float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles*16); float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles*24); for (int m=0; m+3<8; m+=4) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4_t _t2_0123 = vld1q_f32(t2); float32x4_t _t2_4567 = vld1q_f32(t2+4); float32x4_t _t3_0123 = vld1q_f32(t3); float32x4_t _t3_4567 = vld1q_f32(t3+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123); 
float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567); // no vswp intrinsic :( float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0])); float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1])); float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0])); float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1])); float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0])); float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1])); float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0])); float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1])); float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66); float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11); float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22); float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55); float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_0, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_0, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_0, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_0, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66); float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55); float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0); float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_1, 0); 
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_1, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_1, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_1, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_2, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_2, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_2, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_2, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0); float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1); float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_3, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_3, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_3, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_3, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_0, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_0, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_0, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_0, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1); float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b); r0_tm0[0] 
= vgetq_lane_f32(_r0_tm_4_1, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_1, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_1, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_1, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_2, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_2, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_2, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_2, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_3, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_3, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_3, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_3, 3); t0 += 8*4; t1 += 8*4; t2 += 8*4; t3 += 8*4; r0_tm0 += img0_tm.w*tiles*25; r0_tm1 += img0_tm.w*tiles*25; r0_tm2 += img0_tm.w*tiles*25; r0_tm3 += img0_tm.w*tiles*25; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; float* t2 = tmp[2]; float* t3 = tmp[3]; float* t4 = tmp[4]; float* t5 = tmp[5]; float* t6 = tmp[6]; float* t7 = tmp[7]; int stepw = w*4*4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8], %26 \n" "vld1.f32 {d20-d23}, [%9], %26 \n" "vld1.f32 {d24-d27}, [%10], %26 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11], %26 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! 
\n"// tmp[0][m] "vmov q3, q7 \n"// use q7 "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m] "vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m] // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m] "vmov q3, q7 \n"// use q7 "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! 
\n"// tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m] "vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m] : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(t2), // %2 "=r"(t3), // %3 "=r"(t4), // %4 "=r"(t5), // %5 "=r"(t6), // %6 "=r"(t7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(r3) // %11 : "0"(t0), "1"(t1), "2"(t2), "3"(t3), "4"(t4), "5"(t5), "6"(t6), "7"(t7), "8"(r0), "9"(r1), "10"(r2), "11"(r3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(stepw) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); t0 = tmp[0]; t1 = tmp[1]; t2 = tmp[2]; t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*8); float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*16); float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*24); float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles*32); float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*40); float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*48); float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*56); int step = img0_tm.w*tiles*4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8] \n" "add %8, %8, #128 \n" "vld1.f32 {d20-d23}, [%9] \n" "add %9, %9, #128 \n" "vld1.f32 {d24-d27}, [%10] \n" "add %10, %10, #128 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "add %11, %11, #128 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 
"vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0], %26 \n" "vst1.f32 {d4[1]}, [%1], %26 \n" "vmov q3, q7 \n"// use q7 "vst1.f32 {d5[0]}, [%2], %26 \n" "vst1.f32 {d5[1]}, [%3], %26 \n" "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0], %26 \n" "vst1.f32 {d16[1]}, [%1], %26 \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%2], %26 \n" "vst1.f32 {d17[1]}, [%3], %26 \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0], %26 \n" "vst1.f32 {d18[1]}, [%1], %26 \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%2], %26 \n" "vst1.f32 {d19[1]}, [%3], %26 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vst1.f32 {d16[0]}, [%0], %26 \n" "vst1.f32 {d16[1]}, [%1], %26 \n" "vst1.f32 {d17[0]}, [%2], %26 \n" "vst1.f32 {d17[1]}, [%3], %26 \n" "vadd.f32 q2, q4, q5 \n" "vst1.f32 {d18[0]}, [%0], %26 \n" "vst1.f32 {d18[1]}, [%1], %26 \n" "vst1.f32 {d19[0]}, [%2], %26 \n" "vst1.f32 {d19[1]}, [%3], %26 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d4[0]}, [%0], %26 \n" "vst1.f32 {d4[1]}, [%1], %26 \n" "vst1.f32 {d5[0]}, [%2], %26 \n" "vst1.f32 {d5[1]}, [%3], %26 \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d6[0]}, [%0], %26 \n" "vst1.f32 {d6[1]}, [%1], %26 \n" "vst1.f32 {d7[0]}, [%2], %26 \n" "vst1.f32 {d7[1]}, [%3], %26 \n" "vst1.f32 {d12[0]}, [%0] \n" "vst1.f32 {d12[1]}, [%1] \n" "vst1.f32 {d13[0]}, [%2] \n" "vst1.f32 {d13[1]}, [%3] \n" // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, 
[%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%4], %26 \n" "vst1.f32 {d4[1]}, [%5], %26 \n" "vmov q3, q7 \n"// use q7 "vst1.f32 {d5[0]}, [%6], %26 \n" "vst1.f32 {d5[1]}, [%7], %26 \n" "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%4], %26 \n" "vst1.f32 {d16[1]}, [%5], %26 \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%6], %26 \n" "vst1.f32 {d17[1]}, [%7], %26 \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%4], %26 \n" "vst1.f32 {d18[1]}, [%5], %26 \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%6], %26 \n" "vst1.f32 {d19[1]}, [%7], %26 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vst1.f32 {d16[0]}, [%4], %26 \n" "vst1.f32 {d16[1]}, [%5], %26 \n" "vst1.f32 {d17[0]}, [%6], %26 \n" "vst1.f32 {d17[1]}, [%7], %26 \n" "vadd.f32 q2, q4, q5 \n" "vst1.f32 {d18[0]}, [%4], %26 \n" "vst1.f32 {d18[1]}, [%5], %26 \n" "vst1.f32 {d19[0]}, [%6], %26 \n" "vst1.f32 {d19[1]}, [%7], %26 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d4[0]}, [%4], %26 \n" "vst1.f32 {d4[1]}, [%5], %26 \n" "vst1.f32 {d5[0]}, [%6], %26 \n" "vst1.f32 {d5[1]}, [%7], %26 \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d6[0]}, [%4], %26 \n" "vst1.f32 {d6[1]}, [%5], %26 \n" 
"vst1.f32 {d7[0]}, [%6], %26 \n" "vst1.f32 {d7[1]}, [%7], %26 \n" "vst1.f32 {d12[0]}, [%4] \n" "vst1.f32 {d12[1]}, [%5] \n" "vst1.f32 {d13[0]}, [%6] \n" "vst1.f32 {d13[1]}, [%7] \n" : "=r"(r0_tm0_0), // %0 "=r"(r0_tm1_0), // %1 "=r"(r0_tm2_0), // %2 "=r"(r0_tm3_0), // %3 "=r"(r0_tm0_4), // %4 "=r"(r0_tm1_4), // %5 "=r"(r0_tm2_4), // %6 "=r"(r0_tm3_4), // %7 "=r"(t0), // %8 "=r"(t1), // %9 "=r"(t2), // %10 "=r"(t3) // %11 : "0"(r0_tm0_0), "1"(r0_tm1_0), "2"(r0_tm2_0), "3"(r0_tm3_0), "4"(r0_tm0_4), "5"(r0_tm1_4), "6"(r0_tm2_4), "7"(r0_tm3_4), "8"(t0), "9"(t1), "10"(t2), "11"(t3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(step) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else const float* r0 = img0.row(i * 6) + j * 6; for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f); float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f); float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm_1 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm_2 = img0_tm.row(i * w_tm/8 + j + tiles*2); float* r0_tm_3 = img0_tm.row(i * w_tm/8 + j + tiles*3); float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles*4); float* r0_tm_5 = img0_tm.row(i * w_tm/8 + j + tiles*5); float* r0_tm_6 = img0_tm.row(i * w_tm/8 + j + tiles*6); float* r0_tm_7 = img0_tm.row(i * w_tm/8 + j + tiles*7); for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - 
tmp0[2]) * 5.25f; r0_tm_7[0] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f); float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]); r0_tm_1[0] = tmp12a + tmp12b; r0_tm_2[0] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f); float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f); r0_tm_3[0] = tmp34a + tmp34b; r0_tm_4[0] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f); float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f); r0_tm_5[0] = tmp56a + tmp56b; r0_tm_6[0] = tmp56a - tmp56b; r0_tm_0 += img0_tm.w * tiles * 8; r0_tm_1 += img0_tm.w * tiles * 8; r0_tm_2 += img0_tm.w * tiles * 8; r0_tm_3 += img0_tm.w * tiles * 8; r0_tm_4 += img0_tm.w * tiles * 8; r0_tm_5 += img0_tm.w * tiles * 8; r0_tm_6 += img0_tm.w * tiles * 8; r0_tm_7 += img0_tm.w * tiles * 8; } #endif // __ARM_NEON } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; // permute // bottom_blob_tm.create(1, 64 * tiles, inch); // Mat bottom_blob_tm2(inch, tiles, 64); Mat bottom_blob_tm2(8*inch, tiles/8 + (tiles%8)/4 + tiles%4, 64, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i=0; for (; i+7<tiles; i+=8) { float* tm2p = tm2.row(i/8); const float* r0 = bottom_blob_tm; r0 += r*tiles + i; for (int q=0; q<inch; q++) { #if __ARM_NEON float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); vst1q_f32(tm2p, _r0); vst1q_f32(tm2p+4, _r0n); #else tm2p[0] = r0[0]; tm2p[1] = r0[1]; tm2p[2] = r0[2]; tm2p[3] = r0[3]; tm2p[4] = r0[4]; tm2p[5] = r0[5]; tm2p[6] = r0[6]; tm2p[7] = r0[7]; #endif // __ARM_NEON r0 += bottom_blob_tm.cstep; tm2p += 8; } } for (; i+3<tiles; i+=4) { float* tm2p = tm2.row(i/8+(i%8)/4); const float* r0 = bottom_blob_tm; r0 
+= r*tiles + i; for (int q=0; q<inch; q++) { #if __ARM_NEON float32x4_t _r0 = vld1q_f32(r0); vst1q_f32(tm2p, _r0); #else tm2p[0] = r0[0]; tm2p[1] = r0[1]; tm2p[2] = r0[2]; tm2p[3] = r0[3]; #endif // __ARM_NEON r0 += bottom_blob_tm.cstep; tm2p += 4; } } for (; i<tiles; i++) { float* tm2p = tm2.row(i/8+(i%8)/4+i%4); const float* r0 = bottom_blob_tm; r0 += r*tiles + i; for (int q=0; q<inch; q++) { tm2p[0] = r0[0]; r0 += bottom_blob_tm.cstep; tm2p += 1; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(1, 64 * tiles, outch); int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; const Mat kernel_tm0 = kernel_tm.channel(p/8); Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); Mat out4_tm = top_blob_tm.channel(p+4); Mat out5_tm = top_blob_tm.channel(p+5); Mat out6_tm = top_blob_tm.channel(p+6); Mat out7_tm = top_blob_tm.channel(p+7); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; float* output4_tm = out4_tm; float* output5_tm = out5_tm; float* output6_tm = out6_tm; float* output7_tm = out7_tm; for (int r=0; r<64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i=0; for (; i+7<tiles; i+=8) { const float* bb2p0 = bb2.row(i/8); const float* ktm0 = kernel_tm0.row(r); asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b 
\n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" // inch loop "lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "fmla v16.4s, v10.4s, v2.s[0] \n" "fmla v17.4s, v11.4s, v2.s[0] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v11.4s, v2.s[1] \n" "fmla v20.4s, v10.4s, v2.s[2] \n" "fmla v21.4s, v11.4s, v2.s[2] \n" "fmla v22.4s, v10.4s, v2.s[3] \n" "fmla v23.4s, v11.4s, v2.s[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" "fmla v24.4s, v10.4s, v3.s[0] \n" "fmla v25.4s, v11.4s, v3.s[0] \n" "fmla v26.4s, v10.4s, v3.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v10.4s, v3.s[2] \n" "fmla v29.4s, v11.4s, v3.s[2] \n" "fmla v30.4s, v10.4s, v3.s[3] \n" "fmla v31.4s, v11.4s, v3.s[3] \n" "fmla v16.4s, v12.4s, v4.s[0] \n" "fmla v17.4s, v13.4s, v4.s[0] \n" "fmla v18.4s, v12.4s, v4.s[1] \n" "fmla v19.4s, v13.4s, v4.s[1] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v13.4s, v4.s[2] \n" "fmla v22.4s, v12.4s, v4.s[3] \n" "fmla v23.4s, v13.4s, v4.s[3] \n" "fmla v24.4s, v12.4s, v5.s[0] \n" "fmla v25.4s, v13.4s, v5.s[0] \n" "fmla v26.4s, 
v12.4s, v5.s[1] \n" "fmla v27.4s, v13.4s, v5.s[1] \n" "fmla v28.4s, v12.4s, v5.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v12.4s, v5.s[3] \n" "fmla v31.4s, v13.4s, v5.s[3] \n" "fmla v16.4s, v14.4s, v6.s[0] \n" "fmla v17.4s, v15.4s, v6.s[0] \n" "fmla v18.4s, v14.4s, v6.s[1] \n" "fmla v19.4s, v15.4s, v6.s[1] \n" "fmla v20.4s, v14.4s, v6.s[2] \n" "fmla v21.4s, v15.4s, v6.s[2] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v15.4s, v6.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v14.4s, v7.s[0] \n" "fmla v25.4s, v15.4s, v7.s[0] \n" "fmla v26.4s, v14.4s, v7.s[1] \n" "fmla v27.4s, v15.4s, v7.s[1] \n" "fmla v28.4s, v14.4s, v7.s[2] \n" "fmla v29.4s, v15.4s, v7.s[2] \n" "fmla v30.4s, v14.4s, v7.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = tiles & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" "st1 {v20.4s, v21.4s}, [%2], #32 \n" "st1 {v22.4s, v23.4s}, [%3], #32 \n" "st1 {v24.4s, v25.4s}, [%4], #32 \n" "st1 {v26.4s, v27.4s}, [%5], #32 \n" "st1 {v28.4s, v29.4s}, [%6], #32 \n" "st1 {v30.4s, v31.4s}, [%7], #32 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 
"=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(bb2p0), // %8 "=r"(ktm0) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(bb2p0), "9"(ktm0), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+3<tiles; i+=4) { const float* bb2p0 = bb2.row(i/8+(i%8)/4); const float* ktm0 = kernel_tm0.row(r); asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" // inch loop "lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v9.4s, v2.s[0] \n" "fmla v17.4s, v9.4s, v2.s[1] \n" "fmla v18.4s, v9.4s, v2.s[2] \n" "fmla v19.4s, v9.4s, v2.s[3] \n" "fmla v20.4s, v9.4s, v3.s[0] \n" "fmla v21.4s, v9.4s, v3.s[1] \n" "fmla v22.4s, v9.4s, v3.s[2] \n" "fmla v23.4s, v9.4s, v3.s[3] \n" "fmla v16.4s, v10.4s, v4.s[0] \n" "fmla v17.4s, v10.4s, v4.s[1] \n" "fmla v18.4s, v10.4s, v4.s[2] \n" "fmla v19.4s, v10.4s, v4.s[3] \n" "fmla v20.4s, v10.4s, v5.s[0] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" 
"fmla v22.4s, v10.4s, v5.s[2] \n" "fmla v23.4s, v10.4s, v5.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v11.4s, v6.s[0] \n" "fmla v17.4s, v11.4s, v6.s[1] \n" "fmla v18.4s, v11.4s, v6.s[2] \n" "fmla v19.4s, v11.4s, v6.s[3] \n" "fmla v20.4s, v11.4s, v7.s[0] \n" "fmla v21.4s, v11.4s, v7.s[1] \n" "fmla v22.4s, v11.4s, v7.s[2] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = tiles & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "st1 {v20.4s}, [%4], #16 \n" "st1 {v21.4s}, [%5], #16 \n" "st1 {v22.4s}, [%6], #16 \n" "st1 {v23.4s}, [%7], #16 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(bb2p0), // %8 "=r"(ktm0) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(bb2p0), "9"(ktm0), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } for (; i<tiles; i++) { const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0123 = vdupq_n_f32(0.f); float32x4_t _sum4567 = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { // asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) 
:); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; // asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm1, _bb2p0, 0); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 1); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm3, _bb2p0, 1); // asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm4 = vld1q_f32(ktm0 + 0); float32x4_t _ktm5 = vld1q_f32(ktm0 + 4); float32x4_t _ktm6 = vld1q_f32(ktm0 + 8); float32x4_t _ktm7 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm4, _bb2p0, 2); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm5, _bb2p0, 2); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm6, _bb2p0, 3); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm7, _bb2p0, 3); } for (; q<inch; q++) { float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0); float32x4_t _ktm0123 = vld1q_f32(ktm0 + 0); float32x4_t _ktm4567 = vld1q_f32(ktm0 + 4); _sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0123); _sum4567 = vmlaq_f32(_sum4567, _bb2p0, _ktm4567); bb2p0 += 1; ktm0 += 8; } float sum0 = vgetq_lane_f32(_sum0123, 0); float sum1 = vgetq_lane_f32(_sum0123, 1); float sum2 = vgetq_lane_f32(_sum0123, 2); float sum3 = vgetq_lane_f32(_sum0123, 3); float sum4 = vgetq_lane_f32(_sum4567, 0); float sum5 = vgetq_lane_f32(_sum4567, 1); float sum6 = vgetq_lane_f32(_sum4567, 2); float sum7 = vgetq_lane_f32(_sum4567, 3); output0_tm[0] = sum0; output1_tm[0] = sum1; output2_tm[0] = sum2; output3_tm[0] = sum3; output4_tm[0] = sum4; output5_tm[0] = sum5; output6_tm[0] = sum6; output7_tm[0] = sum7; output0_tm += 1; output1_tm += 1; output2_tm += 1; output3_tm += 1; output4_tm += 1; output5_tm += 1; output6_tm += 1; output7_tm += 1; } } } #endif // __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma 
omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; #if __ARM_NEON && __aarch64__ const Mat kernel_tm0 = kernel_tm.channel(p/8+(p%8)/4); #else const Mat kernel_tm0 = kernel_tm.channel(p/4); #endif Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; for (int r=0; r<64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i=0; for (; i+7<tiles; i+=8) { const float* bb2p0 = bb2.row(i/8); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" // inch loop "lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v7.4s, v1.s[0] \n" "fmla v10.4s, v6.4s, v1.s[1] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "fmla v12.4s, v6.4s, v1.s[2] \n" "fmla v13.4s, v7.4s, v1.s[2] \n" "fmla v14.4s, v6.4s, v1.s[3] \n" "fmla v15.4s, v7.4s, v1.s[3] \n" "fmla v8.4s, v16.4s, v2.s[0] \n" "fmla v9.4s, v17.4s, v2.s[0] \n" "fmla v10.4s, v16.4s, 
v2.s[1] \n" "fmla v11.4s, v17.4s, v2.s[1] \n" "fmla v12.4s, v16.4s, v2.s[2] \n" "fmla v13.4s, v17.4s, v2.s[2] \n" "fmla v14.4s, v16.4s, v2.s[3] \n" "fmla v15.4s, v17.4s, v2.s[3] \n" "fmla v8.4s, v18.4s, v3.s[0] \n" "fmla v9.4s, v19.4s, v3.s[0] \n" "fmla v10.4s, v18.4s, v3.s[1] \n" "fmla v11.4s, v19.4s, v3.s[1] \n" "fmla v12.4s, v18.4s, v3.s[2] \n" "fmla v13.4s, v19.4s, v3.s[2] \n" "fmla v14.4s, v18.4s, v3.s[3] \n" "fmla v15.4s, v19.4s, v3.s[3] \n" "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = tiles & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4s, v5.4s}, [%4], #32 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" "st1 {v10.4s, v11.4s}, [%1], #32 \n" "st1 {v12.4s, v13.4s}, [%2], #32 \n" "st1 {v14.4s, v15.4s}, [%3], #32 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "veor q12, q12, q12 \n" "veor q13, q13, q13 \n" "veor q14, q14, q14 \n" "veor q15, q15, q15 \n" // inch loop "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! 
\n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n" "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n" "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n" "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n" "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n" "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n" "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n" "vmla.f32 q15, q5, d5[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d6[0] \n" "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n" "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n" "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = tiles & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q11, q5, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q15, q5, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0]! \n" "vst1.f32 {d20-d23}, [%1]! \n" "vst1.f32 {d24-d27}, [%2]! \n" "vst1.f32 {d28-d31}, [%3]! 
\n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0_0 = 0.f; float sum0_1 = 0.f; float sum0_2 = 0.f; float sum0_3 = 0.f; float sum0_4 = 0.f; float sum0_5 = 0.f; float sum0_6 = 0.f; float sum0_7 = 0.f; float sum1_0 = 0.f; float sum1_1 = 0.f; float sum1_2 = 0.f; float sum1_3 = 0.f; float sum1_4 = 0.f; float sum1_5 = 0.f; float sum1_6 = 0.f; float sum1_7 = 0.f; float sum2_0 = 0.f; float sum2_1 = 0.f; float sum2_2 = 0.f; float sum2_3 = 0.f; float sum2_4 = 0.f; float sum2_5 = 0.f; float sum2_6 = 0.f; float sum2_7 = 0.f; float sum3_0 = 0.f; float sum3_1 = 0.f; float sum3_2 = 0.f; float sum3_3 = 0.f; float sum3_4 = 0.f; float sum3_5 = 0.f; float sum3_6 = 0.f; float sum3_7 = 0.f; for (int q=0; q<inch; q++) { sum0_0 += bb2p0[0] * ktm0[0]; sum0_1 += bb2p0[1] * ktm0[0]; sum0_2 += bb2p0[2] * ktm0[0]; sum0_3 += bb2p0[3] * ktm0[0]; sum0_4 += bb2p0[4] * ktm0[0]; sum0_5 += bb2p0[5] * ktm0[0]; sum0_6 += bb2p0[6] * ktm0[0]; sum0_7 += bb2p0[7] * ktm0[0]; sum1_0 += bb2p0[0] * ktm0[1]; sum1_1 += bb2p0[1] * ktm0[1]; sum1_2 += bb2p0[2] * ktm0[1]; sum1_3 += bb2p0[3] * ktm0[1]; sum1_4 += bb2p0[4] * ktm0[1]; sum1_5 += bb2p0[5] * ktm0[1]; sum1_6 += bb2p0[6] * ktm0[1]; sum1_7 += bb2p0[7] * ktm0[1]; sum2_0 += bb2p0[0] * ktm0[2]; sum2_1 += bb2p0[1] * ktm0[2]; sum2_2 += bb2p0[2] * ktm0[2]; sum2_3 += bb2p0[3] * ktm0[2]; sum2_4 += bb2p0[4] * ktm0[2]; sum2_5 += bb2p0[5] * ktm0[2]; sum2_6 += bb2p0[6] * ktm0[2]; sum2_7 += bb2p0[7] * ktm0[2]; sum3_0 += bb2p0[0] * ktm0[3]; sum3_1 += bb2p0[1] * ktm0[3]; sum3_2 += bb2p0[2] * ktm0[3]; sum3_3 += bb2p0[3] * ktm0[3]; sum3_4 += bb2p0[4] * ktm0[3]; sum3_5 += bb2p0[5] * ktm0[3]; sum3_6 += bb2p0[6] * 
ktm0[3]; sum3_7 += bb2p0[7] * ktm0[3]; bb2p0 += 8; ktm0 += 4; } output0_tm[0] = sum0_0; output0_tm[1] = sum0_1; output0_tm[2] = sum0_2; output0_tm[3] = sum0_3; output0_tm[4] = sum0_4; output0_tm[5] = sum0_5; output0_tm[6] = sum0_6; output0_tm[7] = sum0_7; output1_tm[0] = sum1_0; output1_tm[1] = sum1_1; output1_tm[2] = sum1_2; output1_tm[3] = sum1_3; output1_tm[4] = sum1_4; output1_tm[5] = sum1_5; output1_tm[6] = sum1_6; output1_tm[7] = sum1_7; output2_tm[0] = sum2_0; output2_tm[1] = sum2_1; output2_tm[2] = sum2_2; output2_tm[3] = sum2_3; output2_tm[4] = sum2_4; output2_tm[5] = sum2_5; output2_tm[6] = sum2_6; output2_tm[7] = sum2_7; output3_tm[0] = sum3_0; output3_tm[1] = sum3_1; output3_tm[2] = sum3_2; output3_tm[3] = sum3_3; output3_tm[4] = sum3_4; output3_tm[5] = sum3_5; output3_tm[6] = sum3_6; output3_tm[7] = sum3_7; output0_tm += 8; output1_tm += 8; output2_tm += 8; output3_tm += 8; #endif // __ARM_NEON } for (; i+3<tiles; i+=4) { const float* bb2p0 = bb2.row(i/8+(i%8)/4); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" // inch loop "lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v8.4s, v5.4s, v1.s[0] \n" "fmla v9.4s, v5.4s, v1.s[1] \n" "fmla v10.4s, v5.4s, v1.s[2] \n" "fmla v11.4s, v5.4s, v1.s[3] \n" "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "fmla v8.4s, v7.4s, v3.s[0] \n" "fmla v9.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v7.4s, v3.s[2] \n" "fmla v11.4s, v7.4s, v3.s[3] \n" "subs w4, w4, 
#1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = tiles & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v10.4s}, [%2], #16 \n" "st1 {v11.4s}, [%3], #16 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" // inch loop "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q8, q5, d2[0] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d3[0] \n" "vmla.f32 q11, q5, d3[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vmla.f32 q8, q7, d6[0] \n" "vmla.f32 q9, q7, d6[1] \n" "vmla.f32 q10, q7, d7[0] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = tiles & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! 
\n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0]! \n" "vst1.f32 {d18-d19}, [%1]! \n" "vst1.f32 {d20-d21}, [%2]! \n" "vst1.f32 {d22-d23}, [%3]! \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11" ); #endif // __aarch64__ #else float sum0_0 = 0.f; float sum0_1 = 0.f; float sum0_2 = 0.f; float sum0_3 = 0.f; float sum1_0 = 0.f; float sum1_1 = 0.f; float sum1_2 = 0.f; float sum1_3 = 0.f; float sum2_0 = 0.f; float sum2_1 = 0.f; float sum2_2 = 0.f; float sum2_3 = 0.f; float sum3_0 = 0.f; float sum3_1 = 0.f; float sum3_2 = 0.f; float sum3_3 = 0.f; for (int q=0; q<inch; q++) { sum0_0 += bb2p0[0] * ktm0[0]; sum0_1 += bb2p0[1] * ktm0[0]; sum0_2 += bb2p0[2] * ktm0[0]; sum0_3 += bb2p0[3] * ktm0[0]; sum1_0 += bb2p0[0] * ktm0[1]; sum1_1 += bb2p0[1] * ktm0[1]; sum1_2 += bb2p0[2] * ktm0[1]; sum1_3 += bb2p0[3] * ktm0[1]; sum2_0 += bb2p0[0] * ktm0[2]; sum2_1 += bb2p0[1] * ktm0[2]; sum2_2 += bb2p0[2] * ktm0[2]; sum2_3 += bb2p0[3] * ktm0[2]; sum3_0 += bb2p0[0] * ktm0[3]; sum3_1 += bb2p0[1] * ktm0[3]; sum3_2 += bb2p0[2] * ktm0[3]; sum3_3 += bb2p0[3] * ktm0[3]; bb2p0 += 4; ktm0 += 4; } output0_tm[0] = sum0_0; output0_tm[1] = sum0_1; output0_tm[2] = sum0_2; output0_tm[3] = sum0_3; output1_tm[0] = sum1_0; output1_tm[1] = sum1_1; output1_tm[2] = sum1_2; output1_tm[3] = sum1_3; output2_tm[0] = sum2_0; output2_tm[1] = sum2_1; output2_tm[2] = sum2_2; output2_tm[3] = sum2_3; output3_tm[0] = sum3_0; output3_tm[1] = sum3_1; output3_tm[2] = sum3_2; output3_tm[3] = sum3_3; output0_tm += 4; output1_tm += 4; output2_tm += 4; 
output3_tm += 4; #endif // __ARM_NEON } for (; i<tiles; i++) { const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON float32x4_t _sum0123 = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { // asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; // asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; #if __aarch64__ _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm1, _bb2p0, 1); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 2); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm3, _bb2p0, 3); #else _sum0123 = vmlaq_lane_f32(_sum0123, _ktm0, vget_low_f32(_bb2p0), 0); _sum0123 = vmlaq_lane_f32(_sum0123, _ktm1, vget_low_f32(_bb2p0), 1); _sum0123 = vmlaq_lane_f32(_sum0123, _ktm2, vget_high_f32(_bb2p0), 0); _sum0123 = vmlaq_lane_f32(_sum0123, _ktm3, vget_high_f32(_bb2p0), 1); #endif // __aarch64__ } for (; q<inch; q++) { float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0); float32x4_t _ktm0 = vld1q_f32(ktm0); _sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0); bb2p0 += 1; ktm0 += 4; } float sum0 = vgetq_lane_f32(_sum0123, 0); float sum1 = vgetq_lane_f32(_sum0123, 1); float sum2 = vgetq_lane_f32(_sum0123, 2); float sum3 = vgetq_lane_f32(_sum0123, 3); #else float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; for (int q=0; q<inch; q++) { sum0 += bb2p0[0] * ktm0[0]; sum1 += bb2p0[0] * ktm0[1]; sum2 += bb2p0[0] * ktm0[2]; sum3 += bb2p0[0] * ktm0[3]; bb2p0 += 1; ktm0 += 4; } #endif // __ARM_NEON output0_tm[0] = sum0; output1_tm[0] = sum1; output2_tm[0] = sum2; output3_tm[0] = sum3; output0_tm += 1; output1_tm += 1; output2_tm += 1; output3_tm += 1; } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for 
num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { #if __ARM_NEON && __aarch64__ const Mat kernel_tm0 = kernel_tm.channel(p/8+(p%8)/4+p%4); #else const Mat kernel_tm0 = kernel_tm.channel(p/4+p%4); #endif Mat out0_tm = top_blob_tm.channel(p); float* output0_tm = out0_tm; for (int r=0; r<64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i=0; for (; i+7<tiles; i+=8) { const float* bb2p0 = bb2.row(i/8); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" // inch loop "lsr w4, %w6, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v8.4s, v6.4s, v0.s[1] \n" "fmla v9.4s, v7.4s, v0.s[1] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "fmla v8.4s, v12.4s, v0.s[2] \n" "fmla v9.4s, v13.4s, v0.s[2] \n" "fmla v8.4s, v14.4s, v0.s[3] \n" "fmla v9.4s, v15.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n"// w4 = remain = tiles & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4s, v5.4s}, [%1], #32 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "fmla v8.4s, v4.4s, v0.4s \n" "fmla v9.4s, v5.4s, v0.4s \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15" ); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" // inch loop "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} 
\n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" // "vld1.f32 {d24-d27}, [%1 :128]! \n" // "vld1.f32 {d28-d31}, [%1 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = tiles & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0]! \n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; float sum4 = 0.f; float sum5 = 0.f; float sum6 = 0.f; float sum7 = 0.f; for (int q=0; q<inch; q++) { sum0 += bb2p0[0] * ktm0[0]; sum1 += bb2p0[1] * ktm0[0]; sum2 += bb2p0[2] * ktm0[0]; sum3 += bb2p0[3] * ktm0[0]; sum4 += bb2p0[4] * ktm0[0]; sum5 += bb2p0[5] * ktm0[0]; sum6 += bb2p0[6] * ktm0[0]; sum7 += bb2p0[7] * ktm0[0]; bb2p0 += 8; ktm0 += 1; } output0_tm[0] = sum0; output0_tm[1] = sum1; output0_tm[2] = sum2; output0_tm[3] = sum3; output0_tm[4] = sum4; output0_tm[5] = sum5; output0_tm[6] = sum6; output0_tm[7] = sum7; output0_tm += 8; #endif // __ARM_NEON } for (; i+3<tiles; i+=4) { const float* bb2p0 = bb2.row(i/8+(i%8)/4); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" // inch loop "lsr w4, %w6, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 
1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v8.4s, v5.4s, v0.s[1] \n" "fmla v8.4s, v6.4s, v0.s[2] \n" "fmla v8.4s, v7.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n"// w4 = remain = tiles & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #32] \n" "ld1r {v0.4s}, [%5], #4 \n" "fmla v8.4s, v4.4s, v0.4s \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8" ); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" // inch loop "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = tiles & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4]! \n" "pld [%5, #32] \n" "vld1.f32 {d0[],d1[]}, [%5]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0]! 
\n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8" ); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; for (int q=0; q<inch; q++) { sum0 += bb2p0[0] * ktm0[0]; sum1 += bb2p0[1] * ktm0[0]; sum2 += bb2p0[2] * ktm0[0]; sum3 += bb2p0[3] * ktm0[0]; bb2p0 += 4; ktm0 += 1; } output0_tm[0] = sum0; output0_tm[1] = sum1; output0_tm[2] = sum2; output0_tm[3] = sum3; output0_tm += 4; #endif // __ARM_NEON } for (; i<tiles; i++) { const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4); const float* ktm0 = kernel_tm0.row(r); int q=0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; q+3<inch; q+=4) { // asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; float32x4_t _ktm0 = vld1q_f32(ktm0); ktm0 += 4; _sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0); } #if __aarch64__ float sum0 = vaddvq_f32(_sum0); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = vget_lane_f32(vpadd_f32(_ss0, _ss0), 0); #endif // __aarch64__ #else float sum0 = 0.f; #endif for (; q<inch; q++) { sum0 += bb2p0[0] * ktm0[0]; bb2p0 += 1; ktm0 += 1; } output0_tm[0] = sum0; output0_tm += 1; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + 
r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #if __ARM_NEON const float coeff[4] = { 4.f, 8.f, 16.f, 32.f }; float32x4_t _coeff = vld1q_f32(coeff); #endif // __ARM_NEON int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; #if __ARM_NEON float32x2_t _bias0 = vdup_n_f32(bias0); #endif // __ARM_NEON float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { #if __ARM_NEON #if __aarch64__ const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles*8); const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles*16); const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles*24); for (int m=0; m+3<8; m+=4) { float32x4_t _output0_tm_00; float32x4_t _output0_tm_11; float32x4_t _output0_tm_22; float32x4_t _output0_tm_33; float32x4_t _output0_tm_44; float32x4_t _output0_tm_55; float32x4_t _output0_tm_66; float32x4_t _output0_tm_77; _output0_tm_00 = vsetq_lane_f32(output0_tm0[0], _output0_tm_00, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm1[0], _output0_tm_00, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm2[0], _output0_tm_00, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm3[0], _output0_tm_00, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm0[0], _output0_tm_11, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm1[0], _output0_tm_11, 1); output0_tm1 += out0_tm.w * tiles; 
_output0_tm_11 = vsetq_lane_f32(output0_tm2[0], _output0_tm_11, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm3[0], _output0_tm_11, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm0[0], _output0_tm_22, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm1[0], _output0_tm_22, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm2[0], _output0_tm_22, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm3[0], _output0_tm_22, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm0[0], _output0_tm_33, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm1[0], _output0_tm_33, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm2[0], _output0_tm_33, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm3[0], _output0_tm_33, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm0[0], _output0_tm_44, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm1[0], _output0_tm_44, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm2[0], _output0_tm_44, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm3[0], _output0_tm_44, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm0[0], _output0_tm_55, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm1[0], _output0_tm_55, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm2[0], _output0_tm_55, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm3[0], _output0_tm_55, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm0[0], _output0_tm_66, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm1[0], 
_output0_tm_66, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm2[0], _output0_tm_66, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm3[0], _output0_tm_66, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_77 = vsetq_lane_f32(output0_tm0[0], _output0_tm_77, 0); _output0_tm_77 = vsetq_lane_f32(output0_tm1[0], _output0_tm_77, 1); _output0_tm_77 = vsetq_lane_f32(output0_tm2[0], _output0_tm_77, 2); _output0_tm_77 = vsetq_lane_f32(output0_tm3[0], _output0_tm_77, 3); float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a); _tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1); _tmp0 = vaddq_f32(_tmp0, _tmp024b); float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1); float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _tmp4 = vaddq_f32(_tmp4, _tmp024c); _tmp4 = vaddq_f32(_tmp4, _tmp024c); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[2][m], _tmp2); vst1q_f32(&tmp[4][m], _tmp4); float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _tmp1 = vaddq_f32(_tmp1, _tmp135b); _tmp1 = vaddq_f32(_tmp1, _tmp135b); float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0); float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a); _tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1); _tmp5 = vaddq_f32(_tmp5, _tmp135c); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[3][m], _tmp3); 
vst1q_f32(&tmp[5][m], _tmp5); output0_tm0 += out0_tm.w*tiles*25; output0_tm1 += out0_tm.w*tiles*25; output0_tm2 += out0_tm.w*tiles*25; output0_tm3 += out0_tm.w*tiles*25; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; for (int m=0; m+1<6; m+=2) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]); float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]); float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]); float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]); float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]); float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]); float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]); float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]); float32x2_t _tmp024a = vadd_f32(_t_11, _t_22); float32x2_t _tmp135a = vsub_f32(_t_11, _t_22); float32x2_t _tmp024b = vadd_f32(_t_33, _t_44); float32x2_t _tmp135b = vsub_f32(_t_33, _t_44); float32x2_t _tmp024c = vadd_f32(_t_55, _t_66); float32x2_t _tmp135c = vsub_f32(_t_55, _t_66); float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a); _output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1); _output_0 = vadd_f32(_output_0, _tmp024b); _output_0 = vadd_f32(_output_0, _bias0); float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1); _output_2 = vadd_f32(_output_2, _bias0); float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _bias0); output0[0] = vget_lane_f32(_output_0, 
0); output1[0] = vget_lane_f32(_output_0, 1); output0[2] = vget_lane_f32(_output_2, 0); output1[2] = vget_lane_f32(_output_2, 1); output0[4] = vget_lane_f32(_output_4, 0); output1[4] = vget_lane_f32(_output_4, 1); float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _bias0); float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0); _output_3 = vadd_f32(_output_3, _bias0); float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a); _output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1); _output_5 = vadd_f32(_output_5, _tmp135c); _output_5 = vadd_f32(_output_5, _bias0); output0[1] = vget_lane_f32(_output_1, 0); output1[1] = vget_lane_f32(_output_1, 1); output0[3] = vget_lane_f32(_output_3, 0); output1[3] = vget_lane_f32(_output_3, 1); output0[5] = vget_lane_f32(_output_5, 0); output1[5] = vget_lane_f32(_output_5, 1); t0 += 8*2; t1 += 8*2; output0 += outw*2; output1 += outw*2; } #else // __aarch64__ const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*8); const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*16); const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*24); const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles*32); const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*40); const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*48); const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*56); float* t0 = tmp[0]; float* t1 = tmp[1]; // int step = out0_tm.w * tiles * 2*4 *4; int step = out0_tm.w * tiles *4; asm volatile( // loop0 // "vld1.f32 {d16-d17}, [%2], %21 \n" // "vld1.f32 {d18-d19}, [%3], %21 \n" // "vld1.f32 {d20-d21}, [%4], %21 \n" // "vld1.f32 {d22-d23}, [%5], %21 \n" 
// "vld1.f32 {d24-d25}, [%6], %21 \n" // "vld1.f32 {d26-d27}, [%7], %21 \n" // "vld1.f32 {d28-d29}, [%8], %21 \n" // "vld1.f32 {d30-d31}, [%9], %21 \n" // "vtrn.32 q8, q10 \n" // "vtrn.32 q9, q11 \n" // "vtrn.32 q12, q14 \n" // "vtrn.32 q13, q15 \n" // "vswp d17, d24 \n" // "vswp d19, d26 \n" // "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 // "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vld1.f32 {d16[0]}, [%2], %21 \n" "vld1.f32 {d16[1]}, [%3], %21 \n" "vld1.f32 {d17[0]}, [%4], %21 \n" "vld1.f32 {d17[1]}, [%5], %21 \n" "vld1.f32 {d20[0]}, [%2], %21 \n" "vld1.f32 {d20[1]}, [%3], %21 \n" "vld1.f32 {d21[0]}, [%4], %21 \n" "vld1.f32 {d21[1]}, [%5], %21 \n" "vld1.f32 {d24[0]}, [%2], %21 \n" "vld1.f32 {d24[1]}, [%3], %21 \n" "vld1.f32 {d25[0]}, [%4], %21 \n" "vld1.f32 {d25[1]}, [%5], %21 \n" "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vld1.f32 {d28[0]}, [%2], %21 \n" "vld1.f32 {d28[1]}, [%3], %21 \n" "vld1.f32 {d29[0]}, [%4], %21 \n" "vld1.f32 {d29[1]}, [%5], %21 \n" "vld1.f32 {d18[0]}, [%2], %21 \n" "vld1.f32 {d18[1]}, [%3], %21 \n" "vld1.f32 {d19[0]}, [%4], %21 \n" "vld1.f32 {d19[1]}, [%5], %21 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vld1.f32 {d22[0]}, [%2], %21 \n" "vld1.f32 {d22[1]}, [%3], %21 \n" "vld1.f32 {d23[0]}, [%4], %21 \n" "vld1.f32 {d23[1]}, [%5], %21 \n" "vld1.f32 {d26[0]}, [%2], %21 \n" "vld1.f32 {d26[1]}, [%3], %21 \n" "vld1.f32 {d27[0]}, [%4], %21 \n" "vld1.f32 {d27[1]}, [%5], %21 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14 "vld1.f32 {d30[0]}, [%2] \n" "vld1.f32 {d30[1]}, [%3] \n" "vld1.f32 {d31[0]}, [%4] \n" "vld1.f32 {d31[1]}, [%5] \n" "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" 
"vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "sub %0, %0, #112 \n" "vst1.f32 {d30-d31}, [%1] \n" "sub %1, %1, #112 \n" // loop1 // "vld1.f32 {d16-d17}, [%2] \n" // "vld1.f32 {d18-d19}, [%3] \n" // "vld1.f32 {d20-d21}, [%4] \n" // "vld1.f32 {d22-d23}, [%5] \n" // "vld1.f32 {d24-d25}, [%6] \n" // "vld1.f32 {d26-d27}, [%7] \n" // "vld1.f32 {d28-d29}, [%8] \n" // "vld1.f32 {d30-d31}, [%9] \n" // "vtrn.32 q8, q10 \n" // "vtrn.32 q9, q11 \n" // "vtrn.32 q12, q14 \n" // "vtrn.32 q13, q15 \n" // "vswp d17, d24 \n" // "vswp d19, d26 \n" // "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 // "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vld1.f32 {d16[0]}, [%6], %21 \n" "vld1.f32 {d16[1]}, [%7], %21 \n" "vld1.f32 {d17[0]}, [%8], %21 \n" "vld1.f32 {d17[1]}, [%9], %21 \n" "vld1.f32 {d20[0]}, [%6], %21 \n" "vld1.f32 {d20[1]}, [%7], %21 \n" "vld1.f32 {d21[0]}, [%8], %21 \n" "vld1.f32 {d21[1]}, [%9], %21 \n" "vld1.f32 {d24[0]}, [%6], %21 \n" "vld1.f32 {d24[1]}, [%7], %21 \n" "vld1.f32 {d25[0]}, [%8], %21 \n" "vld1.f32 {d25[1]}, [%9], %21 \n" "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vld1.f32 {d28[0]}, [%6], %21 \n" "vld1.f32 {d28[1]}, [%7], %21 \n" "vld1.f32 {d29[0]}, [%8], %21 \n" "vld1.f32 {d29[1]}, [%9], %21 \n" "vld1.f32 {d18[0]}, [%6], %21 \n" "vld1.f32 {d18[1]}, [%7], %21 \n" "vld1.f32 {d19[0]}, [%8], %21 \n" "vld1.f32 {d19[1]}, [%9], %21 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vld1.f32 {d22[0]}, [%6], %21 \n" "vld1.f32 {d22[1]}, [%7], %21 \n" "vld1.f32 {d23[0]}, [%8], %21 \n" "vld1.f32 {d23[1]}, [%9], %21 \n" "vld1.f32 {d26[0]}, [%6], %21 \n" 
"vld1.f32 {d26[1]}, [%7], %21 \n" "vld1.f32 {d27[0]}, [%8], %21 \n" "vld1.f32 {d27[1]}, [%9], %21 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14 "vld1.f32 {d30[0]}, [%6] \n" "vld1.f32 {d30[1]}, [%7] \n" "vld1.f32 {d31[0]}, [%8] \n" "vld1.f32 {d31[1]}, [%9] \n" "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "vst1.f32 {d30-d31}, [%1] \n" : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(output0_tm0_0), // %2 "=r"(output0_tm1_0), // %3 "=r"(output0_tm2_0), // %4 "=r"(output0_tm3_0), // %5 "=r"(output0_tm0_4), // %6 "=r"(output0_tm1_4), // %7 "=r"(output0_tm2_4), // %8 "=r"(output0_tm3_4) // %9 : "0"(t0), "1"(t1), "2"(output0_tm0_0), "3"(output0_tm1_0), "4"(output0_tm2_0), "5"(output0_tm3_0), "6"(output0_tm0_4), "7"(output0_tm1_4), "8"(output0_tm2_4), "9"(output0_tm3_4), "w"(_coeff), // %20 "r"(step) // %21 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); t0 = tmp[0]; t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; int stepw = outw*2 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 
"vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop1 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, 
d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop2 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(t0), // %2 "=r"(t1) // %3 : "0"(output0), "1"(output1), "2"(t0), "3"(t1), "w"(_coeff), // %8 "w"(_bias0), // %9 "r"(stepw) // %10 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm_1 = out0_tm.row(i * w_tm/8 + j + tiles); const float* output0_tm_2 = out0_tm.row(i 
* w_tm/8 + j + tiles*2); const float* output0_tm_3 = out0_tm.row(i * w_tm/8 + j + tiles*3); const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles*4); const float* output0_tm_5 = out0_tm.row(i * w_tm/8 + j + tiles*5); const float* output0_tm_6 = out0_tm.row(i * w_tm/8 + j + tiles*6); const float* output0_tm_7 = out0_tm.row(i * w_tm/8 + j + tiles*7); for (int m=0; m<8; m++) { float tmp024a = output0_tm_1[0] + output0_tm_2[0]; float tmp135a = output0_tm_1[0] - output0_tm_2[0]; float tmp024b = output0_tm_3[0] + output0_tm_4[0]; float tmp135b = output0_tm_3[0] - output0_tm_4[0]; float tmp024c = output0_tm_5[0] + output0_tm_6[0]; float tmp135c = output0_tm_5[0] - output0_tm_6[0]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += out0_tm.w * tiles * 8; output0_tm_1 += out0_tm.w * tiles * 8; output0_tm_2 += out0_tm.w * tiles * 8; output0_tm_3 += out0_tm.w * tiles * 8; output0_tm_4 += out0_tm.w * tiles * 8; output0_tm_5 += out0_tm.w * tiles * 8; output0_tm_6 += out0_tm.w * tiles * 8; output0_tm_7 += out0_tm.w * tiles * 8; } float* output0 = out0.row(i * 6) + j * 6; for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a 
+ tmp135b * 32 + tmp135c;

                    output0 += outw;
                }
#endif // __ARM_NEON
            }
        }
    }
    // END transform output

    // cut result pad
    // the bordered output was rounded up to a whole number of tiles;
    // crop the extra bottom/right rows and columns back to the requested size
    if (top_blob_bordered.w != top_blob.w || top_blob_bordered.h != top_blob.h)
        copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// 3x3 convolution with stride 2 (single-precision, channel-per-Mat layout).
// top_blob is pre-filled with the per-channel bias and then accumulated over
// all input channels.  Output channels are processed two at a time so each
// loaded input row is reused for both kernels; leftover output channels are
// handled one at a time in the second parallel loop.
// NOTE(review): assumes bottom_blob provides at least 2*outw(+ext) readable
// floats per row for the vector/intrinsic loads — padding is expected to have
// been applied by the caller; confirm against the calling layer.
static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // after consuming one output row the read pointers sit just past the last
    // 3x3 window; this skips ahead so they land one full input row further
    // down, i.e. two input rows per output row (vertical stride 2)
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    int nn_outch = outch >> 1;              // number of output-channel pairs
    int remain_outch_start = nn_outch << 1; // first leftover output channel

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p+1);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p+1] : 0.f;

        // seed the accumulators with the bias; the loops below only add
        out0.fill(bias0);
        out1.fill(bias1);

        // two 3x3 kernels (9 floats per input channel) for this channel pair
        const float* k0 = kernel + p*inch*9;
        const float* k1 = kernel + (p+1)*inch*9;

        for (int q=0; q<inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;

            const float* img0 = bottom_blob.channel(q);

            // three consecutive input rows forming the 3x3 window
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;

#if __ARM_NEON
            // kernel rows broadcast into vector registers; only lanes 0..2 of
            // each load are meaningful (vld1q_f32 reads 4 floats)
            float32x4_t _k00 = vld1q_f32(k0);
            float32x4_t _k03 = vld1q_f32(k0+3);
            float32x4_t _k06 = vld1q_f32(k0+6);
            float32x4_t _k10 = vld1q_f32(k1);
            float32x4_t _k13 = vld1q_f32(k1+3);
            float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;     // 4 output pixels per asm iteration
                int remain = outw & 3;  // scalar tail
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                // vectorized inner loop: ld2 de-interleaves even/odd input
                // columns (stride 2), ext builds the shifted third tap
                if (nn > 0)
                {
                asm volatile(
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
                    "0: \n"
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v6.4s}, [%1] \n"// v6 = _sum0
                    "fmul v12.4s, v8.4s, %12.s[0] \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v7.4s}, [%2] \n"// v7 = _sum1
                    "fmul v13.4s, v8.4s, %15.s[0] \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld2 {v10.4s, v11.4s}, [%3] \n"// v10
                    "fmla v6.4s, v9.4s, %12.s[1] \n"
                    "ext v14.16b, v8.16b, v10.16b, #4\n"
                    "fmla v7.4s, v9.4s, %15.s[1] \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%4], #32 \n"// r1
                    "fmla v12.4s, v14.4s, %12.s[2] \n"
                    "fmla v13.4s, v14.4s, %15.s[2] \n"
                    "prfm pldl1keep, [%4, #128] \n"
                    "ld2 {v10.4s, v11.4s}, [%4] \n"
                    "fmla v6.4s, v8.4s, %13.s[0] \n"
                    "fmla v7.4s, v8.4s, %16.s[0] \n"
                    "ext v14.16b, v8.16b, v10.16b, #4\n"
                    "fmla v12.4s, v9.4s, %13.s[1] \n"
                    "fmla v13.4s, v9.4s, %16.s[1] \n"
                    "prfm pldl1keep, [%5, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%5], #32 \n"// r2
                    "fmla v6.4s, v14.4s, %13.s[2] \n"
                    "fmla v7.4s, v14.4s, %16.s[2] \n"
                    "prfm pldl1keep, [%5, #128] \n"
                    "ld2 {v10.4s, v11.4s}, [%5] \n"
                    "fmla v12.4s, v8.4s, %14.s[0] \n"
                    "fmla v13.4s, v8.4s, %17.s[0] \n"
                    "ext v14.16b, v8.16b, v10.16b, #4\n"
                    "fmla v6.4s, v9.4s, %14.s[1] \n"
                    "fmla v7.4s, v9.4s, %17.s[1] \n"
                    "fmla v12.4s, v14.4s, %14.s[2] \n"
                    "fmla v13.4s, v14.4s, %17.s[2] \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
                    "fadd v6.4s, v6.4s, v12.4s \n"
                    "fadd v7.4s, v7.4s, v13.4s \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v6.4s}, [%1], #16 \n"
                    "st1 {v7.4s}, [%2], #16 \n"
                    "bne 0b \n"
                    "sub %3, %3, #32 \n"// undo the pipelined over-read of r0
                    : "=r"(nn), // %0
                      "=r"(outptr0), // %1
                      "=r"(outptr1), // %2
                      "=r"(r0), // %3
                      "=r"(r1), // %4
                      "=r"(r2) // %5
                    : "0"(nn),
                      "1"(outptr0),
                      "2"(outptr1),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "w"(_k00), // %12
                      "w"(_k03), // %13
                      "w"(_k06), // %14
                      "w"(_k10), // %15
                      "w"(_k13), // %16
                      "w"(_k16) // %17
                    : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                // armv7 NEON version of the same loop (q-register naming)
                if (nn > 0)
                {
                asm volatile(
                    "pld [%3, #256] \n"
                    "vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
                    "0: \n"
                    "pld [%1, #128] \n"
                    "vld1.f32 {d12-d13}, [%1] \n"// q6 = _sum0
                    "vmul.f32 q12, q8, %e12[0] \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d14-d15}, [%2] \n"// q7 = _sum1
                    "vmul.f32 q13, q8, %e15[0] \n"
                    "pld [%3, #128] \n"
                    "vld2.f32 {d20-d21}, [%3] \n"// q10
                    "vmla.f32 q6, q9, %e12[1] \n"
                    "vext.32 q11, q8, q10, #1 \n"
                    "vmla.f32 q7, q9, %e15[1] \n"
                    "pld [%4, #256] \n"
                    "vld2.f32 {d16-d19}, [%4]! \n"// r1
                    "vmla.f32 q12, q11, %f12[0] \n"
                    "vmla.f32 q13, q11, %f15[0] \n"
                    "pld [%4, #128] \n"
                    "vld2.f32 {d20-d21}, [%4] \n"
                    "vmla.f32 q6, q8, %e13[0] \n"
                    "vmla.f32 q7, q8, %e16[0] \n"
                    "vext.32 q11, q8, q10, #1 \n"
                    "vmla.f32 q12, q9, %e13[1] \n"
                    "vmla.f32 q13, q9, %e16[1] \n"
                    "pld [%5, #256] \n"
                    "vld2.f32 {d16-d19}, [%5]! \n"// r2
                    "vmla.f32 q6, q11, %f13[0] \n"
                    "vmla.f32 q7, q11, %f16[0] \n"
                    "pld [%5, #128] \n"
                    "vld2.f32 {d20-d21}, [%5] \n"
                    "vmla.f32 q12, q8, %e14[0] \n"
                    "vmla.f32 q13, q8, %e17[0] \n"
                    "vext.32 q11, q8, q10, #1 \n"
                    "vmla.f32 q6, q9, %e14[1] \n"
                    "vmla.f32 q7, q9, %e17[1] \n"
                    "vmla.f32 q12, q11, %f14[0] \n"
                    "vmla.f32 q13, q11, %f17[0] \n"
                    "pld [%3, #256] \n"
                    "vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
                    "vadd.f32 q6, q6, q12 \n"
                    "vadd.f32 q7, q7, q13 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d12-d13}, [%1]! \n"
                    "vst1.f32 {d14-d15}, [%2]! \n"
                    "bne 0b \n"
                    "sub %3, #32 \n"// undo the pipelined over-read of r0
                    : "=r"(nn), // %0
                      "=r"(outptr0), // %1
                      "=r"(outptr1), // %2
                      "=r"(r0), // %3
                      "=r"(r1), // %4
                      "=r"(r2) // %5
                    : "0"(nn),
                      "1"(outptr0),
                      "2"(outptr1),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "w"(_k00), // %12
                      "w"(_k03), // %13
                      "w"(_k06), // %14
                      "w"(_k10), // %15
                      "w"(_k13), // %16
                      "w"(_k16) // %17
                    : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: one output pixel per iteration for both channels
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    // 4-lane loads cover the 3 window taps; lane 3 is replaced
                    // by the current accumulator before the horizontal sum
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum0 = vmulq_f32(_r00, _k00);
                    float32x4_t _sum1 = vmulq_f32(_r00, _k10);
                    _sum0 = vmlaq_f32(_sum0, _r10, _k03);
                    _sum1 = vmlaq_f32(_sum1, _r10, _k13);
                    _sum0 = vmlaq_f32(_sum0, _r20, _k06);
                    _sum1 = vmlaq_f32(_sum1, _r20, _k16);

                    _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
                    _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
                    *outptr0 = vaddvq_f32(_sum0);
                    *outptr1 = vaddvq_f32(_sum1);
#else
                    float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
                    float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
                    float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
                    *outptr0 = vget_lane_f32(_ss01, 0);
                    *outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
                    float sum0 = 0.f;
                    float sum1 = 0.f;

                    sum0 += r0[0] * k0[0];
                    sum0 += r0[1] * k0[1];
                    sum0 += r0[2] * k0[2];
                    sum0 += r1[0] * k0[3];
                    sum0 += r1[1] * k0[4];
                    sum0 += r1[2] * k0[5];
                    sum0 += r2[0] * k0[6];
                    sum0 += r2[1] * k0[7];
                    sum0 += r2[2] * k0[8];

                    sum1 += r0[0] * k1[0];
                    sum1 += r0[1] * k1[1];
                    sum1 += r0[2] * k1[2];
                    sum1 += r1[0] * k1[3];
                    sum1 += r1[1] * k1[4];
                    sum1 += r1[2] * k1[5];
                    sum1 += r2[0] * k1[6];
                    sum1 += r2[1] * k1[7];
                    sum1 += r2[2] * k1[8];

                    *outptr0 += sum0;
                    *outptr1 += sum1;
#endif // __ARM_NEON

                    r0 += 2;    // horizontal stride 2
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                    outptr1++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            // advance to the next input channel's 3x3 kernel
            k0 += 9;
            k1 += 9;
        }
    }

    // leftover output channels, one at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        const float* kernel0 = kernel + p*inch*9;

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

#if __ARM_NEON
            float32x4_t _k0123 = vld1q_f32(k0);
            float32x4_t _k3456 = vld1q_f32(k1);
            float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                // same de-interleave + shifted-tap scheme as the pair loop,
                // for a single output channel
                if (nn > 0)
                {
                asm volatile(
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                    "0: \n"
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v0.4s}, [%1] \n"
                    "fmla v0.4s, v2.4s, %10.s[0] \n"
                    "fmul v10.4s, v3.4s, %10.s[1] \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%2] \n"
                    "ext v1.16b, v2.16b, v8.16b, #4 \n"
                    "fmul v11.4s, v1.4s, %10.s[2] \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld2 {v2.4s, v3.4s}, [%3], #32 \n"
                    "fmla v0.4s, v2.4s, %11.s[0] \n"
                    "fmla v10.4s, v3.4s, %11.s[1] \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%3] \n"
                    "ext v1.16b, v2.16b, v8.16b, #4 \n"
                    "fmla v11.4s, v1.4s, %11.s[2] \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld2 {v2.4s, v3.4s}, [%4], #32 \n"
                    "fmla v0.4s, v2.4s, %12.s[0] \n"
                    "fmla v10.4s, v3.4s, %12.s[1] \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%4] \n"
                    "ext v1.16b, v2.16b, v8.16b, #4 \n"
                    "fmla v11.4s, v1.4s, %12.s[2] \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                    "fadd v0.4s, v0.4s, v10.4s \n"
                    "fadd v0.4s, v0.4s, v11.4s \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v0.4s}, [%1], #16 \n"
                    "bne 0b \n"
                    "sub %2, %2, #32 \n"
                    : "=r"(nn), // %0
                      "=r"(outptr), // %1
                      "=r"(r0), // %2
                      "=r"(r1), // %3
                      "=r"(r2) // %4
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "w"(_k0123), // %10
                      "w"(_k3456), // %11
                      "w"(_k6789) // %12
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "pld [%2, #256] \n"
                    "vld2.f32 {d4-d7}, [%2]! \n"
                    "0: \n"
                    "pld [%1, #128] \n"
                    "vld1.f32 {d0-d1}, [%1] \n"
                    "vmla.f32 q0, q2, %e10[0] \n"
                    "vmul.f32 q10, q3, %e10[1] \n"
                    "pld [%2, #128] \n"
                    "vld2.f32 {d16-d17}, [%2] \n"
                    "vext.32 q1, q2, q8, #1 \n"
                    "vmul.f32 q11, q1, %f10[0] \n"
                    "pld [%3, #256] \n"
                    "vld2.f32 {d4-d7}, [%3]! \n"
                    "vmla.f32 q0, q2, %e11[0] \n"
                    "vmla.f32 q10, q3, %e11[1] \n"
                    "pld [%3, #128] \n"
                    "vld2.f32 {d16-d17}, [%3] \n"
                    "vext.32 q1, q2, q8, #1 \n"
                    "vmla.f32 q11, q1, %f11[0] \n"
                    "pld [%4, #256] \n"
                    "vld2.f32 {d4-d7}, [%4]! \n"
                    "vmla.f32 q0, q2, %e12[0] \n"
                    "vmla.f32 q10, q3, %e12[1] \n"
                    "pld [%4, #128] \n"
                    "vld2.f32 {d16-d17}, [%4] \n"
                    "vext.32 q1, q2, q8, #1 \n"
                    "vmla.f32 q11, q1, %f12[0] \n"
                    "pld [%2, #256] \n"
                    "vld2.f32 {d4-d7}, [%2]! \n"
                    "vadd.f32 q0, q0, q10 \n"
                    "vadd.f32 q0, q0, q11 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d0-d1}, [%1]! \n"
                    "bne 0b \n"
                    "sub %2, #32 \n"
                    : "=r"(nn), // %0
                      "=r"(outptr), // %1
                      "=r"(r0), // %2
                      "=r"(r1), // %3
                      "=r"(r2) // %4
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "w"(_k0123), // %10
                      "w"(_k3456), // %11
                      "w"(_k6789) // %12
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum = vmulq_f32(_r00, _k0123);
                    _sum = vmlaq_f32(_sum, _r10, _k3456);
                    _sum = vmlaq_f32(_sum, _r20, _k6789);

                    // lane 3 of the kernel vectors is garbage; overwrite it
                    // with the running accumulator before the horizontal add
                    _sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
                    *outptr = vaddvq_f32(_sum);
#else
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);
                    *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;
#endif // __ARM_NEON

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}

// Repack 3x3 kernels for conv3x3s2_packed_neon: groups of 8 output channels
// are interleaved so that for each input channel the 9 taps are stored as
// 9 consecutive blocks of 8 floats (one float per output channel).  Leftover
// output channels keep the plain 9-floats-per-input-channel layout.
// NOTE(review): the remainder channel index p/8 + p%8 mirrors the lookup in
// conv3x3s2_packed_neon below — the two must stay in sync.
static void conv3x3s2_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(8*9, inch, outch/8 + outch%8);

    const float* kernel = _kernel;

    int p=0;
    for (; p+7<outch; p+=8)
    {
        const float* k0 = kernel + (p+0)*inch*9;
        const float* k1 = kernel + (p+1)*inch*9;
        const float* k2 = kernel + (p+2)*inch*9;
        const float* k3 = kernel + (p+3)*inch*9;
        const float* k4 = kernel + (p+4)*inch*9;
        const float* k5 = kernel + (p+5)*inch*9;
        const float* k6 = kernel + (p+6)*inch*9;
        const float* k7 = kernel + (p+7)*inch*9;

        float* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
            {
                // transpose: tap k of all 8 output channels stored together
                ktmp[0] = k0[k];
                ktmp[1] = k1[k];
                ktmp[2] = k2[k];
                ktmp[3] = k3[k];
                ktmp[4] = k4[k];
                ktmp[5] = k5[k];
                ktmp[6] = k6[k];
                ktmp[7] = k7[k];
                ktmp += 8;
            }

            k0 += 9;
            k1 += 9;
            k2 += 9;
            k3 += 9;
            k4 += 9;
            k5 += 9;
            k6 += 9;
            k7 += 9;
        }
    }
    for (; p<outch; p++)
    {
        const float* k0 = kernel + (p+0)*inch*9;

        float* ktmp = kernel_tm.channel(p/8 + p%8);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
            {
                ktmp[k] = k0[k];
            }
            ktmp += 9;
            k0 += 9;
        }
    }
}

// 3x3 stride-2 convolution consuming the repacked kernel layout produced by
// conv3x3s2_transform_kernel_neon; processes 8 output channels at once.
static void conv3x3s2_packed_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // skip from end of one output row to the start of the next input row pair
    const int tailstep = w - 2*outw + w;

//     const float* kernel = _kernel;
    const float* bias = _bias;

    int nn_outch = outch >> 3;              // groups of 8 output channels
    int remain_outch_start = nn_outch << 3;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 8;

        Mat out0 = top_blob.channel(p+0);
        Mat out1 = top_blob.channel(p+1);
        Mat out2 = top_blob.channel(p+2);
        Mat out3 = top_blob.channel(p+3);
        Mat out4 = top_blob.channel(p+4);
        Mat out5 = top_blob.channel(p+5);
        Mat out6 = top_blob.channel(p+6);
        Mat out7 = top_blob.channel(p+7);

        const float bias0 = bias ? bias[p+0] : 0.f;
        const float bias1 = bias ? bias[p+1] : 0.f;
        const float bias2 = bias ? bias[p+2] : 0.f;
        const float bias3 = bias ? bias[p+3] : 0.f;
        const float bias4 = bias ? bias[p+4] : 0.f;
        const float bias5 = bias ? bias[p+5] : 0.f;
        const float bias6 = bias ? bias[p+6] : 0.f;
        const float bias7 = bias ?
bias[p+7] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); out4.fill(bias4); out5.fill(bias5); out6.fill(bias6); out7.fill(bias7); const float* ktmp = _kernel.channel(p/8); for (int q=0; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; float* outptr6 = out6; float* outptr7 = out7; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v8.4s}, [%1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v9.4s}, [%2] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v10.4s}, [%3] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v11.4s}, [%4] \n" /// "prfm pldl1keep, [%9, #256] \n" "ld2 {v4.4s, v5.4s}, [%9], #32 \n"// v4=00 v5=01 "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v12.4s}, [%5] \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v13.4s}, [%6] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v14.4s}, [%7] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v15.4s}, [%8] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld2 {v6.4s, v7.4s}, [%9] \n"// v6 "fmla v8.4s, v5.4s, v2.s[0] \n" "fmla v9.4s, v5.4s, v2.s[1] \n" "fmla v10.4s, v5.4s, v2.s[2] \n" "fmla v11.4s, v5.4s, v2.s[3] \n" "ext v6.16b, v4.16b, v6.16b, #4 \n"// v6=02 "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v5.4s, v3.s[0] \n" "fmla v13.4s, v5.4s, v3.s[1] \n" "fmla v14.4s, v5.4s, v3.s[2] \n" "fmla 
v15.4s, v5.4s, v3.s[3] \n" /// "prfm pldl1keep, [%10, #256] \n" "ld2 {v4.4s, v5.4s}, [%10], #32 \n"// v4=10 v5=11 "fmla v8.4s, v6.4s, v0.s[0] \n" "fmla v9.4s, v6.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v6.4s, v0.s[3] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "fmla v12.4s, v6.4s, v1.s[0] \n" "fmla v13.4s, v6.4s, v1.s[1] \n" "fmla v14.4s, v6.4s, v1.s[2] \n" "fmla v15.4s, v6.4s, v1.s[3] \n" "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "prfm pldl1keep, [%10, #256] \n" "ld2 {v6.4s, v7.4s}, [%10] \n"// v6 "fmla v8.4s, v5.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[1] \n" "fmla v10.4s, v5.4s, v0.s[2] \n" "fmla v11.4s, v5.4s, v0.s[3] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "ext v6.16b, v4.16b, v6.16b, #4 \n"// v6=12 "fmla v12.4s, v5.4s, v1.s[0] \n" "fmla v13.4s, v5.4s, v1.s[1] \n" "fmla v14.4s, v5.4s, v1.s[2] \n" "fmla v15.4s, v5.4s, v1.s[3] \n" /// "prfm pldl1keep, [%11, #256] \n" "ld2 {v4.4s, v5.4s}, [%11], #32 \n"// v4=20 v5=21 "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v6.4s, v3.s[0] \n" "fmla v13.4s, v6.4s, v3.s[1] \n" "fmla v14.4s, v6.4s, v3.s[2] \n" "fmla v15.4s, v6.4s, v3.s[3] \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "prfm pldl1keep, [%11, #256] \n" "ld2 {v6.4s, v7.4s}, [%11] \n"// v6 "fmla v8.4s, v5.4s, v2.s[0] \n" "fmla v9.4s, v5.4s, v2.s[1] \n" "fmla v10.4s, v5.4s, v2.s[2] \n" "fmla v11.4s, v5.4s, v2.s[3] 
\n" "ext v6.16b, v4.16b, v6.16b, #4 \n"// v6=22 "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v5.4s, v3.s[0] \n" "fmla v13.4s, v5.4s, v3.s[1] \n" "fmla v14.4s, v5.4s, v3.s[2] \n" "fmla v15.4s, v5.4s, v3.s[3] \n" "fmla v8.4s, v6.4s, v0.s[0] \n" "fmla v9.4s, v6.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v6.4s, v0.s[3] \n" "fmla v12.4s, v6.4s, v1.s[0] \n" "fmla v13.4s, v6.4s, v1.s[1] \n" "st1 {v8.4s}, [%1], #16 \n" "st1 {v9.4s}, [%2], #16 \n" "fmla v14.4s, v6.4s, v1.s[2] \n" "fmla v15.4s, v6.4s, v1.s[3] \n" "st1 {v10.4s}, [%3], #16 \n" "st1 {v11.4s}, [%4], #16 \n" "sub %12, %12, #288 \n" "st1 {v12.4s}, [%5], #16 \n" "st1 {v13.4s}, [%6], #16 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s}, [%7], #16 \n" "st1 {v15.4s}, [%8], #16 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: \n" "pld [%1, #128] \n" "vld1.f32 {d16-d17}, [%1] \n" "pld [%2, #128] \n" "vld1.f32 {d18-d19}, [%2] \n" "pld [%3, #128] \n" "vld1.f32 {d20-d21}, [%3] \n" "pld [%4, #128] \n" "vld1.f32 {d22-d23}, [%4] \n" /// "pld [%9, #256] \n" "vld2.f32 {d8-d11}, [%9]! \n"// q4=00 q5=01 "vld1.f32 {d0-d3}, [%12 :128]! 
\n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "pld [%5, #128] \n" "vld1.f32 {d24-d25}, [%5] \n" "pld [%6, #128] \n" "vld1.f32 {d26-d27}, [%6] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "pld [%7, #128] \n" "vld1.f32 {d28-d29}, [%7] \n" "pld [%8, #128] \n" "vld1.f32 {d30-d31}, [%8] \n" "vld1.f32 {d4-d7}, [%12 :128]! \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "pld [%9, #128] \n" "vld2.f32 {d12-d13}, [%9] \n"// q6 "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vext.f32 q6, q4, q6, #1 \n"// q6=02 "vld1.f32 {d0-d3}, [%12 :128]! \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" /// "pld [%10, #256] \n" "vld2.f32 {d8-d11}, [%10]! \n"// q4=10 q5=11 "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vld1.f32 {d4-d7}, [%12 :128]! \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q9, q4, d4[1] \n" "vmla.f32 q10, q4, d5[0] \n" "vmla.f32 q11, q4, d5[1] \n" "vld1.f32 {d0-d3}, [%12 :128]! \n" "vmla.f32 q12, q4, d6[0] \n" "vmla.f32 q13, q4, d6[1] \n" "vmla.f32 q14, q4, d7[0] \n" "vmla.f32 q15, q4, d7[1] \n" "pld [%10, #128] \n" "vld2.f32 {d12-d13}, [%10] \n"// q6 "vmla.f32 q8, q5, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "vmla.f32 q10, q5, d1[0] \n" "vmla.f32 q11, q5, d1[1] \n" "vld1.f32 {d4-d7}, [%12 :128]! \n" "vext.f32 q6, q4, q6, #1 \n"// q6=12 "vmla.f32 q12, q5, d2[0] \n" "vmla.f32 q13, q5, d2[1] \n" "vmla.f32 q14, q5, d3[0] \n" "vmla.f32 q15, q5, d3[1] \n" /// "pld [%11, #256] \n" "vld2.f32 {d8-d11}, [%11]! \n"// q4=20 q5=21 "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vld1.f32 {d0-d3}, [%12 :128]! 
\n" "vmla.f32 q12, q6, d6[0] \n" "vmla.f32 q13, q6, d6[1] \n" "vmla.f32 q14, q6, d7[0] \n" "vmla.f32 q15, q6, d7[1] \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vld1.f32 {d4-d7}, [%12 :128]! \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "pld [%11, #128] \n" "vld2.f32 {d12-d13}, [%11] \n"// q6 "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vext.f32 q6, q4, q6, #1 \n"// q6=22 "vld1.f32 {d0-d3}, [%12 :128]! \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vst1.f32 {d16-d17}, [%1]! \n" "vst1.f32 {d18-d19}, [%2]! \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "vst1.f32 {d20-d21}, [%3]! \n" "vst1.f32 {d22-d23}, [%4]! \n" "sub %12, %12, #288 \n" "vst1.f32 {d24-d25}, [%5]! \n" "vst1.f32 {d26-d27}, [%6]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d29}, [%7]! \n" "vst1.f32 {d30-d31}, [%8]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v0.4s}, [%8] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "ld1 {v8.s}[0], [%0] \n" "ld1 {v8.s}[1], [%1] \n" "ld1 {v8.s}[2], [%2] \n" "ld1 {v8.s}[3], [%3] \n" "fmul v14.4s, v10.4s, v0.s[0] \n" "fmul v15.4s, v11.4s, v0.s[0] \n" "ld1 {v9.s}[0], [%4] \n" "ld1 {v9.s}[1], [%5] \n" "ld1 {v9.s}[2], [%6] \n" "ld1 {v9.s}[3], [%7] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v0.s[1] \n" "fmla v9.4s, v13.4s, v0.s[1] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "fmla v14.4s, v10.4s, v0.s[2] \n" "fmla v15.4s, v11.4s, v0.s[2] \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v1.4s}, [%9] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v1.s[0] \n" "fmla v9.4s, v13.4s, v1.s[0] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "fmla v14.4s, v10.4s, v1.s[1] \n" "fmla v15.4s, v11.4s, v1.s[1] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v1.s[2] \n" "fmla v9.4s, v13.4s, v1.s[2] \n" "prfm pldl1keep, [%10, #128] \n" "ld1 {v0.4s}, [%10] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "fmla v14.4s, v10.4s, v0.s[0] \n" "fmla v15.4s, v11.4s, v0.s[0] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v0.s[1] \n" "fmla v9.4s, v13.4s, v0.s[1] \n" "fmla v14.4s, v10.4s, v0.s[2] \n" "fmla v15.4s, v11.4s, v0.s[2] \n" "fadd v8.4s, 
v8.4s, v14.4s \n" "fadd v9.4s, v9.4s, v15.4s \n" "sub %11, %11, #288 \n" "st1 {v8.s}[0], [%0], #4 \n" "st1 {v8.s}[1], [%1], #4 \n" "st1 {v8.s}[2], [%2], #4 \n" "st1 {v8.s}[3], [%3], #4 \n" "st1 {v9.s}[0], [%4], #4 \n" "st1 {v9.s}[1], [%5], #4 \n" "st1 {v9.s}[2], [%6], #4 \n" "st1 {v9.s}[3], [%7], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); #else // __aarch64__ asm volatile( "vld1.f32 {d20-d23}, [%11 :128]! \n" "pld [%8, #128] \n" "vld1.f32 {d0-d1}, [%8] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vld1.f32 {d16[0]}, [%0] \n" "vld1.f32 {d16[1]}, [%1] \n" "vld1.f32 {d17[0]}, [%2] \n" "vld1.f32 {d17[1]}, [%3] \n" "vmul.f32 q14, q10, d0[0] \n" "vmul.f32 q15, q11, d0[0] \n" "vld1.f32 {d18[0]}, [%4] \n" "vld1.f32 {d18[1]}, [%5] \n" "vld1.f32 {d19[0]}, [%6] \n" "vld1.f32 {d19[1]}, [%7] \n" "vld1.f32 {d20-d23}, [%11 :128]! \n" "vmla.f32 q8, q12, d0[1] \n" "vmla.f32 q9, q13, d0[1] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[0] \n" "pld [%9, #128] \n" "vld1.f32 {d2-d3}, [%9] \n" "vld1.f32 {d20-d23}, [%11 :128]! \n" "vmla.f32 q8, q12, d2[0] \n" "vmla.f32 q9, q13, d2[0] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vmla.f32 q14, q10, d2[1] \n" "vmla.f32 q15, q11, d2[1] \n" "vld1.f32 {d20-d23}, [%11 :128]! \n" "vmla.f32 q8, q12, d3[0] \n" "vmla.f32 q9, q13, d3[0] \n" "pld [%10, #128] \n" "vld1.f32 {d0-d1}, [%10] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q11, d0[0] \n" "vld1.f32 {d20-d23}, [%11 :128]! 
\n" "vmla.f32 q8, q12, d0[1] \n" "vmla.f32 q9, q13, d0[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[0] \n" "vadd.f32 q8, q8, q14 \n" "vadd.f32 q9, q9, q15 \n" "sub %11, %11, #288 \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%1]! \n" "vst1.f32 {d17[0]}, [%2]! \n" "vst1.f32 {d17[1]}, [%3]! \n" "vst1.f32 {d18[0]}, [%4]! \n" "vst1.f32 {d18[1]}, [%5]! \n" "vst1.f32 {d19[0]}, [%6]! \n" "vst1.f32 {d19[1]}, [%7]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else // __ARM_NEON float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; float sum4 = 0.f; float sum5 = 0.f; float sum6 = 0.f; float sum7 = 0.f; sum0 += r0[0] * ktmp[0]; sum1 += r0[0] * ktmp[1]; sum2 += r0[0] * ktmp[2]; sum3 += r0[0] * ktmp[3]; sum4 += r0[0] * ktmp[4]; sum5 += r0[0] * ktmp[5]; sum6 += r0[0] * ktmp[6]; sum7 += r0[0] * ktmp[7]; ktmp += 8; sum0 += r0[1] * ktmp[0]; sum1 += r0[1] * ktmp[1]; sum2 += r0[1] * ktmp[2]; sum3 += r0[1] * ktmp[3]; sum4 += r0[1] * ktmp[4]; sum5 += r0[1] * ktmp[5]; sum6 += r0[1] * ktmp[6]; sum7 += r0[1] * ktmp[7]; ktmp += 8; sum0 += r0[2] * ktmp[0]; sum1 += r0[2] * ktmp[1]; sum2 += r0[2] * ktmp[2]; sum3 += r0[2] * ktmp[3]; sum4 += r0[2] * ktmp[4]; sum5 += r0[2] * ktmp[5]; sum6 += r0[2] * ktmp[6]; sum7 += r0[2] * ktmp[7]; ktmp += 8; sum0 += r1[0] * ktmp[0]; sum1 += r1[0] * ktmp[1]; sum2 += r1[0] * ktmp[2]; sum3 += r1[0] * ktmp[3]; sum4 += r1[0] * ktmp[4]; sum5 += r1[0] * ktmp[5]; sum6 += r1[0] * ktmp[6]; sum7 += r1[0] * ktmp[7]; ktmp += 8; sum0 += r1[1] * ktmp[0]; sum1 += r1[1] * ktmp[1]; sum2 += 
r1[1] * ktmp[2]; sum3 += r1[1] * ktmp[3]; sum4 += r1[1] * ktmp[4]; sum5 += r1[1] * ktmp[5]; sum6 += r1[1] * ktmp[6]; sum7 += r1[1] * ktmp[7]; ktmp += 8; sum0 += r1[2] * ktmp[0]; sum1 += r1[2] * ktmp[1]; sum2 += r1[2] * ktmp[2]; sum3 += r1[2] * ktmp[3]; sum4 += r1[2] * ktmp[4]; sum5 += r1[2] * ktmp[5]; sum6 += r1[2] * ktmp[6]; sum7 += r1[2] * ktmp[7]; ktmp += 8; sum0 += r2[0] * ktmp[0]; sum1 += r2[0] * ktmp[1]; sum2 += r2[0] * ktmp[2]; sum3 += r2[0] * ktmp[3]; sum4 += r2[0] * ktmp[4]; sum5 += r2[0] * ktmp[5]; sum6 += r2[0] * ktmp[6]; sum7 += r2[0] * ktmp[7]; ktmp += 8; sum0 += r2[1] * ktmp[0]; sum1 += r2[1] * ktmp[1]; sum2 += r2[1] * ktmp[2]; sum3 += r2[1] * ktmp[3]; sum4 += r2[1] * ktmp[4]; sum5 += r2[1] * ktmp[5]; sum6 += r2[1] * ktmp[6]; sum7 += r2[1] * ktmp[7]; ktmp += 8; sum0 += r2[2] * ktmp[0]; sum1 += r2[2] * ktmp[1]; sum2 += r2[2] * ktmp[2]; sum3 += r2[2] * ktmp[3]; sum4 += r2[2] * ktmp[4]; sum5 += r2[2] * ktmp[5]; sum6 += r2[2] * ktmp[6]; sum7 += r2[2] * ktmp[7]; ktmp += 8; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; ktmp -= 8*9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8*9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); const float* ktmp = _kernel.channel(p/8 + p%8); for (int q=0; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* k0 = ktmp; const float* k1 = ktmp + 3; const float* k2 = ktmp + 6; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(k0); float32x4_t _k3456 = vld1q_f32(k1); float32x4_t _k6789 = vld1q_f32(k2); #endif // __ARM_NEON int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4s}, [%1] \n" "fmla v0.4s, v2.4s, %10.s[0] \n" "fmul v10.4s, v3.4s, %10.s[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v8.4s, v9.4s}, [%2] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmul v11.4s, v1.4s, %10.s[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v2.4s, v3.4s}, [%3], #32 \n" "fmla v0.4s, v2.4s, %11.s[0] \n" "fmla v10.4s, v3.4s, %11.s[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v8.4s, v9.4s}, [%3] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %11.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v2.4s, v3.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %12.s[0] \n" "fmla v10.4s, v3.4s, %12.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v8.4s, v9.4s}, [%4] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %12.s[2] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "fadd v0.4s, v0.4s, v10.4s \n" "fadd v0.4s, v0.4s, v11.4s \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s}, [%1], #16 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", 
"v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1] \n" "vmla.f32 q0, q2, %e10[0] \n" "vmul.f32 q10, q3, %e10[1] \n" "pld [%2, #128] \n" "vld2.f32 {d16-d17}, [%2] \n" "vext.32 q1, q2, q8, #1 \n" "vmul.f32 q11, q1, %f10[0] \n" "pld [%3, #256] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vmla.f32 q0, q2, %e11[0] \n" "vmla.f32 q10, q3, %e11[1] \n" "pld [%3, #128] \n" "vld2.f32 {d16-d17}, [%3] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f11[0] \n" "pld [%4, #256] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vmla.f32 q0, q2, %e12[0] \n" "vmla.f32 q10, q3, %e12[1] \n" "pld [%4, #128] \n" "vld2.f32 {d16-d17}, [%4] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f12[0] \n" "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vadd.f32 q0, q0, q10 \n" "vadd.f32 q0, q0, q11 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = 0; sum += r0[0] * ktmp[0]; sum += r0[1] * ktmp[1]; sum += r0[2] * ktmp[2]; sum += r1[0] * ktmp[3]; sum += r1[1] * ktmp[4]; sum += r1[2] * ktmp[5]; sum += r2[0] 
* ktmp[6]; sum += r2[1] * ktmp[7]; sum += r2[2] * ktmp[8]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 9; } } }
pmmomp.c
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#include <time.h>
/* BUG FIX: the original guarded only the #include with _OPENMP but still
 * called omp_get_wtime() unconditionally, so a build without OpenMP did
 * not compile.  Provide a serial stand-in based on clock() so the timing
 * code keeps working. */
static double omp_get_wtime(void) { return (double) clock() / CLOCKS_PER_SEC; }
#endif
#include <string.h>

/* Print the n-by-n integer matrix m, one row per output line. */
void printMatriz (int n, int **m)
{
    int i, j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++)
            printf("%d ", m[i][j]);
        printf("\n");
    }
}

/* Square matrix product a = b * c, with the initialization and the
 * multiplication triple loop parallelized via OpenMP.
 *
 * argv[1] : matrix dimension n.
 * Output  : the three matrices when n < 15, otherwise the elapsed time
 *           plus the first and last elements of the result.
 * Returns 0 on success; exits with status 1 on bad usage or OOM. */
int main(int argc, char const *argv[])
{
    if (argc < 2) {
        fprintf(stderr, "ERROR: falta numero de filas y columnas\n");
        exit(1);
    }

    unsigned n, i, j, k;
    n = strtol(argv[1], NULL, 10);

    /* Allocate the three n-by-n matrices as arrays of row pointers.
     * The original never checked any malloc result and leaked all of
     * them; both are fixed below. */
    int **a, **b, **c;
    a = malloc(n * sizeof(int *));
    b = malloc(n * sizeof(int *));
    c = malloc(n * sizeof(int *));
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "ERROR: sin memoria\n");
        exit(1);
    }
    for (i = 0; i < n; i++) {
        a[i] = malloc(n * sizeof(int));
        b[i] = malloc(n * sizeof(int));
        c[i] = malloc(n * sizeof(int));
        if (a[i] == NULL || b[i] == NULL || c[i] == NULL) {
            fprintf(stderr, "ERROR: sin memoria\n");
            exit(1);
        }
    }

    /* Inicializacion: a cleared, b and c filled with constants. */
    #pragma omp parallel for private(j)
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            a[i][j] = 0;
            b[i][j] = /*i+1*/1;
            c[i][j] = /*j+1*/2;
        }
    }

    /* Multiplicacion (classic i-j-k triple loop; rows of a are
     * independent, so only the outer loop is parallelized). */
    double start, end, tiempo;
    start = omp_get_wtime();
    #pragma omp parallel for private(k,j)
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            for (k = 0; k < n; k++)
                a[i][j] += b[i][k] * c[k][j];
    end = omp_get_wtime();
    tiempo = end - start;

    if (n < 15) {
        printf("M1:\n");
        printMatriz(n, b);
        printf("M2:\n");
        printMatriz(n, c);
        printf("Sol:\n");
        printMatriz(n, a);
    }
    else
        printf("Tiempo = %11.9f\t Primera = %d\t Ultima=%d\n",
               tiempo, a[0][0], a[n-1][n-1]);

    /* Release everything (the original leaked all three matrices). */
    for (i = 0; i < n; i++) {
        free(a[i]);
        free(b[i]);
        free(c[i]);
    }
    free(a);
    free(b);
    free(c);

    return 0;
}
DRB015-outofbounds-var-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must not
   itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it. */

/* glibc's intent is to support the IEC 559 math functionality, real and
   complex.  If the GCC (4.9 and later) predefined macros specifying
   compiler intent are available, use them to determine whether the
   overall intent is to support these features; otherwise, presume an
   older compiler has intent to support these features and define these
   macros by default. */

/* wchar_t uses Unicode 10.0.0.  Version 10.0 of the Unicode Standard is
   synchronized with ISO/IEC 10646:2017, fifth edition, plus the
   following additions from Amendment 1 to the fifth edition:
   - 56 emoji characters
   - 285 hentaigana
   - 3 additional Zanabazar Square characters */

/* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
   Produced at the Lawrence Livermore National Laboratory

   Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus
   Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov,
   asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov)

   LLNL-CODE-732144

   All rights reserved.

   This file is part of DataRaceBench. For details, see
   https://github.com/LLNL/dataracebench. Please also see the LICENSE
   file for our additional BSD notice.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the disclaimer below.

   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the disclaimer (as noted below)
     in the documentation and/or other materials provided with the
     distribution.

   * Neither the name of the LLNS/LLNL nor the names of its contributors
     may be used to endorse or promote products derived from this
     software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
   LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
   OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   DAMAGE. */

/* The outermost loop is parallelized.
But the inner level loop has an out-of-bounds access for b[i][j] when j
   equals 0.  This will cause a memory access of a previous row's last
   element.

   For example, an array of 4x4:
       j=0 1 2 3
   i=0   x x x x
     1   x x x x
     2   x x x x
     3   x x x x

   outer loop: i=2, inner loop: j=0
   array element accessed b[i][j-1] becomes b[2][-1], which in turn is
   b[1][3] due to linearized row-major storage of the 2-D array.
   This causes loop-carried data dependence between i=2 and i=1.

   Data race pair: b[i][j]@80:7 vs. b[i][j-1]@80:15 */

/* NOTE(review): this is a DataRaceBench "-yes" benchmark -- the data
 * race / out-of-bounds behavior described above is INTENTIONAL and must
 * not be "fixed".  The "#pragma cetus" / "#pragma loop name" lines are
 * annotations emitted by the Cetus source-to-source auto-parallelizer;
 * only the "#pragma omp" lines affect compilation.
 *
 * NOTE(review): the copy loop below reads b[i-1][j], not b[i][j-1] as
 * the header text describes -- presumably an artifact of the Cetus
 * transformation; verify against the upstream DRB015 source. */

#include <stdlib.h>
#include <stdio.h>

int main(int argc, char * argv[])
{
    int i, j;
    /* Default problem size; argv[1] is parsed into len further down. */
    int len = 100;
    /* NOTE(review): n and m capture len BEFORE argv[1] is parsed, so a
     * command-line size never changes the actual array dimensions --
     * confirm this ordering matches the upstream benchmark's intent. */
    int n = len, m = len;
    double b[n][m]; /* VLA: 100x100 doubles (~80 KB) on the stack */
    int _ret_val_0;
    if (argc>1)
    {
        len=atoi(argv[1]);
    }
    /* Loop nest 1: initialize b[i][j] = i*m + j.  Both loop levels
     * carry "omp parallel for" pragmas; each iteration writes a
     * distinct element. */
    #pragma cetus private(i, j)
    #pragma loop name main#0
    #pragma cetus parallel
    #pragma omp parallel for private(i, j)
    for (i=0; i<n; i ++ )
    {
        #pragma cetus private(j)
        #pragma loop name main#0#0
        #pragma cetus parallel
        #pragma omp parallel for private(j)
        for (j=0; j<m; j ++ )
        {
            b[i][j]=((i*m)+j);
        }
    }
    /* Loop nest 2: copy each row from the previous one.  The outer i
     * loop is serial here; only the inner j loop is parallelized. */
    #pragma cetus private(i, j)
    #pragma loop name main#1
    for (i=1; i<n; i ++ )
    {
        #pragma cetus private(j)
        #pragma loop name main#1#0
        #pragma cetus parallel
        #pragma omp parallel for private(j)
        for (j=0; j<m; j ++ )
        {
            b[i][j]=b[i-1][j];
        }
    }
    /* Loop nest 3: serial printout of the whole array. */
    #pragma cetus private(i, j)
    #pragma loop name main#2
    for (i=0; i<n; i ++ )
    {
        #pragma cetus private(j)
        #pragma loop name main#2#0
        for (j=0; j<m; j ++ )
        {
            printf("%lf\n", b[i][j]);
        }
    }
    _ret_val_0=0;
    return _ret_val_0;
}